1 //===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "AArch64TargetTransformInfo.h"
10 #include "AArch64ExpandImm.h"
11 #include "AArch64PerfectShuffle.h"
12 #include "MCTargetDesc/AArch64AddressingModes.h"
13 #include "llvm/Analysis/IVDescriptors.h"
14 #include "llvm/Analysis/LoopInfo.h"
15 #include "llvm/Analysis/TargetTransformInfo.h"
16 #include "llvm/CodeGen/BasicTTIImpl.h"
17 #include "llvm/CodeGen/CostTable.h"
18 #include "llvm/CodeGen/TargetLowering.h"
19 #include "llvm/IR/IntrinsicInst.h"
20 #include "llvm/IR/Intrinsics.h"
21 #include "llvm/IR/IntrinsicsAArch64.h"
22 #include "llvm/IR/PatternMatch.h"
23 #include "llvm/Support/Debug.h"
24 #include "llvm/Transforms/InstCombine/InstCombiner.h"
25 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
26 #include <algorithm>
27 #include <optional>
28 using namespace llvm;
29 using namespace llvm::PatternMatch;
30 
31 #define DEBUG_TYPE "aarch64tti"
32 
33 static cl::opt<bool> EnableFalkorHWPFUnrollFix("enable-falkor-hwpf-unroll-fix",
34                                                cl::init(true), cl::Hidden);
35 
36 static cl::opt<unsigned> SVEGatherOverhead("sve-gather-overhead", cl::init(10),
37                                            cl::Hidden);
38 
39 static cl::opt<unsigned> SVEScatterOverhead("sve-scatter-overhead",
40                                             cl::init(10), cl::Hidden);
41 
42 static cl::opt<unsigned> SVETailFoldInsnThreshold("sve-tail-folding-insn-threshold",
43                                                   cl::init(15), cl::Hidden);
44 
45 static cl::opt<unsigned>
46     NeonNonConstStrideOverhead("neon-nonconst-stride-overhead", cl::init(10),
47                                cl::Hidden);
48 
49 static cl::opt<unsigned> CallPenaltyChangeSM(
50     "call-penalty-sm-change", cl::init(5), cl::Hidden,
51     cl::desc(
52         "Penalty of calling a function that requires a change to PSTATE.SM"));
53 
54 static cl::opt<unsigned> InlineCallPenaltyChangeSM(
55     "inline-call-penalty-sm-change", cl::init(10), cl::Hidden,
56     cl::desc("Penalty of inlining a call that requires a change to PSTATE.SM"));
57 
58 static cl::opt<bool> EnableOrLikeSelectOpt("enable-aarch64-or-like-select",
59                                            cl::init(true), cl::Hidden);
60 
61 namespace {
62 class TailFoldingOption {
63   // These bitfields will only ever be set to something non-zero in operator=,
64   // when setting the -sve-tail-folding option. This option should always be of
65   // the form (default|simple|all|disable)[+(Flag1|Flag2|etc)], where here
66   // InitialBits is one of (disabled|all|simple). EnableBits represents
67   // additional flags we're enabling, and DisableBits for those flags we're
68   // disabling. The default flag is tracked in the variable NeedsDefault, since
69   // at the time of setting the option we may not know what the default value
70   // for the CPU is.
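  // For example, "-sve-tail-folding=all+noreverse" sets InitialBits to
  // TailFoldingOpts::All and records TailFoldingOpts::Reverse in DisableBits.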
71   TailFoldingOpts InitialBits = TailFoldingOpts::Disabled;
72   TailFoldingOpts EnableBits = TailFoldingOpts::Disabled;
73   TailFoldingOpts DisableBits = TailFoldingOpts::Disabled;
74 
75   // This value needs to be initialised to true in case the user does not
76   // explicitly set the -sve-tail-folding option.
77   bool NeedsDefault = true;
78 
  void setInitialBits(TailFoldingOpts Bits) { InitialBits = Bits; }
80 
  void setNeedsDefault(bool V) { NeedsDefault = V; }
82 
  void setEnableBit(TailFoldingOpts Bit) {
84     EnableBits |= Bit;
85     DisableBits &= ~Bit;
86   }
87 
  void setDisableBit(TailFoldingOpts Bit) {
89     EnableBits &= ~Bit;
90     DisableBits |= Bit;
91   }
92 
  TailFoldingOpts getBits(TailFoldingOpts DefaultBits) const {
94     TailFoldingOpts Bits = TailFoldingOpts::Disabled;
95 
96     assert((InitialBits == TailFoldingOpts::Disabled || !NeedsDefault) &&
97            "Initial bits should only include one of "
98            "(disabled|all|simple|default)");
99     Bits = NeedsDefault ? DefaultBits : InitialBits;
100     Bits |= EnableBits;
101     Bits &= ~DisableBits;
102 
103     return Bits;
104   }
105 
  void reportError(std::string Opt) {
107     errs() << "invalid argument '" << Opt
108            << "' to -sve-tail-folding=; the option should be of the form\n"
109               "  (disabled|all|default|simple)[+(reductions|recurrences"
110               "|reverse|noreductions|norecurrences|noreverse)]\n";
111     report_fatal_error("Unrecognised tail-folding option");
112   }
113 
114 public:
115 
  void operator=(const std::string &Val) {
117     // If the user explicitly sets -sve-tail-folding= then treat as an error.
118     if (Val.empty()) {
119       reportError("");
120       return;
121     }
122 
123     // Since the user is explicitly setting the option we don't automatically
124     // need the default unless they require it.
125     setNeedsDefault(false);
126 
127     SmallVector<StringRef, 4> TailFoldTypes;
128     StringRef(Val).split(TailFoldTypes, '+', -1, false);
129 
130     unsigned StartIdx = 1;
131     if (TailFoldTypes[0] == "disabled")
132       setInitialBits(TailFoldingOpts::Disabled);
133     else if (TailFoldTypes[0] == "all")
134       setInitialBits(TailFoldingOpts::All);
135     else if (TailFoldTypes[0] == "default")
136       setNeedsDefault(true);
137     else if (TailFoldTypes[0] == "simple")
138       setInitialBits(TailFoldingOpts::Simple);
139     else {
140       StartIdx = 0;
141       setInitialBits(TailFoldingOpts::Disabled);
142     }
143 
144     for (unsigned I = StartIdx; I < TailFoldTypes.size(); I++) {
145       if (TailFoldTypes[I] == "reductions")
146         setEnableBit(TailFoldingOpts::Reductions);
147       else if (TailFoldTypes[I] == "recurrences")
148         setEnableBit(TailFoldingOpts::Recurrences);
149       else if (TailFoldTypes[I] == "reverse")
150         setEnableBit(TailFoldingOpts::Reverse);
151       else if (TailFoldTypes[I] == "noreductions")
152         setDisableBit(TailFoldingOpts::Reductions);
153       else if (TailFoldTypes[I] == "norecurrences")
154         setDisableBit(TailFoldingOpts::Recurrences);
155       else if (TailFoldTypes[I] == "noreverse")
156         setDisableBit(TailFoldingOpts::Reverse);
157       else
158         reportError(Val);
159     }
160   }
161 
  bool satisfies(TailFoldingOpts DefaultBits, TailFoldingOpts Required) const {
163     return (getBits(DefaultBits) & Required) == Required;
164   }
165 };
166 } // namespace
167 
168 TailFoldingOption TailFoldingOptionLoc;
169 
170 cl::opt<TailFoldingOption, true, cl::parser<std::string>> SVETailFolding(
171     "sve-tail-folding",
172     cl::desc(
173         "Control the use of vectorisation using tail-folding for SVE where the"
174         " option is specified in the form (Initial)[+(Flag1|Flag2|...)]:"
175         "\ndisabled      (Initial) No loop types will vectorize using "
176         "tail-folding"
177         "\ndefault       (Initial) Uses the default tail-folding settings for "
178         "the target CPU"
179         "\nall           (Initial) All legal loop types will vectorize using "
180         "tail-folding"
181         "\nsimple        (Initial) Use tail-folding for simple loops (not "
182         "reductions or recurrences)"
183         "\nreductions    Use tail-folding for loops containing reductions"
184         "\nnoreductions  Inverse of above"
185         "\nrecurrences   Use tail-folding for loops containing fixed order "
186         "recurrences"
187         "\nnorecurrences Inverse of above"
188         "\nreverse       Use tail-folding for loops requiring reversed "
189         "predicates"
190         "\nnoreverse     Inverse of above"),
191     cl::location(TailFoldingOptionLoc));
192 
193 // Experimental option that will only be fully functional when the
194 // code-generator is changed to use SVE instead of NEON for all fixed-width
195 // operations.
196 static cl::opt<bool> EnableFixedwidthAutovecInStreamingMode(
197     "enable-fixedwidth-autovec-in-streaming-mode", cl::init(false), cl::Hidden);
198 
199 // Experimental option that will only be fully functional when the cost-model
200 // and code-generator have been changed to avoid using scalable vector
201 // instructions that are not legal in streaming SVE mode.
202 static cl::opt<bool> EnableScalableAutovecInStreamingMode(
203     "enable-scalable-autovec-in-streaming-mode", cl::init(false), cl::Hidden);
204 
static bool isSMEABIRoutineCall(const CallInst &CI) {
206   const auto *F = CI.getCalledFunction();
207   return F && StringSwitch<bool>(F->getName())
208                   .Case("__arm_sme_state", true)
209                   .Case("__arm_tpidr2_save", true)
210                   .Case("__arm_tpidr2_restore", true)
211                   .Case("__arm_za_disable", true)
212                   .Default(false);
213 }
214 
215 /// Returns true if the function has explicit operations that can only be
216 /// lowered using incompatible instructions for the selected mode. This also
217 /// returns true if the function F may use or modify ZA state.
static bool hasPossibleIncompatibleOps(const Function *F) {
219   for (const BasicBlock &BB : *F) {
220     for (const Instruction &I : BB) {
221       // Be conservative for now and assume that any call to inline asm or to
      // intrinsics could result in non-streaming ops (e.g. calls to
223       // @llvm.aarch64.* or @llvm.gather/scatter intrinsics). We can assume that
224       // all native LLVM instructions can be lowered to compatible instructions.
225       if (isa<CallInst>(I) && !I.isDebugOrPseudoInst() &&
226           (cast<CallInst>(I).isInlineAsm() || isa<IntrinsicInst>(I) ||
227            isSMEABIRoutineCall(cast<CallInst>(I))))
228         return true;
229     }
230   }
231   return false;
232 }
233 
bool AArch64TTIImpl::areInlineCompatible(const Function *Caller,
235                                          const Function *Callee) const {
236   SMEAttrs CallerAttrs(*Caller), CalleeAttrs(*Callee);
237 
238   // When inlining, we should consider the body of the function, not the
239   // interface.
240   if (CalleeAttrs.hasStreamingBody()) {
241     CalleeAttrs.set(SMEAttrs::SM_Compatible, false);
242     CalleeAttrs.set(SMEAttrs::SM_Enabled, true);
243   }
244 
245   if (CalleeAttrs.hasNewZABody())
246     return false;
247 
248   if (CallerAttrs.requiresLazySave(CalleeAttrs) ||
249       CallerAttrs.requiresSMChange(CalleeAttrs)) {
250     if (hasPossibleIncompatibleOps(Callee))
251       return false;
252   }
253 
254   const TargetMachine &TM = getTLI()->getTargetMachine();
255 
256   const FeatureBitset &CallerBits =
257       TM.getSubtargetImpl(*Caller)->getFeatureBits();
258   const FeatureBitset &CalleeBits =
259       TM.getSubtargetImpl(*Callee)->getFeatureBits();
260 
  // Inline a callee if its target-features are a subset of the caller's
  // target-features.
263   return (CallerBits & CalleeBits) == CalleeBits;
264 }
265 
bool AArch64TTIImpl::areTypesABICompatible(
267     const Function *Caller, const Function *Callee,
268     const ArrayRef<Type *> &Types) const {
269   if (!BaseT::areTypesABICompatible(Caller, Callee, Types))
270     return false;
271 
272   // We need to ensure that argument promotion does not attempt to promote
273   // pointers to fixed-length vector types larger than 128 bits like
274   // <8 x float> (and pointers to aggregate types which have such fixed-length
275   // vector type members) into the values of the pointees. Such vector types
276   // are used for SVE VLS but there is no ABI for SVE VLS arguments and the
277   // backend cannot lower such value arguments. The 128-bit fixed-length SVE
278   // types can be safely treated as 128-bit NEON types and they cannot be
279   // distinguished in IR.
280   if (ST->useSVEForFixedLengthVectors() && llvm::any_of(Types, [](Type *Ty) {
281         auto FVTy = dyn_cast<FixedVectorType>(Ty);
282         return FVTy &&
283                FVTy->getScalarSizeInBits() * FVTy->getNumElements() > 128;
284       }))
285     return false;
286 
287   return true;
288 }
289 
290 unsigned
AArch64TTIImpl::getInlineCallPenalty(const Function *F, const CallBase &Call,
292                                      unsigned DefaultCallPenalty) const {
293   // This function calculates a penalty for executing Call in F.
294   //
295   // There are two ways this function can be called:
296   // (1)  F:
297   //       call from F -> G (the call here is Call)
298   //
299   // For (1), Call.getCaller() == F, so it will always return a high cost if
300   // a streaming-mode change is required (thus promoting the need to inline the
301   // function)
302   //
303   // (2)  F:
304   //       call from F -> G (the call here is not Call)
305   //      G:
306   //       call from G -> H (the call here is Call)
307   //
308   // For (2), if after inlining the body of G into F the call to H requires a
309   // streaming-mode change, and the call to G from F would also require a
310   // streaming-mode change, then there is benefit to do the streaming-mode
311   // change only once and avoid inlining of G into F.
312   SMEAttrs FAttrs(*F);
313   SMEAttrs CalleeAttrs(Call);
314   if (FAttrs.requiresSMChange(CalleeAttrs)) {
315     if (F == Call.getCaller()) // (1)
316       return CallPenaltyChangeSM * DefaultCallPenalty;
317     if (FAttrs.requiresSMChange(SMEAttrs(*Call.getCaller()))) // (2)
318       return InlineCallPenaltyChangeSM * DefaultCallPenalty;
319   }
320 
321   return DefaultCallPenalty;
322 }
323 
bool AArch64TTIImpl::shouldMaximizeVectorBandwidth(
325     TargetTransformInfo::RegisterKind K) const {
326   assert(K != TargetTransformInfo::RGK_Scalar);
327   return (K == TargetTransformInfo::RGK_FixedWidthVector &&
328           ST->isNeonAvailable());
329 }
330 
331 /// Calculate the cost of materializing a 64-bit value. This helper
332 /// method might only calculate a fraction of a larger immediate. Therefore it
333 /// is valid to return a cost of ZERO.
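/// For example, a 64-bit constant with four distinct non-zero 16-bit chunks
/// typically expands to a MOVZ plus three MOVKs (cost 4), whereas a value
/// encodable as a logical immediate costs 0 here.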
InstructionCost AArch64TTIImpl::getIntImmCost(int64_t Val) {
335   // Check if the immediate can be encoded within an instruction.
336   if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
337     return 0;
338 
339   if (Val < 0)
340     Val = ~Val;
341 
342   // Calculate how many moves we will need to materialize this constant.
343   SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
344   AArch64_IMM::expandMOVImm(Val, 64, Insn);
345   return Insn.size();
346 }
347 
348 /// Calculate the cost of materializing the given constant.
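/// Constants wider than 64 bits are split into 64-bit chunks; the cost of
/// each chunk is computed separately and summed, with a minimum cost of 1.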
InstructionCost AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
350                                               TTI::TargetCostKind CostKind) {
351   assert(Ty->isIntegerTy());
352 
353   unsigned BitSize = Ty->getPrimitiveSizeInBits();
354   if (BitSize == 0)
355     return ~0U;
356 
357   // Sign-extend all constants to a multiple of 64-bit.
358   APInt ImmVal = Imm;
359   if (BitSize & 0x3f)
360     ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);
361 
362   // Split the constant into 64-bit chunks and calculate the cost for each
363   // chunk.
364   InstructionCost Cost = 0;
365   for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
366     APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
367     int64_t Val = Tmp.getSExtValue();
368     Cost += getIntImmCost(Val);
369   }
  // We need at least one instruction to materialize the constant.
371   return std::max<InstructionCost>(1, Cost);
372 }
373 
InstructionCost AArch64TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
375                                                   const APInt &Imm, Type *Ty,
376                                                   TTI::TargetCostKind CostKind,
377                                                   Instruction *Inst) {
378   assert(Ty->isIntegerTy());
379 
380   unsigned BitSize = Ty->getPrimitiveSizeInBits();
381   // There is no cost model for constants with a bit size of 0. Return TCC_Free
382   // here, so that constant hoisting will ignore this constant.
383   if (BitSize == 0)
384     return TTI::TCC_Free;
385 
386   unsigned ImmIdx = ~0U;
387   switch (Opcode) {
388   default:
389     return TTI::TCC_Free;
390   case Instruction::GetElementPtr:
391     // Always hoist the base address of a GetElementPtr.
392     if (Idx == 0)
393       return 2 * TTI::TCC_Basic;
394     return TTI::TCC_Free;
395   case Instruction::Store:
396     ImmIdx = 0;
397     break;
398   case Instruction::Add:
399   case Instruction::Sub:
400   case Instruction::Mul:
401   case Instruction::UDiv:
402   case Instruction::SDiv:
403   case Instruction::URem:
404   case Instruction::SRem:
405   case Instruction::And:
406   case Instruction::Or:
407   case Instruction::Xor:
408   case Instruction::ICmp:
409     ImmIdx = 1;
410     break;
411   // Always return TCC_Free for the shift value of a shift instruction.
412   case Instruction::Shl:
413   case Instruction::LShr:
414   case Instruction::AShr:
415     if (Idx == 1)
416       return TTI::TCC_Free;
417     break;
418   case Instruction::Trunc:
419   case Instruction::ZExt:
420   case Instruction::SExt:
421   case Instruction::IntToPtr:
422   case Instruction::PtrToInt:
423   case Instruction::BitCast:
424   case Instruction::PHI:
425   case Instruction::Call:
426   case Instruction::Select:
427   case Instruction::Ret:
428   case Instruction::Load:
429     break;
430   }
431 
432   if (Idx == ImmIdx) {
433     int NumConstants = (BitSize + 63) / 64;
434     InstructionCost Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
435     return (Cost <= NumConstants * TTI::TCC_Basic)
436                ? static_cast<int>(TTI::TCC_Free)
437                : Cost;
438   }
439   return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
440 }
441 
442 InstructionCost
AArch64TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
444                                     const APInt &Imm, Type *Ty,
445                                     TTI::TargetCostKind CostKind) {
446   assert(Ty->isIntegerTy());
447 
448   unsigned BitSize = Ty->getPrimitiveSizeInBits();
449   // There is no cost model for constants with a bit size of 0. Return TCC_Free
450   // here, so that constant hoisting will ignore this constant.
451   if (BitSize == 0)
452     return TTI::TCC_Free;
453 
454   // Most (all?) AArch64 intrinsics do not support folding immediates into the
455   // selected instruction, so we compute the materialization cost for the
456   // immediate directly.
457   if (IID >= Intrinsic::aarch64_addg && IID <= Intrinsic::aarch64_udiv)
458     return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
459 
460   switch (IID) {
461   default:
462     return TTI::TCC_Free;
463   case Intrinsic::sadd_with_overflow:
464   case Intrinsic::uadd_with_overflow:
465   case Intrinsic::ssub_with_overflow:
466   case Intrinsic::usub_with_overflow:
467   case Intrinsic::smul_with_overflow:
468   case Intrinsic::umul_with_overflow:
469     if (Idx == 1) {
470       int NumConstants = (BitSize + 63) / 64;
471       InstructionCost Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
472       return (Cost <= NumConstants * TTI::TCC_Basic)
473                  ? static_cast<int>(TTI::TCC_Free)
474                  : Cost;
475     }
476     break;
477   case Intrinsic::experimental_stackmap:
478     if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
479       return TTI::TCC_Free;
480     break;
481   case Intrinsic::experimental_patchpoint_void:
482   case Intrinsic::experimental_patchpoint_i64:
483     if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
484       return TTI::TCC_Free;
485     break;
486   case Intrinsic::experimental_gc_statepoint:
487     if ((Idx < 5) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
488       return TTI::TCC_Free;
489     break;
490   }
491   return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
492 }
493 
494 TargetTransformInfo::PopcntSupportKind
AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) {
496   assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
497   if (TyWidth == 32 || TyWidth == 64)
498     return TTI::PSK_FastHardware;
499   // TODO: AArch64TargetLowering::LowerCTPOP() supports 128bit popcount.
500   return TTI::PSK_Software;
501 }
502 
503 InstructionCost
AArch64TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
505                                       TTI::TargetCostKind CostKind) {
506   auto *RetTy = ICA.getReturnType();
507   switch (ICA.getID()) {
508   case Intrinsic::umin:
509   case Intrinsic::umax:
510   case Intrinsic::smin:
511   case Intrinsic::smax: {
512     static const auto ValidMinMaxTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
513                                         MVT::v8i16, MVT::v2i32, MVT::v4i32,
514                                         MVT::nxv16i8, MVT::nxv8i16, MVT::nxv4i32,
515                                         MVT::nxv2i64};
516     auto LT = getTypeLegalizationCost(RetTy);
517     // v2i64 types get converted to cmp+bif hence the cost of 2
518     if (LT.second == MVT::v2i64)
519       return LT.first * 2;
520     if (any_of(ValidMinMaxTys, [&LT](MVT M) { return M == LT.second; }))
521       return LT.first;
522     break;
523   }
524   case Intrinsic::sadd_sat:
525   case Intrinsic::ssub_sat:
526   case Intrinsic::uadd_sat:
527   case Intrinsic::usub_sat: {
528     static const auto ValidSatTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
529                                      MVT::v8i16, MVT::v2i32, MVT::v4i32,
530                                      MVT::v2i64};
531     auto LT = getTypeLegalizationCost(RetTy);
532     // This is a base cost of 1 for the vadd, plus 3 extract shifts if we
533     // need to extend the type, as it uses shr(qadd(shl, shl)).
534     unsigned Instrs =
535         LT.second.getScalarSizeInBits() == RetTy->getScalarSizeInBits() ? 1 : 4;
536     if (any_of(ValidSatTys, [&LT](MVT M) { return M == LT.second; }))
537       return LT.first * Instrs;
538     break;
539   }
540   case Intrinsic::abs: {
541     static const auto ValidAbsTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
542                                      MVT::v8i16, MVT::v2i32, MVT::v4i32,
543                                      MVT::v2i64};
544     auto LT = getTypeLegalizationCost(RetTy);
545     if (any_of(ValidAbsTys, [&LT](MVT M) { return M == LT.second; }))
546       return LT.first;
547     break;
548   }
549   case Intrinsic::bswap: {
550     static const auto ValidAbsTys = {MVT::v4i16, MVT::v8i16, MVT::v2i32,
551                                      MVT::v4i32, MVT::v2i64};
552     auto LT = getTypeLegalizationCost(RetTy);
553     if (any_of(ValidAbsTys, [&LT](MVT M) { return M == LT.second; }) &&
554         LT.second.getScalarSizeInBits() == RetTy->getScalarSizeInBits())
555       return LT.first;
556     break;
557   }
558   case Intrinsic::experimental_stepvector: {
559     InstructionCost Cost = 1; // Cost of the `index' instruction
560     auto LT = getTypeLegalizationCost(RetTy);
561     // Legalisation of illegal vectors involves an `index' instruction plus
562     // (LT.first - 1) vector adds.
563     if (LT.first > 1) {
564       Type *LegalVTy = EVT(LT.second).getTypeForEVT(RetTy->getContext());
565       InstructionCost AddCost =
566           getArithmeticInstrCost(Instruction::Add, LegalVTy, CostKind);
567       Cost += AddCost * (LT.first - 1);
568     }
569     return Cost;
570   }
571   case Intrinsic::bitreverse: {
572     static const CostTblEntry BitreverseTbl[] = {
573         {Intrinsic::bitreverse, MVT::i32, 1},
574         {Intrinsic::bitreverse, MVT::i64, 1},
575         {Intrinsic::bitreverse, MVT::v8i8, 1},
576         {Intrinsic::bitreverse, MVT::v16i8, 1},
577         {Intrinsic::bitreverse, MVT::v4i16, 2},
578         {Intrinsic::bitreverse, MVT::v8i16, 2},
579         {Intrinsic::bitreverse, MVT::v2i32, 2},
580         {Intrinsic::bitreverse, MVT::v4i32, 2},
581         {Intrinsic::bitreverse, MVT::v1i64, 2},
582         {Intrinsic::bitreverse, MVT::v2i64, 2},
583     };
584     const auto LegalisationCost = getTypeLegalizationCost(RetTy);
585     const auto *Entry =
586         CostTableLookup(BitreverseTbl, ICA.getID(), LegalisationCost.second);
587     if (Entry) {
      // The cost model uses the legal type (i32) that i8 and i16 are promoted
      // to, plus 1 so that we match the actual lowering cost.
590       if (TLI->getValueType(DL, RetTy, true) == MVT::i8 ||
591           TLI->getValueType(DL, RetTy, true) == MVT::i16)
592         return LegalisationCost.first * Entry->Cost + 1;
593 
594       return LegalisationCost.first * Entry->Cost;
595     }
596     break;
597   }
598   case Intrinsic::ctpop: {
599     if (!ST->hasNEON()) {
600       // 32-bit or 64-bit ctpop without NEON is 12 instructions.
601       return getTypeLegalizationCost(RetTy).first * 12;
602     }
603     static const CostTblEntry CtpopCostTbl[] = {
604         {ISD::CTPOP, MVT::v2i64, 4},
605         {ISD::CTPOP, MVT::v4i32, 3},
606         {ISD::CTPOP, MVT::v8i16, 2},
607         {ISD::CTPOP, MVT::v16i8, 1},
608         {ISD::CTPOP, MVT::i64,   4},
609         {ISD::CTPOP, MVT::v2i32, 3},
610         {ISD::CTPOP, MVT::v4i16, 2},
611         {ISD::CTPOP, MVT::v8i8,  1},
612         {ISD::CTPOP, MVT::i32,   5},
613     };
614     auto LT = getTypeLegalizationCost(RetTy);
615     MVT MTy = LT.second;
616     if (const auto *Entry = CostTableLookup(CtpopCostTbl, ISD::CTPOP, MTy)) {
617       // Extra cost of +1 when illegal vector types are legalized by promoting
618       // the integer type.
619       int ExtraCost = MTy.isVector() && MTy.getScalarSizeInBits() !=
620                                             RetTy->getScalarSizeInBits()
621                           ? 1
622                           : 0;
623       return LT.first * Entry->Cost + ExtraCost;
624     }
625     break;
626   }
627   case Intrinsic::sadd_with_overflow:
628   case Intrinsic::uadd_with_overflow:
629   case Intrinsic::ssub_with_overflow:
630   case Intrinsic::usub_with_overflow:
631   case Intrinsic::smul_with_overflow:
632   case Intrinsic::umul_with_overflow: {
633     static const CostTblEntry WithOverflowCostTbl[] = {
634         {Intrinsic::sadd_with_overflow, MVT::i8, 3},
635         {Intrinsic::uadd_with_overflow, MVT::i8, 3},
636         {Intrinsic::sadd_with_overflow, MVT::i16, 3},
637         {Intrinsic::uadd_with_overflow, MVT::i16, 3},
638         {Intrinsic::sadd_with_overflow, MVT::i32, 1},
639         {Intrinsic::uadd_with_overflow, MVT::i32, 1},
640         {Intrinsic::sadd_with_overflow, MVT::i64, 1},
641         {Intrinsic::uadd_with_overflow, MVT::i64, 1},
642         {Intrinsic::ssub_with_overflow, MVT::i8, 3},
643         {Intrinsic::usub_with_overflow, MVT::i8, 3},
644         {Intrinsic::ssub_with_overflow, MVT::i16, 3},
645         {Intrinsic::usub_with_overflow, MVT::i16, 3},
646         {Intrinsic::ssub_with_overflow, MVT::i32, 1},
647         {Intrinsic::usub_with_overflow, MVT::i32, 1},
648         {Intrinsic::ssub_with_overflow, MVT::i64, 1},
649         {Intrinsic::usub_with_overflow, MVT::i64, 1},
650         {Intrinsic::smul_with_overflow, MVT::i8, 5},
651         {Intrinsic::umul_with_overflow, MVT::i8, 4},
652         {Intrinsic::smul_with_overflow, MVT::i16, 5},
653         {Intrinsic::umul_with_overflow, MVT::i16, 4},
654         {Intrinsic::smul_with_overflow, MVT::i32, 2}, // eg umull;tst
655         {Intrinsic::umul_with_overflow, MVT::i32, 2}, // eg umull;cmp sxtw
656         {Intrinsic::smul_with_overflow, MVT::i64, 3}, // eg mul;smulh;cmp
657         {Intrinsic::umul_with_overflow, MVT::i64, 3}, // eg mul;umulh;cmp asr
658     };
659     EVT MTy = TLI->getValueType(DL, RetTy->getContainedType(0), true);
660     if (MTy.isSimple())
661       if (const auto *Entry = CostTableLookup(WithOverflowCostTbl, ICA.getID(),
662                                               MTy.getSimpleVT()))
663         return Entry->Cost;
664     break;
665   }
666   case Intrinsic::fptosi_sat:
667   case Intrinsic::fptoui_sat: {
668     if (ICA.getArgTypes().empty())
669       break;
670     bool IsSigned = ICA.getID() == Intrinsic::fptosi_sat;
671     auto LT = getTypeLegalizationCost(ICA.getArgTypes()[0]);
672     EVT MTy = TLI->getValueType(DL, RetTy);
673     // Check for the legal types, which are where the size of the input and the
674     // output are the same, or we are using cvt f64->i32 or f32->i64.
675     if ((LT.second == MVT::f32 || LT.second == MVT::f64 ||
676          LT.second == MVT::v2f32 || LT.second == MVT::v4f32 ||
677          LT.second == MVT::v2f64) &&
678         (LT.second.getScalarSizeInBits() == MTy.getScalarSizeInBits() ||
679          (LT.second == MVT::f64 && MTy == MVT::i32) ||
680          (LT.second == MVT::f32 && MTy == MVT::i64)))
681       return LT.first;
682     // Similarly for fp16 sizes
683     if (ST->hasFullFP16() &&
684         ((LT.second == MVT::f16 && MTy == MVT::i32) ||
685          ((LT.second == MVT::v4f16 || LT.second == MVT::v8f16) &&
686           (LT.second.getScalarSizeInBits() == MTy.getScalarSizeInBits()))))
687       return LT.first;
688 
689     // Otherwise we use a legal convert followed by a min+max
690     if ((LT.second.getScalarType() == MVT::f32 ||
691          LT.second.getScalarType() == MVT::f64 ||
692          (ST->hasFullFP16() && LT.second.getScalarType() == MVT::f16)) &&
693         LT.second.getScalarSizeInBits() >= MTy.getScalarSizeInBits()) {
694       Type *LegalTy =
695           Type::getIntNTy(RetTy->getContext(), LT.second.getScalarSizeInBits());
696       if (LT.second.isVector())
697         LegalTy = VectorType::get(LegalTy, LT.second.getVectorElementCount());
698       InstructionCost Cost = 1;
699       IntrinsicCostAttributes Attrs1(IsSigned ? Intrinsic::smin : Intrinsic::umin,
700                                     LegalTy, {LegalTy, LegalTy});
701       Cost += getIntrinsicInstrCost(Attrs1, CostKind);
702       IntrinsicCostAttributes Attrs2(IsSigned ? Intrinsic::smax : Intrinsic::umax,
703                                     LegalTy, {LegalTy, LegalTy});
704       Cost += getIntrinsicInstrCost(Attrs2, CostKind);
705       return LT.first * Cost;
706     }
707     break;
708   }
709   case Intrinsic::fshl:
710   case Intrinsic::fshr: {
711     if (ICA.getArgs().empty())
712       break;
713 
714     // TODO: Add handling for fshl where third argument is not a constant.
715     const TTI::OperandValueInfo OpInfoZ = TTI::getOperandInfo(ICA.getArgs()[2]);
716     if (!OpInfoZ.isConstant())
717       break;
718 
719     const auto LegalisationCost = getTypeLegalizationCost(RetTy);
720     if (OpInfoZ.isUniform()) {
721       // FIXME: The costs could be lower if the codegen is better.
722       static const CostTblEntry FshlTbl[] = {
723           {Intrinsic::fshl, MVT::v4i32, 3}, // ushr + shl + orr
724           {Intrinsic::fshl, MVT::v2i64, 3}, {Intrinsic::fshl, MVT::v16i8, 4},
725           {Intrinsic::fshl, MVT::v8i16, 4}, {Intrinsic::fshl, MVT::v2i32, 3},
726           {Intrinsic::fshl, MVT::v8i8, 4},  {Intrinsic::fshl, MVT::v4i16, 4}};
727       // Costs for both fshl & fshr are the same, so just pass Intrinsic::fshl
728       // to avoid having to duplicate the costs.
729       const auto *Entry =
730           CostTableLookup(FshlTbl, Intrinsic::fshl, LegalisationCost.second);
731       if (Entry)
732         return LegalisationCost.first * Entry->Cost;
733     }
734 
735     auto TyL = getTypeLegalizationCost(RetTy);
736     if (!RetTy->isIntegerTy())
737       break;
738 
739     // Estimate cost manually, as types like i8 and i16 will get promoted to
740     // i32 and CostTableLookup will ignore the extra conversion cost.
741     bool HigherCost = (RetTy->getScalarSizeInBits() != 32 &&
742                        RetTy->getScalarSizeInBits() < 64) ||
743                       (RetTy->getScalarSizeInBits() % 64 != 0);
744     unsigned ExtraCost = HigherCost ? 1 : 0;
745     if (RetTy->getScalarSizeInBits() == 32 ||
746         RetTy->getScalarSizeInBits() == 64)
      ExtraCost = 0; // fshl/fshr for i32 and i64 can be lowered to a single
748                      // extr instruction.
749     else if (HigherCost)
750       ExtraCost = 1;
751     else
752       break;
753     return TyL.first + ExtraCost;
754   }
755   default:
756     break;
757   }
758   return BaseT::getIntrinsicInstrCost(ICA, CostKind);
759 }
760 
/// Remove redundant svbool reinterpret casts in the presence of control flow.
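/// For example, when every incoming value of a phi feeding a
/// convert.from.svbool is itself a convert.to.svbool of the required type,
/// the phi can be rebuilt directly over the unconverted values.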
static std::optional<Instruction *> processPhiNode(InstCombiner &IC,
764                                                    IntrinsicInst &II) {
765   SmallVector<Instruction *, 32> Worklist;
766   auto RequiredType = II.getType();
767 
768   auto *PN = dyn_cast<PHINode>(II.getArgOperand(0));
769   assert(PN && "Expected Phi Node!");
770 
771   // Don't create a new Phi unless we can remove the old one.
772   if (!PN->hasOneUse())
773     return std::nullopt;
774 
775   for (Value *IncValPhi : PN->incoming_values()) {
776     auto *Reinterpret = dyn_cast<IntrinsicInst>(IncValPhi);
777     if (!Reinterpret ||
778         Reinterpret->getIntrinsicID() !=
779             Intrinsic::aarch64_sve_convert_to_svbool ||
780         RequiredType != Reinterpret->getArgOperand(0)->getType())
781       return std::nullopt;
782   }
783 
784   // Create the new Phi
785   IC.Builder.SetInsertPoint(PN);
786   PHINode *NPN = IC.Builder.CreatePHI(RequiredType, PN->getNumIncomingValues());
787   Worklist.push_back(PN);
788 
789   for (unsigned I = 0; I < PN->getNumIncomingValues(); I++) {
790     auto *Reinterpret = cast<Instruction>(PN->getIncomingValue(I));
791     NPN->addIncoming(Reinterpret->getOperand(0), PN->getIncomingBlock(I));
792     Worklist.push_back(Reinterpret);
793   }
794 
795   // Cleanup Phi Node and reinterprets
796   return IC.replaceInstUsesWith(II, NPN);
797 }
798 
799 // (from_svbool (binop (to_svbool pred) (svbool_t _) (svbool_t _))))
800 // => (binop (pred) (from_svbool _) (from_svbool _))
801 //
802 // The above transformation eliminates a `to_svbool` in the predicate
803 // operand of bitwise operation `binop` by narrowing the vector width of
804 // the operation. For example, it would convert a `<vscale x 16 x i1>
805 // and` into a `<vscale x 4 x i1> and`. This is profitable because
806 // to_svbool must zero the new lanes during widening, whereas
807 // from_svbool is free.
808 static std::optional<Instruction *>
tryCombineFromSVBoolBinOp(InstCombiner &IC, IntrinsicInst &II) {
810   auto BinOp = dyn_cast<IntrinsicInst>(II.getOperand(0));
811   if (!BinOp)
812     return std::nullopt;
813 
814   auto IntrinsicID = BinOp->getIntrinsicID();
815   switch (IntrinsicID) {
816   case Intrinsic::aarch64_sve_and_z:
817   case Intrinsic::aarch64_sve_bic_z:
818   case Intrinsic::aarch64_sve_eor_z:
819   case Intrinsic::aarch64_sve_nand_z:
820   case Intrinsic::aarch64_sve_nor_z:
821   case Intrinsic::aarch64_sve_orn_z:
822   case Intrinsic::aarch64_sve_orr_z:
823     break;
824   default:
825     return std::nullopt;
826   }
827 
828   auto BinOpPred = BinOp->getOperand(0);
829   auto BinOpOp1 = BinOp->getOperand(1);
830   auto BinOpOp2 = BinOp->getOperand(2);
831 
832   auto PredIntr = dyn_cast<IntrinsicInst>(BinOpPred);
833   if (!PredIntr ||
834       PredIntr->getIntrinsicID() != Intrinsic::aarch64_sve_convert_to_svbool)
835     return std::nullopt;
836 
837   auto PredOp = PredIntr->getOperand(0);
838   auto PredOpTy = cast<VectorType>(PredOp->getType());
839   if (PredOpTy != II.getType())
840     return std::nullopt;
841 
842   SmallVector<Value *> NarrowedBinOpArgs = {PredOp};
843   auto NarrowBinOpOp1 = IC.Builder.CreateIntrinsic(
844       Intrinsic::aarch64_sve_convert_from_svbool, {PredOpTy}, {BinOpOp1});
845   NarrowedBinOpArgs.push_back(NarrowBinOpOp1);
846   if (BinOpOp1 == BinOpOp2)
847     NarrowedBinOpArgs.push_back(NarrowBinOpOp1);
848   else
849     NarrowedBinOpArgs.push_back(IC.Builder.CreateIntrinsic(
850         Intrinsic::aarch64_sve_convert_from_svbool, {PredOpTy}, {BinOpOp2}));
851 
852   auto NarrowedBinOp =
853       IC.Builder.CreateIntrinsic(IntrinsicID, {PredOpTy}, NarrowedBinOpArgs);
854   return IC.replaceInstUsesWith(II, NarrowedBinOp);
855 }
856 
857 static std::optional<Instruction *>
instCombineConvertFromSVBool(InstCombiner &IC, IntrinsicInst &II) {
859   // If the reinterpret instruction operand is a PHI Node
860   if (isa<PHINode>(II.getArgOperand(0)))
861     return processPhiNode(IC, II);
862 
863   if (auto BinOpCombine = tryCombineFromSVBoolBinOp(IC, II))
864     return BinOpCombine;
865 
866   // Ignore converts to/from svcount_t.
867   if (isa<TargetExtType>(II.getArgOperand(0)->getType()) ||
868       isa<TargetExtType>(II.getType()))
869     return std::nullopt;
870 
871   SmallVector<Instruction *, 32> CandidatesForRemoval;
872   Value *Cursor = II.getOperand(0), *EarliestReplacement = nullptr;
873 
874   const auto *IVTy = cast<VectorType>(II.getType());
875 
876   // Walk the chain of conversions.
877   while (Cursor) {
878     // If the type of the cursor has fewer lanes than the final result, zeroing
879     // must take place, which breaks the equivalence chain.
880     const auto *CursorVTy = cast<VectorType>(Cursor->getType());
881     if (CursorVTy->getElementCount().getKnownMinValue() <
882         IVTy->getElementCount().getKnownMinValue())
883       break;
884 
885     // If the cursor has the same type as I, it is a viable replacement.
886     if (Cursor->getType() == IVTy)
887       EarliestReplacement = Cursor;
888 
889     auto *IntrinsicCursor = dyn_cast<IntrinsicInst>(Cursor);
890 
891     // If this is not an SVE conversion intrinsic, this is the end of the chain.
892     if (!IntrinsicCursor || !(IntrinsicCursor->getIntrinsicID() ==
893                                   Intrinsic::aarch64_sve_convert_to_svbool ||
894                               IntrinsicCursor->getIntrinsicID() ==
895                                   Intrinsic::aarch64_sve_convert_from_svbool))
896       break;
897 
898     CandidatesForRemoval.insert(CandidatesForRemoval.begin(), IntrinsicCursor);
899     Cursor = IntrinsicCursor->getOperand(0);
900   }
901 
902   // If no viable replacement in the conversion chain was found, there is
903   // nothing to do.
904   if (!EarliestReplacement)
905     return std::nullopt;
906 
907   return IC.replaceInstUsesWith(II, EarliestReplacement);
908 }
909 
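// Returns true if Pred is a ptrue with the "all" pattern, looking through a
// convert.from.svbool(convert.to.svbool(...)) pair when the round trip is
// known to have no effect on the predicate lanes.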
static bool isAllActivePredicate(Value *Pred) {
911   // Look through convert.from.svbool(convert.to.svbool(...) chain.
912   Value *UncastedPred;
913   if (match(Pred, m_Intrinsic<Intrinsic::aarch64_sve_convert_from_svbool>(
914                       m_Intrinsic<Intrinsic::aarch64_sve_convert_to_svbool>(
915                           m_Value(UncastedPred)))))
    // If the predicate has the same or fewer lanes than the uncasted
    // predicate then we know the casting has no effect.
918     if (cast<ScalableVectorType>(Pred->getType())->getMinNumElements() <=
919         cast<ScalableVectorType>(UncastedPred->getType())->getMinNumElements())
920       Pred = UncastedPred;
921 
922   return match(Pred, m_Intrinsic<Intrinsic::aarch64_sve_ptrue>(
923                          m_ConstantInt<AArch64SVEPredPattern::all>()));
924 }
925 
static std::optional<Instruction *> instCombineSVESel(InstCombiner &IC,
927                                                       IntrinsicInst &II) {
928   // svsel(ptrue, x, y) => x
929   auto *OpPredicate = II.getOperand(0);
930   if (isAllActivePredicate(OpPredicate))
931     return IC.replaceInstUsesWith(II, II.getOperand(1));
932 
933   auto Select =
934       IC.Builder.CreateSelect(OpPredicate, II.getOperand(1), II.getOperand(2));
935   return IC.replaceInstUsesWith(II, Select);
936 }
937 
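// A dup whose governing predicate is ptrue(vl1) only writes lane 0, so it can
// be rewritten as a plain insertelement at index 0.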
static std::optional<Instruction *> instCombineSVEDup(InstCombiner &IC,
939                                                       IntrinsicInst &II) {
940   IntrinsicInst *Pg = dyn_cast<IntrinsicInst>(II.getArgOperand(1));
941   if (!Pg)
942     return std::nullopt;
943 
944   if (Pg->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue)
945     return std::nullopt;
946 
947   const auto PTruePattern =
948       cast<ConstantInt>(Pg->getOperand(0))->getZExtValue();
949   if (PTruePattern != AArch64SVEPredPattern::vl1)
950     return std::nullopt;
951 
952   // The intrinsic is inserting into lane zero so use an insert instead.
953   auto *IdxTy = Type::getInt64Ty(II.getContext());
954   auto *Insert = InsertElementInst::Create(
955       II.getArgOperand(0), II.getArgOperand(2), ConstantInt::get(IdxTy, 0));
956   Insert->insertBefore(&II);
957   Insert->takeName(&II);
958 
959   return IC.replaceInstUsesWith(II, Insert);
960 }
961 
static std::optional<Instruction *> instCombineSVEDupX(InstCombiner &IC,
963                                                        IntrinsicInst &II) {
964   // Replace DupX with a regular IR splat.
965   auto *RetTy = cast<ScalableVectorType>(II.getType());
966   Value *Splat = IC.Builder.CreateVectorSplat(RetTy->getElementCount(),
967                                               II.getArgOperand(0));
968   Splat->takeName(&II);
969   return IC.replaceInstUsesWith(II, Splat);
970 }
971 
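// Fold a compare-not-equal-to-zero, under an all-active predicate, of a
// dupq'd constant vector into an equivalent constant predicate (either an
// all-false predicate or a ptrue of a matching element width).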
static std::optional<Instruction *> instCombineSVECmpNE(InstCombiner &IC,
973                                                         IntrinsicInst &II) {
974   LLVMContext &Ctx = II.getContext();
975 
976   // Check that the predicate is all active
977   auto *Pg = dyn_cast<IntrinsicInst>(II.getArgOperand(0));
978   if (!Pg || Pg->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue)
979     return std::nullopt;
980 
981   const auto PTruePattern =
982       cast<ConstantInt>(Pg->getOperand(0))->getZExtValue();
983   if (PTruePattern != AArch64SVEPredPattern::all)
984     return std::nullopt;
985 
986   // Check that we have a compare of zero..
987   auto *SplatValue =
988       dyn_cast_or_null<ConstantInt>(getSplatValue(II.getArgOperand(2)));
989   if (!SplatValue || !SplatValue->isZero())
990     return std::nullopt;
991 
992   // ..against a dupq
993   auto *DupQLane = dyn_cast<IntrinsicInst>(II.getArgOperand(1));
994   if (!DupQLane ||
995       DupQLane->getIntrinsicID() != Intrinsic::aarch64_sve_dupq_lane)
996     return std::nullopt;
997 
998   // Where the dupq is a lane 0 replicate of a vector insert
999   if (!cast<ConstantInt>(DupQLane->getArgOperand(1))->isZero())
1000     return std::nullopt;
1001 
1002   auto *VecIns = dyn_cast<IntrinsicInst>(DupQLane->getArgOperand(0));
1003   if (!VecIns || VecIns->getIntrinsicID() != Intrinsic::vector_insert)
1004     return std::nullopt;
1005 
1006   // Where the vector insert is a fixed constant vector insert into undef at
1007   // index zero
1008   if (!isa<UndefValue>(VecIns->getArgOperand(0)))
1009     return std::nullopt;
1010 
1011   if (!cast<ConstantInt>(VecIns->getArgOperand(2))->isZero())
1012     return std::nullopt;
1013 
1014   auto *ConstVec = dyn_cast<Constant>(VecIns->getArgOperand(1));
1015   if (!ConstVec)
1016     return std::nullopt;
1017 
1018   auto *VecTy = dyn_cast<FixedVectorType>(ConstVec->getType());
1019   auto *OutTy = dyn_cast<ScalableVectorType>(II.getType());
1020   if (!VecTy || !OutTy || VecTy->getNumElements() != OutTy->getMinNumElements())
1021     return std::nullopt;
1022 
1023   unsigned NumElts = VecTy->getNumElements();
1024   unsigned PredicateBits = 0;
1025 
1026   // Expand intrinsic operands to a 16-bit byte level predicate
1027   for (unsigned I = 0; I < NumElts; ++I) {
1028     auto *Arg = dyn_cast<ConstantInt>(ConstVec->getAggregateElement(I));
1029     if (!Arg)
1030       return std::nullopt;
1031     if (!Arg->isZero())
1032       PredicateBits |= 1 << (I * (16 / NumElts));
1033   }
1034 
1035   // If all bits are zero bail early with an empty predicate
1036   if (PredicateBits == 0) {
1037     auto *PFalse = Constant::getNullValue(II.getType());
1038     PFalse->takeName(&II);
1039     return IC.replaceInstUsesWith(II, PFalse);
1040   }
1041 
1042   // Calculate largest predicate type used (where byte predicate is largest)
1043   unsigned Mask = 8;
1044   for (unsigned I = 0; I < 16; ++I)
1045     if ((PredicateBits & (1 << I)) != 0)
1046       Mask |= (I % 8);
1047 
1048   unsigned PredSize = Mask & -Mask;
1049   auto *PredType = ScalableVectorType::get(
1050       Type::getInt1Ty(Ctx), AArch64::SVEBitsPerBlock / (PredSize * 8));
1051 
1052   // Ensure all relevant bits are set
1053   for (unsigned I = 0; I < 16; I += PredSize)
1054     if ((PredicateBits & (1 << I)) == 0)
1055       return std::nullopt;
1056 
1057   auto *PTruePat =
1058       ConstantInt::get(Type::getInt32Ty(Ctx), AArch64SVEPredPattern::all);
1059   auto *PTrue = IC.Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue,
1060                                            {PredType}, {PTruePat});
1061   auto *ConvertToSVBool = IC.Builder.CreateIntrinsic(
1062       Intrinsic::aarch64_sve_convert_to_svbool, {PredType}, {PTrue});
1063   auto *ConvertFromSVBool =
1064       IC.Builder.CreateIntrinsic(Intrinsic::aarch64_sve_convert_from_svbool,
1065                                  {II.getType()}, {ConvertToSVBool});
1066 
1067   ConvertFromSVBool->takeName(&II);
1068   return IC.replaceInstUsesWith(II, ConvertFromSVBool);
1069 }
1070 
static std::optional<Instruction *> instCombineSVELast(InstCombiner &IC,
1072                                                        IntrinsicInst &II) {
1073   Value *Pg = II.getArgOperand(0);
1074   Value *Vec = II.getArgOperand(1);
1075   auto IntrinsicID = II.getIntrinsicID();
1076   bool IsAfter = IntrinsicID == Intrinsic::aarch64_sve_lasta;
1077 
1078   // lastX(splat(X)) --> X
1079   if (auto *SplatVal = getSplatValue(Vec))
1080     return IC.replaceInstUsesWith(II, SplatVal);
1081 
1082   // If x and/or y is a splat value then:
1083   // lastX (binop (x, y)) --> binop(lastX(x), lastX(y))
1084   Value *LHS, *RHS;
1085   if (match(Vec, m_OneUse(m_BinOp(m_Value(LHS), m_Value(RHS))))) {
1086     if (isSplatValue(LHS) || isSplatValue(RHS)) {
1087       auto *OldBinOp = cast<BinaryOperator>(Vec);
1088       auto OpC = OldBinOp->getOpcode();
1089       auto *NewLHS =
1090           IC.Builder.CreateIntrinsic(IntrinsicID, {Vec->getType()}, {Pg, LHS});
1091       auto *NewRHS =
1092           IC.Builder.CreateIntrinsic(IntrinsicID, {Vec->getType()}, {Pg, RHS});
1093       auto *NewBinOp = BinaryOperator::CreateWithCopiedFlags(
1094           OpC, NewLHS, NewRHS, OldBinOp, OldBinOp->getName(), &II);
1095       return IC.replaceInstUsesWith(II, NewBinOp);
1096     }
1097   }
1098 
1099   auto *C = dyn_cast<Constant>(Pg);
1100   if (IsAfter && C && C->isNullValue()) {
1101     // The intrinsic is extracting lane 0 so use an extract instead.
1102     auto *IdxTy = Type::getInt64Ty(II.getContext());
1103     auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, 0));
1104     Extract->insertBefore(&II);
1105     Extract->takeName(&II);
1106     return IC.replaceInstUsesWith(II, Extract);
1107   }
1108 
1109   auto *IntrPG = dyn_cast<IntrinsicInst>(Pg);
1110   if (!IntrPG)
1111     return std::nullopt;
1112 
1113   if (IntrPG->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue)
1114     return std::nullopt;
1115 
1116   const auto PTruePattern =
1117       cast<ConstantInt>(IntrPG->getOperand(0))->getZExtValue();
1118 
1119   // Can the intrinsic's predicate be converted to a known constant index?
1120   unsigned MinNumElts = getNumElementsFromSVEPredPattern(PTruePattern);
1121   if (!MinNumElts)
1122     return std::nullopt;
1123 
1124   unsigned Idx = MinNumElts - 1;
1125   // Increment the index if extracting the element after the last active
1126   // predicate element.
1127   if (IsAfter)
1128     ++Idx;
1129 
1130   // Ignore extracts whose index is larger than the known minimum vector
1131   // length. NOTE: This is an artificial constraint where we prefer to
1132   // maintain what the user asked for until an alternative is proven faster.
1133   auto *PgVTy = cast<ScalableVectorType>(Pg->getType());
1134   if (Idx >= PgVTy->getMinNumElements())
1135     return std::nullopt;
1136 
1137   // The intrinsic is extracting a fixed lane so use an extract instead.
1138   auto *IdxTy = Type::getInt64Ty(II.getContext());
1139   auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, Idx));
1140   Extract->insertBefore(&II);
1141   Extract->takeName(&II);
1142   return IC.replaceInstUsesWith(II, Extract);
1143 }
1144 
static std::optional<Instruction *> instCombineSVECondLast(InstCombiner &IC,
1146                                                            IntrinsicInst &II) {
1147   // The SIMD&FP variant of CLAST[AB] is significantly faster than the scalar
1148   // integer variant across a variety of micro-architectures. Replace scalar
1149   // integer CLAST[AB] intrinsic with optimal SIMD&FP variant. A simple
1150   // bitcast-to-fp + clast[ab] + bitcast-to-int will cost a cycle or two more
1151   // depending on the micro-architecture, but has been observed as generally
1152   // being faster, particularly when the CLAST[AB] op is a loop-carried
1153   // dependency.
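  // For example, a 32-bit integer clast[ab] is rewritten as: bitcast the
  // scalar fallback and the vector to float, run the float clast[ab], then
  // bitcast the result back to i32.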
1154   Value *Pg = II.getArgOperand(0);
1155   Value *Fallback = II.getArgOperand(1);
1156   Value *Vec = II.getArgOperand(2);
1157   Type *Ty = II.getType();
1158 
1159   if (!Ty->isIntegerTy())
1160     return std::nullopt;
1161 
1162   Type *FPTy;
1163   switch (cast<IntegerType>(Ty)->getBitWidth()) {
1164   default:
1165     return std::nullopt;
1166   case 16:
1167     FPTy = IC.Builder.getHalfTy();
1168     break;
1169   case 32:
1170     FPTy = IC.Builder.getFloatTy();
1171     break;
1172   case 64:
1173     FPTy = IC.Builder.getDoubleTy();
1174     break;
1175   }
1176 
1177   Value *FPFallBack = IC.Builder.CreateBitCast(Fallback, FPTy);
1178   auto *FPVTy = VectorType::get(
1179       FPTy, cast<VectorType>(Vec->getType())->getElementCount());
1180   Value *FPVec = IC.Builder.CreateBitCast(Vec, FPVTy);
1181   auto *FPII = IC.Builder.CreateIntrinsic(
1182       II.getIntrinsicID(), {FPVec->getType()}, {Pg, FPFallBack, FPVec});
1183   Value *FPIItoInt = IC.Builder.CreateBitCast(FPII, II.getType());
1184   return IC.replaceInstUsesWith(II, FPIItoInt);
1185 }
1186 
static std::optional<Instruction *> instCombineRDFFR(InstCombiner &IC,
1188                                                      IntrinsicInst &II) {
1189   LLVMContext &Ctx = II.getContext();
1190   // Replace rdffr with predicated rdffr.z intrinsic, so that optimizePTestInstr
1191   // can work with RDFFR_PP for ptest elimination.
1192   auto *AllPat =
1193       ConstantInt::get(Type::getInt32Ty(Ctx), AArch64SVEPredPattern::all);
1194   auto *PTrue = IC.Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue,
1195                                            {II.getType()}, {AllPat});
1196   auto *RDFFR =
1197       IC.Builder.CreateIntrinsic(Intrinsic::aarch64_sve_rdffr_z, {}, {PTrue});
1198   RDFFR->takeName(&II);
1199   return IC.replaceInstUsesWith(II, RDFFR);
1200 }
1201 
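// Simplify SVE element-count intrinsics: the "all" pattern becomes a
// (vscale * NumElts) expression, and fixed vl<N> patterns known to fit within
// the minimum vector length become integer constants.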
1202 static std::optional<Instruction *>
instCombineSVECntElts(InstCombiner &IC, IntrinsicInst &II, unsigned NumElts) {
1204   const auto Pattern = cast<ConstantInt>(II.getArgOperand(0))->getZExtValue();
1205 
1206   if (Pattern == AArch64SVEPredPattern::all) {
1207     Constant *StepVal = ConstantInt::get(II.getType(), NumElts);
1208     auto *VScale = IC.Builder.CreateVScale(StepVal);
1209     VScale->takeName(&II);
1210     return IC.replaceInstUsesWith(II, VScale);
1211   }
1212 
1213   unsigned MinNumElts = getNumElementsFromSVEPredPattern(Pattern);
1214 
1215   return MinNumElts && NumElts >= MinNumElts
1216              ? std::optional<Instruction *>(IC.replaceInstUsesWith(
1217                    II, ConstantInt::get(II.getType(), MinNumElts)))
1218              : std::nullopt;
1219 }
1220 
static std::optional<Instruction *> instCombineSVEPTest(InstCombiner &IC,
1222                                                         IntrinsicInst &II) {
1223   Value *PgVal = II.getArgOperand(0);
1224   Value *OpVal = II.getArgOperand(1);
1225 
1226   // PTEST_<FIRST|LAST>(X, X) is equivalent to PTEST_ANY(X, X).
1227   // Later optimizations prefer this form.
1228   if (PgVal == OpVal &&
1229       (II.getIntrinsicID() == Intrinsic::aarch64_sve_ptest_first ||
1230        II.getIntrinsicID() == Intrinsic::aarch64_sve_ptest_last)) {
1231     Value *Ops[] = {PgVal, OpVal};
1232     Type *Tys[] = {PgVal->getType()};
1233 
1234     auto *PTest =
1235         IC.Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptest_any, Tys, Ops);
1236     PTest->takeName(&II);
1237 
1238     return IC.replaceInstUsesWith(II, PTest);
1239   }
1240 
1241   IntrinsicInst *Pg = dyn_cast<IntrinsicInst>(PgVal);
1242   IntrinsicInst *Op = dyn_cast<IntrinsicInst>(OpVal);
1243 
1244   if (!Pg || !Op)
1245     return std::nullopt;
1246 
1247   Intrinsic::ID OpIID = Op->getIntrinsicID();
1248 
1249   if (Pg->getIntrinsicID() == Intrinsic::aarch64_sve_convert_to_svbool &&
1250       OpIID == Intrinsic::aarch64_sve_convert_to_svbool &&
1251       Pg->getArgOperand(0)->getType() == Op->getArgOperand(0)->getType()) {
1252     Value *Ops[] = {Pg->getArgOperand(0), Op->getArgOperand(0)};
1253     Type *Tys[] = {Pg->getArgOperand(0)->getType()};
1254 
1255     auto *PTest = IC.Builder.CreateIntrinsic(II.getIntrinsicID(), Tys, Ops);
1256 
1257     PTest->takeName(&II);
1258     return IC.replaceInstUsesWith(II, PTest);
1259   }
1260 
  // Transform PTEST_ANY(X=OP(PG,...), X) -> PTEST_ANY(PG, X).
1262   // Later optimizations may rewrite sequence to use the flag-setting variant
1263   // of instruction X to remove PTEST.
1264   if ((Pg == Op) && (II.getIntrinsicID() == Intrinsic::aarch64_sve_ptest_any) &&
1265       ((OpIID == Intrinsic::aarch64_sve_brka_z) ||
1266        (OpIID == Intrinsic::aarch64_sve_brkb_z) ||
1267        (OpIID == Intrinsic::aarch64_sve_brkpa_z) ||
1268        (OpIID == Intrinsic::aarch64_sve_brkpb_z) ||
1269        (OpIID == Intrinsic::aarch64_sve_rdffr_z) ||
1270        (OpIID == Intrinsic::aarch64_sve_and_z) ||
1271        (OpIID == Intrinsic::aarch64_sve_bic_z) ||
1272        (OpIID == Intrinsic::aarch64_sve_eor_z) ||
1273        (OpIID == Intrinsic::aarch64_sve_nand_z) ||
1274        (OpIID == Intrinsic::aarch64_sve_nor_z) ||
1275        (OpIID == Intrinsic::aarch64_sve_orn_z) ||
1276        (OpIID == Intrinsic::aarch64_sve_orr_z))) {
1277     Value *Ops[] = {Pg->getArgOperand(0), Pg};
1278     Type *Tys[] = {Pg->getType()};
1279 
1280     auto *PTest = IC.Builder.CreateIntrinsic(II.getIntrinsicID(), Tys, Ops);
1281     PTest->takeName(&II);
1282 
1283     return IC.replaceInstUsesWith(II, PTest);
1284   }
1285 
1286   return std::nullopt;
1287 }
1288 
1289 template <Intrinsic::ID MulOpc, typename Intrinsic::ID FuseOpc>
1290 static std::optional<Instruction *>
1291 instCombineSVEVectorFuseMulAddSub(InstCombiner &IC, IntrinsicInst &II,
1292                                   bool MergeIntoAddendOp) {
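  // Fold a predicated multiply that only feeds this predicated add/sub into a
  // single fused multiply-add/sub intrinsic. Illustrative sketch for
  // FuseOpc = mla with MergeIntoAddendOp = true:
  //   %m = sve.mul(%p, %b, %c)          ; %m has no other uses
  //   %r = sve.add(%p, %a, %m)
  // becomes
  //   %r = sve.mla(%p, %a, %b, %c)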
1293   Value *P = II.getOperand(0);
1294   Value *MulOp0, *MulOp1, *AddendOp, *Mul;
1295   if (MergeIntoAddendOp) {
1296     AddendOp = II.getOperand(1);
1297     Mul = II.getOperand(2);
1298   } else {
1299     AddendOp = II.getOperand(2);
1300     Mul = II.getOperand(1);
1301   }
1302 
1303   if (!match(Mul, m_Intrinsic<MulOpc>(m_Specific(P), m_Value(MulOp0),
1304                                       m_Value(MulOp1))))
1305     return std::nullopt;
1306 
1307   if (!Mul->hasOneUse())
1308     return std::nullopt;
1309 
1310   Instruction *FMFSource = nullptr;
1311   if (II.getType()->isFPOrFPVectorTy()) {
1312     llvm::FastMathFlags FAddFlags = II.getFastMathFlags();
1313     // Stop the combine when the flags on the inputs differ in case dropping
1314     // flags would lead to us missing out on more beneficial optimizations.
1315     if (FAddFlags != cast<CallInst>(Mul)->getFastMathFlags())
1316       return std::nullopt;
1317     if (!FAddFlags.allowContract())
1318       return std::nullopt;
1319     FMFSource = &II;
1320   }
1321 
1322   CallInst *Res;
1323   if (MergeIntoAddendOp)
1324     Res = IC.Builder.CreateIntrinsic(FuseOpc, {II.getType()},
1325                                      {P, AddendOp, MulOp0, MulOp1}, FMFSource);
1326   else
1327     Res = IC.Builder.CreateIntrinsic(FuseOpc, {II.getType()},
1328                                      {P, MulOp0, MulOp1, AddendOp}, FMFSource);
1329 
1330   return IC.replaceInstUsesWith(II, Res);
1331 }
1332 
1333 static std::optional<Instruction *>
1334 instCombineSVELD1(InstCombiner &IC, IntrinsicInst &II, const DataLayout &DL) {
1335   Value *Pred = II.getOperand(0);
1336   Value *PtrOp = II.getOperand(1);
1337   Type *VecTy = II.getType();
1338 
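  // An sve.ld1 with an all-active predicate is just an unmasked load; any
  // other predicate lowers to a generic masked load with a zero passthru.
  // Roughly:
  //   sve.ld1(%pg.all, %ptr)  -->  load <vscale x N x T>, ptr %ptr
  //   sve.ld1(%pg, %ptr)      -->  masked.load(%ptr, align, %pg, zeroinitializer)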
1339   if (isAllActivePredicate(Pred)) {
1340     LoadInst *Load = IC.Builder.CreateLoad(VecTy, PtrOp);
1341     Load->copyMetadata(II);
1342     return IC.replaceInstUsesWith(II, Load);
1343   }
1344 
1345   CallInst *MaskedLoad =
1346       IC.Builder.CreateMaskedLoad(VecTy, PtrOp, PtrOp->getPointerAlignment(DL),
1347                                   Pred, ConstantAggregateZero::get(VecTy));
1348   MaskedLoad->copyMetadata(II);
1349   return IC.replaceInstUsesWith(II, MaskedLoad);
1350 }
1351 
1352 static std::optional<Instruction *>
1353 instCombineSVEST1(InstCombiner &IC, IntrinsicInst &II, const DataLayout &DL) {
1354   Value *VecOp = II.getOperand(0);
1355   Value *Pred = II.getOperand(1);
1356   Value *PtrOp = II.getOperand(2);
1357 
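  // Mirror of the sve.ld1 combine above: an all-active sve.st1 becomes a plain
  // store, anything else a generic masked store.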
1358   if (isAllActivePredicate(Pred)) {
1359     StoreInst *Store = IC.Builder.CreateStore(VecOp, PtrOp);
1360     Store->copyMetadata(II);
1361     return IC.eraseInstFromFunction(II);
1362   }
1363 
1364   CallInst *MaskedStore = IC.Builder.CreateMaskedStore(
1365       VecOp, PtrOp, PtrOp->getPointerAlignment(DL), Pred);
1366   MaskedStore->copyMetadata(II);
1367   return IC.eraseInstFromFunction(II);
1368 }
1369 
1370 static Instruction::BinaryOps intrinsicIDToBinOpCode(unsigned Intrinsic) {
1371   switch (Intrinsic) {
1372   case Intrinsic::aarch64_sve_fmul_u:
1373     return Instruction::BinaryOps::FMul;
1374   case Intrinsic::aarch64_sve_fadd_u:
1375     return Instruction::BinaryOps::FAdd;
1376   case Intrinsic::aarch64_sve_fsub_u:
1377     return Instruction::BinaryOps::FSub;
1378   default:
1379     return Instruction::BinaryOpsEnd;
1380   }
1381 }
1382 
1383 static std::optional<Instruction *>
1384 instCombineSVEVectorBinOp(InstCombiner &IC, IntrinsicInst &II) {
1385   // Bail due to missing support for ISD::STRICT_ scalable vector operations.
1386   if (II.isStrictFP())
1387     return std::nullopt;
1388 
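  // With an all-active ptrue predicate these floating-point _u operations are
  // equivalent to the plain IR operators, e.g. (illustrative):
  //   sve.fadd.u(ptrue(all), %a, %b)  -->  fadd %a, %b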
1389   auto *OpPredicate = II.getOperand(0);
1390   auto BinOpCode = intrinsicIDToBinOpCode(II.getIntrinsicID());
1391   if (BinOpCode == Instruction::BinaryOpsEnd ||
1392       !match(OpPredicate, m_Intrinsic<Intrinsic::aarch64_sve_ptrue>(
1393                               m_ConstantInt<AArch64SVEPredPattern::all>())))
1394     return std::nullopt;
1395   IRBuilderBase::FastMathFlagGuard FMFGuard(IC.Builder);
1396   IC.Builder.setFastMathFlags(II.getFastMathFlags());
1397   auto BinOp =
1398       IC.Builder.CreateBinOp(BinOpCode, II.getOperand(1), II.getOperand(2));
1399   return IC.replaceInstUsesWith(II, BinOp);
1400 }
1401 
1402 // Canonicalise operations that take an all active predicate (e.g. sve.add ->
1403 // sve.add_u).
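// e.g. (illustrative) sve.add(ptrue(all), %a, %b) is retargeted in place to
// sve.add_u(ptrue(all), %a, %b) by switching the called intrinsic declaration.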
1404 static std::optional<Instruction *> instCombineSVEAllActive(IntrinsicInst &II,
1405                                                             Intrinsic::ID IID) {
1406   auto *OpPredicate = II.getOperand(0);
1407   if (!match(OpPredicate, m_Intrinsic<Intrinsic::aarch64_sve_ptrue>(
1408                               m_ConstantInt<AArch64SVEPredPattern::all>())))
1409     return std::nullopt;
1410 
1411   auto *Mod = II.getModule();
1412   auto *NewDecl = Intrinsic::getDeclaration(Mod, IID, {II.getType()});
1413   II.setCalledFunction(NewDecl);
1414 
1415   return &II;
1416 }
1417 
1418 // Simplify operations where the predicate has all inactive lanes, or try to
1419 // replace them with the _u form when all lanes are active.
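// e.g. (illustrative) for a merging form with an all-false predicate:
//   sve.fsub(zeroinitializer, %a, %b)  -->  %a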
1420 static std::optional<Instruction *>
1421 instCombineSVEAllOrNoActive(InstCombiner &IC, IntrinsicInst &II,
1422                             Intrinsic::ID IID) {
1423   if (match(II.getOperand(0), m_ZeroInt())) {
1424     // For the merging (_m) forms the spec says the result is op1 when all
1425     // predicate lanes are inactive, i.e. op(pred(0), op1, op2) --> op1.
1426     return IC.replaceInstUsesWith(II, II.getOperand(1));
1427   }
1428   return instCombineSVEAllActive(II, IID);
1429 }
1430 
1431 static std::optional<Instruction *> instCombineSVEVectorAdd(InstCombiner &IC,
1432                                                             IntrinsicInst &II) {
1433   if (auto II_U =
1434           instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_add_u))
1435     return II_U;
1436   if (auto MLA = instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_mul,
1437                                                    Intrinsic::aarch64_sve_mla>(
1438           IC, II, true))
1439     return MLA;
1440   if (auto MAD = instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_mul,
1441                                                    Intrinsic::aarch64_sve_mad>(
1442           IC, II, false))
1443     return MAD;
1444   return std::nullopt;
1445 }
1446 
1447 static std::optional<Instruction *>
1448 instCombineSVEVectorFAdd(InstCombiner &IC, IntrinsicInst &II) {
1449   if (auto II_U =
1450           instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fadd_u))
1451     return II_U;
1452   if (auto FMLA =
1453           instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
1454                                             Intrinsic::aarch64_sve_fmla>(IC, II,
1455                                                                          true))
1456     return FMLA;
1457   if (auto FMAD =
1458           instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
1459                                             Intrinsic::aarch64_sve_fmad>(IC, II,
1460                                                                          false))
1461     return FMAD;
1462   if (auto FMLA =
1463           instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul_u,
1464                                             Intrinsic::aarch64_sve_fmla>(IC, II,
1465                                                                          true))
1466     return FMLA;
1467   return std::nullopt;
1468 }
1469 
1470 static std::optional<Instruction *>
1471 instCombineSVEVectorFAddU(InstCombiner &IC, IntrinsicInst &II) {
1472   if (auto FMLA =
1473           instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
1474                                             Intrinsic::aarch64_sve_fmla>(IC, II,
1475                                                                          true))
1476     return FMLA;
1477   if (auto FMAD =
1478           instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
1479                                             Intrinsic::aarch64_sve_fmad>(IC, II,
1480                                                                          false))
1481     return FMAD;
1482   if (auto FMLA_U =
1483           instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul_u,
1484                                             Intrinsic::aarch64_sve_fmla_u>(
1485               IC, II, true))
1486     return FMLA_U;
1487   return instCombineSVEVectorBinOp(IC, II);
1488 }
1489 
1490 static std::optional<Instruction *>
1491 instCombineSVEVectorFSub(InstCombiner &IC, IntrinsicInst &II) {
1492   if (auto II_U =
1493           instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fsub_u))
1494     return II_U;
1495   if (auto FMLS =
1496           instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
1497                                             Intrinsic::aarch64_sve_fmls>(IC, II,
1498                                                                          true))
1499     return FMLS;
1500   if (auto FMSB =
1501           instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
1502                                             Intrinsic::aarch64_sve_fnmsb>(
1503               IC, II, false))
1504     return FMSB;
1505   if (auto FMLS =
1506           instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul_u,
1507                                             Intrinsic::aarch64_sve_fmls>(IC, II,
1508                                                                          true))
1509     return FMLS;
1510   return std::nullopt;
1511 }
1512 
1513 static std::optional<Instruction *>
1514 instCombineSVEVectorFSubU(InstCombiner &IC, IntrinsicInst &II) {
1515   if (auto FMLS =
1516           instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
1517                                             Intrinsic::aarch64_sve_fmls>(IC, II,
1518                                                                          true))
1519     return FMLS;
1520   if (auto FMSB =
1521           instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul,
1522                                             Intrinsic::aarch64_sve_fnmsb>(
1523               IC, II, false))
1524     return FMSB;
1525   if (auto FMLS_U =
1526           instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_fmul_u,
1527                                             Intrinsic::aarch64_sve_fmls_u>(
1528               IC, II, true))
1529     return FMLS_U;
1530   return instCombineSVEVectorBinOp(IC, II);
1531 }
1532 
1533 static std::optional<Instruction *> instCombineSVEVectorSub(InstCombiner &IC,
1534                                                             IntrinsicInst &II) {
1535   if (auto II_U =
1536           instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_sub_u))
1537     return II_U;
1538   if (auto MLS = instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_mul,
1539                                                    Intrinsic::aarch64_sve_mls>(
1540           IC, II, true))
1541     return MLS;
1542   return std::nullopt;
1543 }
1544 
1545 static std::optional<Instruction *> instCombineSVEVectorMul(InstCombiner &IC,
1546                                                             IntrinsicInst &II,
1547                                                             Intrinsic::ID IID) {
1548   auto *OpPredicate = II.getOperand(0);
1549   auto *OpMultiplicand = II.getOperand(1);
1550   auto *OpMultiplier = II.getOperand(2);
1551 
1552   // Return true if a given instruction is a unit splat value, false otherwise.
1553   auto IsUnitSplat = [](auto *I) {
1554     auto *SplatValue = getSplatValue(I);
1555     if (!SplatValue)
1556       return false;
1557     return match(SplatValue, m_FPOne()) || match(SplatValue, m_One());
1558   };
1559 
1560   // Return true if a given instruction is an aarch64_sve_dup intrinsic call
1561   // with a unit splat value, false otherwise.
1562   auto IsUnitDup = [](auto *I) {
1563     auto *IntrI = dyn_cast<IntrinsicInst>(I);
1564     if (!IntrI || IntrI->getIntrinsicID() != Intrinsic::aarch64_sve_dup)
1565       return false;
1566 
1567     auto *SplatValue = IntrI->getOperand(2);
1568     return match(SplatValue, m_FPOne()) || match(SplatValue, m_One());
1569   };
1570 
1571   if (IsUnitSplat(OpMultiplier)) {
1572     // [f]mul pg %n, (dupx 1) => %n
1573     OpMultiplicand->takeName(&II);
1574     return IC.replaceInstUsesWith(II, OpMultiplicand);
1575   } else if (IsUnitDup(OpMultiplier)) {
1576     // [f]mul pg %n, (dup pg 1) => %n
1577     auto *DupInst = cast<IntrinsicInst>(OpMultiplier);
1578     auto *DupPg = DupInst->getOperand(1);
1579     // TODO: this is naive. The optimization is still valid if DupPg
1580     // 'encompasses' OpPredicate, not only if they're the same predicate.
1581     if (OpPredicate == DupPg) {
1582       OpMultiplicand->takeName(&II);
1583       return IC.replaceInstUsesWith(II, OpMultiplicand);
1584     }
1585   }
1586 
1587   return instCombineSVEVectorBinOp(IC, II);
1588 }
1589 
1590 static std::optional<Instruction *> instCombineSVEUnpack(InstCombiner &IC,
1591                                                          IntrinsicInst &II) {
1592   Value *UnpackArg = II.getArgOperand(0);
1593   auto *RetTy = cast<ScalableVectorType>(II.getType());
1594   bool IsSigned = II.getIntrinsicID() == Intrinsic::aarch64_sve_sunpkhi ||
1595                   II.getIntrinsicID() == Intrinsic::aarch64_sve_sunpklo;
1596 
1597   // Hi = uunpkhi(splat(X)) --> Hi = splat(extend(X))
1598   // Lo = uunpklo(splat(X)) --> Lo = splat(extend(X))
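  // e.g. (illustrative) for a <vscale x 4 x i32> uunpklo of a splatted i16 %x:
  //   uunpklo(splat(%x))  -->  splat(zext i16 %x to i32)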
1599   if (auto *ScalarArg = getSplatValue(UnpackArg)) {
1600     ScalarArg =
1601         IC.Builder.CreateIntCast(ScalarArg, RetTy->getScalarType(), IsSigned);
1602     Value *NewVal =
1603         IC.Builder.CreateVectorSplat(RetTy->getElementCount(), ScalarArg);
1604     NewVal->takeName(&II);
1605     return IC.replaceInstUsesWith(II, NewVal);
1606   }
1607 
1608   return std::nullopt;
1609 }
1610 static std::optional<Instruction *> instCombineSVETBL(InstCombiner &IC,
1611                                                       IntrinsicInst &II) {
1612   auto *OpVal = II.getOperand(0);
1613   auto *OpIndices = II.getOperand(1);
1614   VectorType *VTy = cast<VectorType>(II.getType());
1615 
1616   // Check whether OpIndices is a constant splat value smaller than the
1617   // known-minimum element count of the result.
1618   auto *SplatValue = dyn_cast_or_null<ConstantInt>(getSplatValue(OpIndices));
1619   if (!SplatValue ||
1620       SplatValue->getValue().uge(VTy->getElementCount().getKnownMinValue()))
1621     return std::nullopt;
1622 
1623   // Convert sve_tbl(OpVal, sve_dup_x(SplatValue)) to
1624   // splat_vector(extractelement(OpVal, SplatValue)) for further optimization.
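  // e.g. (illustrative): sve.tbl(%v, splat(2)) --> splat(extractelement %v, 2)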
1625   auto *Extract = IC.Builder.CreateExtractElement(OpVal, SplatValue);
1626   auto *VectorSplat =
1627       IC.Builder.CreateVectorSplat(VTy->getElementCount(), Extract);
1628 
1629   VectorSplat->takeName(&II);
1630   return IC.replaceInstUsesWith(II, VectorSplat);
1631 }
1632 
1633 static std::optional<Instruction *> instCombineSVEZip(InstCombiner &IC,
1634                                                       IntrinsicInst &II) {
1635   // zip1(uzp1(A, B), uzp2(A, B)) --> A
1636   // zip2(uzp1(A, B), uzp2(A, B)) --> B
1637   Value *A, *B;
1638   if (match(II.getArgOperand(0),
1639             m_Intrinsic<Intrinsic::aarch64_sve_uzp1>(m_Value(A), m_Value(B))) &&
1640       match(II.getArgOperand(1), m_Intrinsic<Intrinsic::aarch64_sve_uzp2>(
1641                                      m_Specific(A), m_Specific(B))))
1642     return IC.replaceInstUsesWith(
1643         II, (II.getIntrinsicID() == Intrinsic::aarch64_sve_zip1 ? A : B));
1644 
1645   return std::nullopt;
1646 }
1647 
1648 static std::optional<Instruction *>
1649 instCombineLD1GatherIndex(InstCombiner &IC, IntrinsicInst &II) {
1650   Value *Mask = II.getOperand(0);
1651   Value *BasePtr = II.getOperand(1);
1652   Value *Index = II.getOperand(2);
1653   Type *Ty = II.getType();
1654   Value *PassThru = ConstantAggregateZero::get(Ty);
1655 
1656   // Contiguous gather => masked load.
1657   // (sve.ld1.gather.index Mask BasePtr (sve.index IndexBase 1))
1658   // => (masked.load (gep BasePtr IndexBase) Align Mask zeroinitializer)
1659   Value *IndexBase;
1660   if (match(Index, m_Intrinsic<Intrinsic::aarch64_sve_index>(
1661                        m_Value(IndexBase), m_SpecificInt(1)))) {
1662     Align Alignment =
1663         BasePtr->getPointerAlignment(II.getModule()->getDataLayout());
1664 
1665     Type *VecPtrTy = PointerType::getUnqual(Ty);
1666     Value *Ptr = IC.Builder.CreateGEP(cast<VectorType>(Ty)->getElementType(),
1667                                       BasePtr, IndexBase);
1668     Ptr = IC.Builder.CreateBitCast(Ptr, VecPtrTy);
1669     CallInst *MaskedLoad =
1670         IC.Builder.CreateMaskedLoad(Ty, Ptr, Alignment, Mask, PassThru);
1671     MaskedLoad->takeName(&II);
1672     return IC.replaceInstUsesWith(II, MaskedLoad);
1673   }
1674 
1675   return std::nullopt;
1676 }
1677 
1678 static std::optional<Instruction *>
1679 instCombineST1ScatterIndex(InstCombiner &IC, IntrinsicInst &II) {
1680   Value *Val = II.getOperand(0);
1681   Value *Mask = II.getOperand(1);
1682   Value *BasePtr = II.getOperand(2);
1683   Value *Index = II.getOperand(3);
1684   Type *Ty = Val->getType();
1685 
1686   // Contiguous scatter => masked store.
1687   // (sve.st1.scatter.index Value Mask BasePtr (sve.index IndexBase 1))
1688   // => (masked.store Value (gep BasePtr IndexBase) Align Mask)
1689   Value *IndexBase;
1690   if (match(Index, m_Intrinsic<Intrinsic::aarch64_sve_index>(
1691                        m_Value(IndexBase), m_SpecificInt(1)))) {
1692     Align Alignment =
1693         BasePtr->getPointerAlignment(II.getModule()->getDataLayout());
1694 
1695     Value *Ptr = IC.Builder.CreateGEP(cast<VectorType>(Ty)->getElementType(),
1696                                       BasePtr, IndexBase);
1697     Type *VecPtrTy = PointerType::getUnqual(Ty);
1698     Ptr = IC.Builder.CreateBitCast(Ptr, VecPtrTy);
1699 
1700     (void)IC.Builder.CreateMaskedStore(Val, Ptr, Alignment, Mask);
1701 
1702     return IC.eraseInstFromFunction(II);
1703   }
1704 
1705   return std::nullopt;
1706 }
1707 
1708 static std::optional<Instruction *> instCombineSVESDIV(InstCombiner &IC,
1709                                                        IntrinsicInst &II) {
1710   Type *Int32Ty = IC.Builder.getInt32Ty();
1711   Value *Pred = II.getOperand(0);
1712   Value *Vec = II.getOperand(1);
1713   Value *DivVec = II.getOperand(2);
1714 
1715   Value *SplatValue = getSplatValue(DivVec);
1716   ConstantInt *SplatConstantInt = dyn_cast_or_null<ConstantInt>(SplatValue);
1717   if (!SplatConstantInt)
1718     return std::nullopt;
1719   APInt Divisor = SplatConstantInt->getValue();
1720 
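  // Signed division by a (possibly negated) power-of-two splat maps onto the
  // SVE ASRD instruction, e.g. (illustrative):
  //   sve.sdiv(%pg, %x, splat(8))   -->  sve.asrd(%pg, %x, 3)
  //   sve.sdiv(%pg, %x, splat(-8))  -->  sve.neg(sve.asrd(%pg, %x, 3), %pg, ...)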
1721   if (Divisor.isPowerOf2()) {
1722     Constant *DivisorLog2 = ConstantInt::get(Int32Ty, Divisor.logBase2());
1723     auto ASRD = IC.Builder.CreateIntrinsic(
1724         Intrinsic::aarch64_sve_asrd, {II.getType()}, {Pred, Vec, DivisorLog2});
1725     return IC.replaceInstUsesWith(II, ASRD);
1726   }
1727   if (Divisor.isNegatedPowerOf2()) {
1728     Divisor.negate();
1729     Constant *DivisorLog2 = ConstantInt::get(Int32Ty, Divisor.logBase2());
1730     auto ASRD = IC.Builder.CreateIntrinsic(
1731         Intrinsic::aarch64_sve_asrd, {II.getType()}, {Pred, Vec, DivisorLog2});
1732     auto NEG = IC.Builder.CreateIntrinsic(
1733         Intrinsic::aarch64_sve_neg, {ASRD->getType()}, {ASRD, Pred, ASRD});
1734     return IC.replaceInstUsesWith(II, NEG);
1735   }
1736 
1737   return std::nullopt;
1738 }
1739 
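// Returns true when the second half of Vec repeats (or, with AllowPoison, is
// compatible with) the first half, e.g. {A, B, A, B}; as a side effect Vec is
// shrunk to its smallest repeating prefix. nullptr entries stand for poison
// lanes.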
1740 bool SimplifyValuePattern(SmallVector<Value *> &Vec, bool AllowPoison) {
1741   size_t VecSize = Vec.size();
1742   if (VecSize == 1)
1743     return true;
1744   if (!isPowerOf2_64(VecSize))
1745     return false;
1746   size_t HalfVecSize = VecSize / 2;
1747 
1748   for (auto LHS = Vec.begin(), RHS = Vec.begin() + HalfVecSize;
1749        RHS != Vec.end(); LHS++, RHS++) {
1750     if (*LHS != nullptr && *RHS != nullptr) {
1751       if (*LHS == *RHS)
1752         continue;
1753       else
1754         return false;
1755     }
1756     if (!AllowPoison)
1757       return false;
1758     if (*LHS == nullptr && *RHS != nullptr)
1759       *LHS = *RHS;
1760   }
1761 
1762   Vec.resize(HalfVecSize);
1763   SimplifyValuePattern(Vec, AllowPoison);
1764   return true;
1765 }
1766 
1767 // Try to simplify dupqlane patterns like dupqlane(f32 A, f32 B, f32 A, f32 B)
1768 // to dupqlane(f64(C)) where C is A concatenated with B
1769 static std::optional<Instruction *> instCombineSVEDupqLane(InstCombiner &IC,
1770                                                            IntrinsicInst &II) {
1771   Value *CurrentInsertElt = nullptr, *Default = nullptr;
1772   if (!match(II.getOperand(0),
1773              m_Intrinsic<Intrinsic::vector_insert>(
1774                  m_Value(Default), m_Value(CurrentInsertElt), m_Value())) ||
1775       !isa<FixedVectorType>(CurrentInsertElt->getType()))
1776     return std::nullopt;
1777   auto IIScalableTy = cast<ScalableVectorType>(II.getType());
1778 
1779   // Insert the scalars into a container ordered by InsertElement index
1780   SmallVector<Value *> Elts(IIScalableTy->getMinNumElements(), nullptr);
1781   while (auto InsertElt = dyn_cast<InsertElementInst>(CurrentInsertElt)) {
1782     auto Idx = cast<ConstantInt>(InsertElt->getOperand(2));
1783     Elts[Idx->getValue().getZExtValue()] = InsertElt->getOperand(1);
1784     CurrentInsertElt = InsertElt->getOperand(0);
1785   }
1786 
1787   bool AllowPoison =
1788       isa<PoisonValue>(CurrentInsertElt) && isa<PoisonValue>(Default);
1789   if (!SimplifyValuePattern(Elts, AllowPoison))
1790     return std::nullopt;
1791 
1792   // Rebuild the simplified chain of InsertElements. e.g. (a, b, a, b) as (a, b)
1793   Value *InsertEltChain = PoisonValue::get(CurrentInsertElt->getType());
1794   for (size_t I = 0; I < Elts.size(); I++) {
1795     if (Elts[I] == nullptr)
1796       continue;
1797     InsertEltChain = IC.Builder.CreateInsertElement(InsertEltChain, Elts[I],
1798                                                     IC.Builder.getInt64(I));
1799   }
1800   if (InsertEltChain == nullptr)
1801     return std::nullopt;
1802 
1803   // Splat the simplified sequence, e.g. (f16 a, f16 b, f16 c, f16 d) as one i64
1804   // value or (f16 a, f16 b) as one i32 value. To do this the sequence is first
1805   // inserted into a subvector, bitcast to a type wide enough to hold it,
1806   // splatted, and finally bitcast back to the original element type.
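  // e.g. (illustrative) for <vscale x 8 x half> with the simplified pair
  // (a, b): PatternWidth = 32, so the pair is splatted as <vscale x 4 x i32>
  // and the result bitcast back to <vscale x 8 x half>.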
1807   unsigned PatternWidth = IIScalableTy->getScalarSizeInBits() * Elts.size();
1808   unsigned PatternElementCount = IIScalableTy->getScalarSizeInBits() *
1809                                  IIScalableTy->getMinNumElements() /
1810                                  PatternWidth;
1811 
1812   IntegerType *WideTy = IC.Builder.getIntNTy(PatternWidth);
1813   auto *WideScalableTy = ScalableVectorType::get(WideTy, PatternElementCount);
1814   auto *WideShuffleMaskTy =
1815       ScalableVectorType::get(IC.Builder.getInt32Ty(), PatternElementCount);
1816 
1817   auto ZeroIdx = ConstantInt::get(IC.Builder.getInt64Ty(), APInt(64, 0));
1818   auto InsertSubvector = IC.Builder.CreateInsertVector(
1819       II.getType(), PoisonValue::get(II.getType()), InsertEltChain, ZeroIdx);
1820   auto WideBitcast =
1821       IC.Builder.CreateBitOrPointerCast(InsertSubvector, WideScalableTy);
1822   auto WideShuffleMask = ConstantAggregateZero::get(WideShuffleMaskTy);
1823   auto WideShuffle = IC.Builder.CreateShuffleVector(
1824       WideBitcast, PoisonValue::get(WideScalableTy), WideShuffleMask);
1825   auto NarrowBitcast =
1826       IC.Builder.CreateBitOrPointerCast(WideShuffle, II.getType());
1827 
1828   return IC.replaceInstUsesWith(II, NarrowBitcast);
1829 }
1830 
1831 static std::optional<Instruction *> instCombineMaxMinNM(InstCombiner &IC,
1832                                                         IntrinsicInst &II) {
1833   Value *A = II.getArgOperand(0);
1834   Value *B = II.getArgOperand(1);
1835   if (A == B)
1836     return IC.replaceInstUsesWith(II, A);
1837 
1838   return std::nullopt;
1839 }
1840 
1841 static std::optional<Instruction *> instCombineSVESrshl(InstCombiner &IC,
1842                                                         IntrinsicInst &II) {
1843   Value *Pred = II.getOperand(0);
1844   Value *Vec = II.getOperand(1);
1845   Value *Shift = II.getOperand(2);
1846 
1847   // Convert SRSHL into the simpler LSL intrinsic when fed by an ABS intrinsic.
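  // e.g. (illustrative), when the conditions checked below hold:
  //   srshl(%pg, abs(%pg, %x), splat(2))  -->  lsl(%pg, abs(%pg, %x), splat(2))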
1848   Value *AbsPred, *MergedValue;
1849   if (!match(Vec, m_Intrinsic<Intrinsic::aarch64_sve_sqabs>(
1850                       m_Value(MergedValue), m_Value(AbsPred), m_Value())) &&
1851       !match(Vec, m_Intrinsic<Intrinsic::aarch64_sve_abs>(
1852                       m_Value(MergedValue), m_Value(AbsPred), m_Value())))
1853 
1854     return std::nullopt;
1855 
1856   // Transform is valid if any of the following are true:
1857   // * The ABS merge value is an undef or non-negative
1858   // * The ABS predicate is all active
1859   // * The ABS predicate and the SRSHL predicates are the same
1860   if (!isa<UndefValue>(MergedValue) && !match(MergedValue, m_NonNegative()) &&
1861       AbsPred != Pred && !isAllActivePredicate(AbsPred))
1862     return std::nullopt;
1863 
1864   // Only valid when the shift amount is non-negative, otherwise the rounding
1865   // behaviour of SRSHL cannot be ignored.
1866   if (!match(Shift, m_NonNegative()))
1867     return std::nullopt;
1868 
1869   auto LSL = IC.Builder.CreateIntrinsic(Intrinsic::aarch64_sve_lsl,
1870                                         {II.getType()}, {Pred, Vec, Shift});
1871 
1872   return IC.replaceInstUsesWith(II, LSL);
1873 }
1874 
1875 std::optional<Instruction *>
1876 AArch64TTIImpl::instCombineIntrinsic(InstCombiner &IC,
1877                                      IntrinsicInst &II) const {
1878   Intrinsic::ID IID = II.getIntrinsicID();
1879   switch (IID) {
1880   default:
1881     break;
1882   case Intrinsic::aarch64_neon_fmaxnm:
1883   case Intrinsic::aarch64_neon_fminnm:
1884     return instCombineMaxMinNM(IC, II);
1885   case Intrinsic::aarch64_sve_convert_from_svbool:
1886     return instCombineConvertFromSVBool(IC, II);
1887   case Intrinsic::aarch64_sve_dup:
1888     return instCombineSVEDup(IC, II);
1889   case Intrinsic::aarch64_sve_dup_x:
1890     return instCombineSVEDupX(IC, II);
1891   case Intrinsic::aarch64_sve_cmpne:
1892   case Intrinsic::aarch64_sve_cmpne_wide:
1893     return instCombineSVECmpNE(IC, II);
1894   case Intrinsic::aarch64_sve_rdffr:
1895     return instCombineRDFFR(IC, II);
1896   case Intrinsic::aarch64_sve_lasta:
1897   case Intrinsic::aarch64_sve_lastb:
1898     return instCombineSVELast(IC, II);
1899   case Intrinsic::aarch64_sve_clasta_n:
1900   case Intrinsic::aarch64_sve_clastb_n:
1901     return instCombineSVECondLast(IC, II);
1902   case Intrinsic::aarch64_sve_cntd:
1903     return instCombineSVECntElts(IC, II, 2);
1904   case Intrinsic::aarch64_sve_cntw:
1905     return instCombineSVECntElts(IC, II, 4);
1906   case Intrinsic::aarch64_sve_cnth:
1907     return instCombineSVECntElts(IC, II, 8);
1908   case Intrinsic::aarch64_sve_cntb:
1909     return instCombineSVECntElts(IC, II, 16);
1910   case Intrinsic::aarch64_sve_ptest_any:
1911   case Intrinsic::aarch64_sve_ptest_first:
1912   case Intrinsic::aarch64_sve_ptest_last:
1913     return instCombineSVEPTest(IC, II);
1914   case Intrinsic::aarch64_sve_fabd:
1915     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fabd_u);
1916   case Intrinsic::aarch64_sve_fadd:
1917     return instCombineSVEVectorFAdd(IC, II);
1918   case Intrinsic::aarch64_sve_fadd_u:
1919     return instCombineSVEVectorFAddU(IC, II);
1920   case Intrinsic::aarch64_sve_fdiv:
1921     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fdiv_u);
1922   case Intrinsic::aarch64_sve_fmax:
1923     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fmax_u);
1924   case Intrinsic::aarch64_sve_fmaxnm:
1925     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fmaxnm_u);
1926   case Intrinsic::aarch64_sve_fmin:
1927     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fmin_u);
1928   case Intrinsic::aarch64_sve_fminnm:
1929     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fminnm_u);
1930   case Intrinsic::aarch64_sve_fmla:
1931     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fmla_u);
1932   case Intrinsic::aarch64_sve_fmls:
1933     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fmls_u);
1934   case Intrinsic::aarch64_sve_fmul:
1935     if (auto II_U =
1936             instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fmul_u))
1937       return II_U;
1938     return instCombineSVEVectorMul(IC, II, Intrinsic::aarch64_sve_fmul_u);
1939   case Intrinsic::aarch64_sve_fmul_u:
1940     return instCombineSVEVectorMul(IC, II, Intrinsic::aarch64_sve_fmul_u);
1941   case Intrinsic::aarch64_sve_fmulx:
1942     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fmulx_u);
1943   case Intrinsic::aarch64_sve_fnmla:
1944     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fnmla_u);
1945   case Intrinsic::aarch64_sve_fnmls:
1946     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_fnmls_u);
1947   case Intrinsic::aarch64_sve_fsub:
1948     return instCombineSVEVectorFSub(IC, II);
1949   case Intrinsic::aarch64_sve_fsub_u:
1950     return instCombineSVEVectorFSubU(IC, II);
1951   case Intrinsic::aarch64_sve_add:
1952     return instCombineSVEVectorAdd(IC, II);
1953   case Intrinsic::aarch64_sve_add_u:
1954     return instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_mul_u,
1955                                              Intrinsic::aarch64_sve_mla_u>(
1956         IC, II, true);
1957   case Intrinsic::aarch64_sve_mla:
1958     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_mla_u);
1959   case Intrinsic::aarch64_sve_mls:
1960     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_mls_u);
1961   case Intrinsic::aarch64_sve_mul:
1962     if (auto II_U =
1963             instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_mul_u))
1964       return II_U;
1965     return instCombineSVEVectorMul(IC, II, Intrinsic::aarch64_sve_mul_u);
1966   case Intrinsic::aarch64_sve_mul_u:
1967     return instCombineSVEVectorMul(IC, II, Intrinsic::aarch64_sve_mul_u);
1968   case Intrinsic::aarch64_sve_sabd:
1969     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_sabd_u);
1970   case Intrinsic::aarch64_sve_smax:
1971     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_smax_u);
1972   case Intrinsic::aarch64_sve_smin:
1973     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_smin_u);
1974   case Intrinsic::aarch64_sve_smulh:
1975     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_smulh_u);
1976   case Intrinsic::aarch64_sve_sub:
1977     return instCombineSVEVectorSub(IC, II);
1978   case Intrinsic::aarch64_sve_sub_u:
1979     return instCombineSVEVectorFuseMulAddSub<Intrinsic::aarch64_sve_mul_u,
1980                                              Intrinsic::aarch64_sve_mls_u>(
1981         IC, II, true);
1982   case Intrinsic::aarch64_sve_uabd:
1983     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_uabd_u);
1984   case Intrinsic::aarch64_sve_umax:
1985     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_umax_u);
1986   case Intrinsic::aarch64_sve_umin:
1987     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_umin_u);
1988   case Intrinsic::aarch64_sve_umulh:
1989     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_umulh_u);
1990   case Intrinsic::aarch64_sve_asr:
1991     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_asr_u);
1992   case Intrinsic::aarch64_sve_lsl:
1993     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_lsl_u);
1994   case Intrinsic::aarch64_sve_lsr:
1995     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_lsr_u);
1996   case Intrinsic::aarch64_sve_and:
1997     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_and_u);
1998   case Intrinsic::aarch64_sve_bic:
1999     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_bic_u);
2000   case Intrinsic::aarch64_sve_eor:
2001     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_eor_u);
2002   case Intrinsic::aarch64_sve_orr:
2003     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_orr_u);
2004   case Intrinsic::aarch64_sve_sqsub:
2005     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_sqsub_u);
2006   case Intrinsic::aarch64_sve_uqsub:
2007     return instCombineSVEAllOrNoActive(IC, II, Intrinsic::aarch64_sve_uqsub_u);
2008   case Intrinsic::aarch64_sve_tbl:
2009     return instCombineSVETBL(IC, II);
2010   case Intrinsic::aarch64_sve_uunpkhi:
2011   case Intrinsic::aarch64_sve_uunpklo:
2012   case Intrinsic::aarch64_sve_sunpkhi:
2013   case Intrinsic::aarch64_sve_sunpklo:
2014     return instCombineSVEUnpack(IC, II);
2015   case Intrinsic::aarch64_sve_zip1:
2016   case Intrinsic::aarch64_sve_zip2:
2017     return instCombineSVEZip(IC, II);
2018   case Intrinsic::aarch64_sve_ld1_gather_index:
2019     return instCombineLD1GatherIndex(IC, II);
2020   case Intrinsic::aarch64_sve_st1_scatter_index:
2021     return instCombineST1ScatterIndex(IC, II);
2022   case Intrinsic::aarch64_sve_ld1:
2023     return instCombineSVELD1(IC, II, DL);
2024   case Intrinsic::aarch64_sve_st1:
2025     return instCombineSVEST1(IC, II, DL);
2026   case Intrinsic::aarch64_sve_sdiv:
2027     return instCombineSVESDIV(IC, II);
2028   case Intrinsic::aarch64_sve_sel:
2029     return instCombineSVESel(IC, II);
2030   case Intrinsic::aarch64_sve_srshl:
2031     return instCombineSVESrshl(IC, II);
2032   case Intrinsic::aarch64_sve_dupq_lane:
2033     return instCombineSVEDupqLane(IC, II);
2034   }
2035 
2036   return std::nullopt;
2037 }
2038 
2039 std::optional<Value *> AArch64TTIImpl::simplifyDemandedVectorEltsIntrinsic(
2040     InstCombiner &IC, IntrinsicInst &II, APInt OrigDemandedElts,
2041     APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3,
2042     std::function<void(Instruction *, unsigned, APInt, APInt &)>
2043         SimplifyAndSetOp) const {
2044   switch (II.getIntrinsicID()) {
2045   default:
2046     break;
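  // Each of these NEON narrowing conversions is lane-wise, so the demanded
  // destination elements map one-to-one onto demanded elements of operand 0.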
2047   case Intrinsic::aarch64_neon_fcvtxn:
2048   case Intrinsic::aarch64_neon_rshrn:
2049   case Intrinsic::aarch64_neon_sqrshrn:
2050   case Intrinsic::aarch64_neon_sqrshrun:
2051   case Intrinsic::aarch64_neon_sqshrn:
2052   case Intrinsic::aarch64_neon_sqshrun:
2053   case Intrinsic::aarch64_neon_sqxtn:
2054   case Intrinsic::aarch64_neon_sqxtun:
2055   case Intrinsic::aarch64_neon_uqrshrn:
2056   case Intrinsic::aarch64_neon_uqshrn:
2057   case Intrinsic::aarch64_neon_uqxtn:
2058     SimplifyAndSetOp(&II, 0, OrigDemandedElts, UndefElts);
2059     break;
2060   }
2061 
2062   return std::nullopt;
2063 }
2064 
2065 TypeSize
2066 AArch64TTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
2067   switch (K) {
2068   case TargetTransformInfo::RGK_Scalar:
2069     return TypeSize::getFixed(64);
2070   case TargetTransformInfo::RGK_FixedWidthVector:
2071     if (!ST->isNeonAvailable() && !EnableFixedwidthAutovecInStreamingMode)
2072       return TypeSize::getFixed(0);
2073 
2074     if (ST->hasSVE())
2075       return TypeSize::getFixed(
2076           std::max(ST->getMinSVEVectorSizeInBits(), 128u));
2077 
2078     return TypeSize::getFixed(ST->hasNEON() ? 128 : 0);
2079   case TargetTransformInfo::RGK_ScalableVector:
2080     if (!ST->isSVEAvailable() && !EnableScalableAutovecInStreamingMode)
2081       return TypeSize::getScalable(0);
2082 
2083     return TypeSize::getScalable(ST->hasSVE() ? 128 : 0);
2084   }
2085   llvm_unreachable("Unsupported register kind");
2086 }
2087 
2088 bool AArch64TTIImpl::isWideningInstruction(Type *DstTy, unsigned Opcode,
2089                                            ArrayRef<const Value *> Args,
2090                                            Type *SrcOverrideTy) {
2091   // A helper that returns a vector type from the given type. The number of
2092   // elements in type Ty determines the vector width.
2093   auto toVectorTy = [&](Type *ArgTy) {
2094     return VectorType::get(ArgTy->getScalarType(),
2095                            cast<VectorType>(DstTy)->getElementCount());
2096   };
2097 
2098   // Exit early if DstTy is not a vector type whose elements are one of [i16,
2099   // i32, i64]. SVE doesn't generally have the same set of instructions to
2100   // perform an extend with the add/sub/mul. There are SMULLB style
2101   // instructions, but they operate on top/bottom, requiring some sort of lane
2102   // interleaving to be used with zext/sext.
2103   unsigned DstEltSize = DstTy->getScalarSizeInBits();
2104   if (!useNeonVector(DstTy) || Args.size() != 2 ||
2105       (DstEltSize != 16 && DstEltSize != 32 && DstEltSize != 64))
2106     return false;
2107 
2108   // Determine if the operation has a widening variant. We consider both the
2109   // "long" (e.g., usubl) and "wide" (e.g., usubw) versions of the
2110   // instructions.
2111   //
2112   // TODO: Add additional widening operations (e.g., shl, etc.) once we
2113   //       verify that their extending operands are eliminated during code
2114   //       generation.
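  // e.g. (illustrative): %r = add <8 x i16> %x, (zext <8 x i8> %y to <8 x i16>)
  // can select a UADDW-style instruction, so the zero-extend of the second
  // operand is modelled as free.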
2115   Type *SrcTy = SrcOverrideTy;
2116   switch (Opcode) {
2117   case Instruction::Add: // UADDL(2), SADDL(2), UADDW(2), SADDW(2).
2118   case Instruction::Sub: // USUBL(2), SSUBL(2), USUBW(2), SSUBW(2).
2119     // The second operand needs to be an extend
2120     if (isa<SExtInst>(Args[1]) || isa<ZExtInst>(Args[1])) {
2121       if (!SrcTy)
2122         SrcTy =
2123             toVectorTy(cast<Instruction>(Args[1])->getOperand(0)->getType());
2124     } else
2125       return false;
2126     break;
2127   case Instruction::Mul: { // SMULL(2), UMULL(2)
2128     // Both operands need to be extends of the same type.
2129     if ((isa<SExtInst>(Args[0]) && isa<SExtInst>(Args[1])) ||
2130         (isa<ZExtInst>(Args[0]) && isa<ZExtInst>(Args[1]))) {
2131       if (!SrcTy)
2132         SrcTy =
2133             toVectorTy(cast<Instruction>(Args[0])->getOperand(0)->getType());
2134     } else if (isa<ZExtInst>(Args[0]) || isa<ZExtInst>(Args[1])) {
2135       // If one of the operands is a Zext and the other has enough zero bits to
2136       // be treated as unsigned, we can still generate a umull, meaning the zext
2137       // is free.
2138       KnownBits Known =
2139           computeKnownBits(isa<ZExtInst>(Args[0]) ? Args[1] : Args[0], DL);
2140       if (Args[0]->getType()->getScalarSizeInBits() -
2141               Known.Zero.countLeadingOnes() >
2142           DstTy->getScalarSizeInBits() / 2)
2143         return false;
2144       if (!SrcTy)
2145         SrcTy = toVectorTy(Type::getIntNTy(DstTy->getContext(),
2146                                            DstTy->getScalarSizeInBits() / 2));
2147     } else
2148       return false;
2149     break;
2150   }
2151   default:
2152     return false;
2153   }
2154 
2155   // Legalize the destination type and ensure it can be used in a widening
2156   // operation.
2157   auto DstTyL = getTypeLegalizationCost(DstTy);
2158   if (!DstTyL.second.isVector() || DstEltSize != DstTy->getScalarSizeInBits())
2159     return false;
2160 
2161   // Legalize the source type and ensure it can be used in a widening
2162   // operation.
2163   assert(SrcTy && "Expected some SrcTy");
2164   auto SrcTyL = getTypeLegalizationCost(SrcTy);
2165   unsigned SrcElTySize = SrcTyL.second.getScalarSizeInBits();
2166   if (!SrcTyL.second.isVector() || SrcElTySize != SrcTy->getScalarSizeInBits())
2167     return false;
2168 
2169   // Get the total number of vector elements in the legalized types.
2170   InstructionCost NumDstEls =
2171       DstTyL.first * DstTyL.second.getVectorMinNumElements();
2172   InstructionCost NumSrcEls =
2173       SrcTyL.first * SrcTyL.second.getVectorMinNumElements();
2174 
2175   // Return true if the legalized types have the same number of vector elements
2176   // and the destination element type size is twice that of the source type.
2177   return NumDstEls == NumSrcEls && 2 * SrcElTySize == DstEltSize;
2178 }
2179 
2180 // s/urhadd instructions implement the following pattern, making the
2181 // extends free:
2182 //   %x = add ((zext i8 -> i16), 1)
2183 //   %y = (zext i8 -> i16)
2184 //   trunc i16 (lshr (add %x, %y), 1) -> i8
2185 //
2186 bool AArch64TTIImpl::isExtPartOfAvgExpr(const Instruction *ExtUser, Type *Dst,
2187                                         Type *Src) {
2188   // The source should be a legal vector type.
2189   if (!Src->isVectorTy() || !TLI->isTypeLegal(TLI->getValueType(DL, Src)) ||
2190       (Src->isScalableTy() && !ST->hasSVE2()))
2191     return false;
2192 
2193   if (ExtUser->getOpcode() != Instruction::Add || !ExtUser->hasOneUse())
2194     return false;
2195 
2196   // Look for trunc/shl/add before trying to match the pattern.
2197   const Instruction *Add = ExtUser;
2198   auto *AddUser =
2199       dyn_cast_or_null<Instruction>(Add->getUniqueUndroppableUser());
2200   if (AddUser && AddUser->getOpcode() == Instruction::Add)
2201     Add = AddUser;
2202 
2203   auto *Shr = dyn_cast_or_null<Instruction>(Add->getUniqueUndroppableUser());
2204   if (!Shr || Shr->getOpcode() != Instruction::LShr)
2205     return false;
2206 
2207   auto *Trunc = dyn_cast_or_null<Instruction>(Shr->getUniqueUndroppableUser());
2208   if (!Trunc || Trunc->getOpcode() != Instruction::Trunc ||
2209       Src->getScalarSizeInBits() !=
2210           cast<CastInst>(Trunc)->getDestTy()->getScalarSizeInBits())
2211     return false;
2212 
2213   // Try to match the whole pattern. Ext could be either the first or second
2214   // m_ZExtOrSExt matched.
2215   Instruction *Ex1, *Ex2;
2216   if (!(match(Add, m_c_Add(m_Instruction(Ex1),
2217                            m_c_Add(m_Instruction(Ex2), m_SpecificInt(1))))))
2218     return false;
2219 
2220   // Ensure both extends are of the same type
2221   if (match(Ex1, m_ZExtOrSExt(m_Value())) &&
2222       Ex1->getOpcode() == Ex2->getOpcode())
2223     return true;
2224 
2225   return false;
2226 }
2227 
2228 InstructionCost AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
2229                                                  Type *Src,
2230                                                  TTI::CastContextHint CCH,
2231                                                  TTI::TargetCostKind CostKind,
2232                                                  const Instruction *I) {
2233   int ISD = TLI->InstructionOpcodeToISD(Opcode);
2234   assert(ISD && "Invalid opcode");
2235   // If the cast is observable, and it is used by a widening instruction (e.g.,
2236   // uaddl, saddw, etc.), it may be free.
2237   if (I && I->hasOneUser()) {
2238     auto *SingleUser = cast<Instruction>(*I->user_begin());
2239     SmallVector<const Value *, 4> Operands(SingleUser->operand_values());
2240     if (isWideningInstruction(Dst, SingleUser->getOpcode(), Operands, Src)) {
2241       // For adds, the extend feeding the second operand is free; an extend
2242       // feeding the first operand is only free when the second operand is an
2243       // extend of the same kind (so both are not free in add(sext, zext)).
2244       if (SingleUser->getOpcode() == Instruction::Add) {
2245         if (I == SingleUser->getOperand(1) ||
2246             (isa<CastInst>(SingleUser->getOperand(1)) &&
2247              cast<CastInst>(SingleUser->getOperand(1))->getOpcode() == Opcode))
2248           return 0;
2249       } else // Others are free so long as isWideningInstruction returned true.
2250         return 0;
2251     }
2252 
2253     // The cast will be free for the s/urhadd instructions
2254     if ((isa<ZExtInst>(I) || isa<SExtInst>(I)) &&
2255         isExtPartOfAvgExpr(SingleUser, Dst, Src))
2256       return 0;
2257   }
2258 
2259   // TODO: Allow non-throughput costs that aren't binary.
2260   auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
2261     if (CostKind != TTI::TCK_RecipThroughput)
2262       return Cost == 0 ? 0 : 1;
2263     return Cost;
2264   };
2265 
2266   EVT SrcTy = TLI->getValueType(DL, Src);
2267   EVT DstTy = TLI->getValueType(DL, Dst);
2268 
2269   if (!SrcTy.isSimple() || !DstTy.isSimple())
2270     return AdjustCost(
2271         BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
2272 
2273   static const TypeConversionCostTblEntry
2274   ConversionTbl[] = {
2275     { ISD::TRUNCATE, MVT::v2i8,   MVT::v2i64,  1},  // xtn
2276     { ISD::TRUNCATE, MVT::v2i16,  MVT::v2i64,  1},  // xtn
2277     { ISD::TRUNCATE, MVT::v2i32,  MVT::v2i64,  1},  // xtn
2278     { ISD::TRUNCATE, MVT::v4i8,   MVT::v4i32,  1},  // xtn
2279     { ISD::TRUNCATE, MVT::v4i8,   MVT::v4i64,  3},  // 2 xtn + 1 uzp1
2280     { ISD::TRUNCATE, MVT::v4i16,  MVT::v4i32,  1},  // xtn
2281     { ISD::TRUNCATE, MVT::v4i16,  MVT::v4i64,  2},  // 1 uzp1 + 1 xtn
2282     { ISD::TRUNCATE, MVT::v4i32,  MVT::v4i64,  1},  // 1 uzp1
2283     { ISD::TRUNCATE, MVT::v8i8,   MVT::v8i16,  1},  // 1 xtn
2284     { ISD::TRUNCATE, MVT::v8i8,   MVT::v8i32,  2},  // 1 uzp1 + 1 xtn
2285     { ISD::TRUNCATE, MVT::v8i8,   MVT::v8i64,  4},  // 3 x uzp1 + xtn
2286     { ISD::TRUNCATE, MVT::v8i16,  MVT::v8i32,  1},  // 1 uzp1
2287     { ISD::TRUNCATE, MVT::v8i16,  MVT::v8i64,  3},  // 3 x uzp1
2288     { ISD::TRUNCATE, MVT::v8i32,  MVT::v8i64,  2},  // 2 x uzp1
2289     { ISD::TRUNCATE, MVT::v16i8,  MVT::v16i16, 1},  // uzp1
2290     { ISD::TRUNCATE, MVT::v16i8,  MVT::v16i32, 3},  // (2 + 1) x uzp1
2291     { ISD::TRUNCATE, MVT::v16i8,  MVT::v16i64, 7},  // (4 + 2 + 1) x uzp1
2292     { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 2},  // 2 x uzp1
2293     { ISD::TRUNCATE, MVT::v16i16, MVT::v16i64, 6},  // (4 + 2) x uzp1
2294     { ISD::TRUNCATE, MVT::v16i32, MVT::v16i64, 4},  // 4 x uzp1
2295 
2296     // Truncations on nxvmiN
2297     { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i16, 1 },
2298     { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i32, 1 },
2299     { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i64, 1 },
2300     { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i16, 1 },
2301     { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i32, 1 },
2302     { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i64, 2 },
2303     { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i16, 1 },
2304     { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i32, 3 },
2305     { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i64, 5 },
2306     { ISD::TRUNCATE, MVT::nxv16i1, MVT::nxv16i8, 1 },
2307     { ISD::TRUNCATE, MVT::nxv2i16, MVT::nxv2i32, 1 },
2308     { ISD::TRUNCATE, MVT::nxv2i32, MVT::nxv2i64, 1 },
2309     { ISD::TRUNCATE, MVT::nxv4i16, MVT::nxv4i32, 1 },
2310     { ISD::TRUNCATE, MVT::nxv4i32, MVT::nxv4i64, 2 },
2311     { ISD::TRUNCATE, MVT::nxv8i16, MVT::nxv8i32, 3 },
2312     { ISD::TRUNCATE, MVT::nxv8i32, MVT::nxv8i64, 6 },
2313 
2314     // The number of shll instructions for the extension.
2315     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
2316     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
2317     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
2318     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
2319     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
2320     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
2321     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
2322     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
2323     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
2324     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
2325     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
2326     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
2327     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
2328     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
2329     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
2330     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
2331 
2332     // LowerVectorINT_TO_FP:
2333     { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
2334     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
2335     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
2336     { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
2337     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
2338     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
2339 
2340     // Complex: to v2f32
2341     { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
2342     { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
2343     { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
2344     { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
2345     { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
2346     { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
2347 
2348     // Complex: to v4f32
2349     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8,  4 },
2350     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
2351     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8,  3 },
2352     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
2353 
2354     // Complex: to v8f32
2355     { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
2356     { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
2357     { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
2358     { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
2359 
2360     // Complex: to v16f32
2361     { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },
2362     { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },
2363 
2364     // Complex: to v2f64
2365     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
2366     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
2367     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
2368     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
2369     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
2370     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
2371 
2372     // Complex: to v4f64
2373     { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32,  4 },
2374     { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32,  4 },
2375 
2376     // LowerVectorFP_TO_INT
2377     { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 },
2378     { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
2379     { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
2380     { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
2381     { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
2382     { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
2383 
2384     // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext).
2385     { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 },
2386     { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 },
2387     { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f32, 1 },
2388     { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 },
2389     { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 },
2390     { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f32, 1 },
2391 
2392     // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2
2393     { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
2394     { ISD::FP_TO_SINT, MVT::v4i8,  MVT::v4f32, 2 },
2395     { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
2396     { ISD::FP_TO_UINT, MVT::v4i8,  MVT::v4f32, 2 },
2397 
2398     // Complex, from nxv2f32.
2399     { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f32, 1 },
2400     { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f32, 1 },
2401     { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f32, 1 },
2402     { ISD::FP_TO_SINT, MVT::nxv2i8,  MVT::nxv2f32, 1 },
2403     { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f32, 1 },
2404     { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f32, 1 },
2405     { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f32, 1 },
2406     { ISD::FP_TO_UINT, MVT::nxv2i8,  MVT::nxv2f32, 1 },
2407 
2408     // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2.
2409     { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
2410     { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
2411     { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f64, 2 },
2412     { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
2413     { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
2414     { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f64, 2 },
2415 
2416     // Complex, from nxv2f64.
2417     { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f64, 1 },
2418     { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f64, 1 },
2419     { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f64, 1 },
2420     { ISD::FP_TO_SINT, MVT::nxv2i8,  MVT::nxv2f64, 1 },
2421     { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f64, 1 },
2422     { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f64, 1 },
2423     { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f64, 1 },
2424     { ISD::FP_TO_UINT, MVT::nxv2i8,  MVT::nxv2f64, 1 },
2425 
2426     // Complex, from nxv4f32.
2427     { ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f32, 4 },
2428     { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f32, 1 },
2429     { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f32, 1 },
2430     { ISD::FP_TO_SINT, MVT::nxv4i8,  MVT::nxv4f32, 1 },
2431     { ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f32, 4 },
2432     { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f32, 1 },
2433     { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f32, 1 },
2434     { ISD::FP_TO_UINT, MVT::nxv4i8,  MVT::nxv4f32, 1 },
2435 
2436     // Complex, from nxv8f64. Illegal -> illegal conversions not required.
2437     { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f64, 7 },
2438     { ISD::FP_TO_SINT, MVT::nxv8i8,  MVT::nxv8f64, 7 },
2439     { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f64, 7 },
2440     { ISD::FP_TO_UINT, MVT::nxv8i8,  MVT::nxv8f64, 7 },
2441 
2442     // Complex, from nxv4f64. Illegal -> illegal conversions not required.
2443     { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f64, 3 },
2444     { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f64, 3 },
2445     { ISD::FP_TO_SINT, MVT::nxv4i8,  MVT::nxv4f64, 3 },
2446     { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f64, 3 },
2447     { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f64, 3 },
2448     { ISD::FP_TO_UINT, MVT::nxv4i8,  MVT::nxv4f64, 3 },
2449 
2450     // Complex, from nxv8f32. Illegal -> illegal conversions not required.
2451     { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f32, 3 },
2452     { ISD::FP_TO_SINT, MVT::nxv8i8,  MVT::nxv8f32, 3 },
2453     { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f32, 3 },
2454     { ISD::FP_TO_UINT, MVT::nxv8i8,  MVT::nxv8f32, 3 },
2455 
2456     // Complex, from nxv8f16.
2457     { ISD::FP_TO_SINT, MVT::nxv8i64, MVT::nxv8f16, 10 },
2458     { ISD::FP_TO_SINT, MVT::nxv8i32, MVT::nxv8f16, 4 },
2459     { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f16, 1 },
2460     { ISD::FP_TO_SINT, MVT::nxv8i8,  MVT::nxv8f16, 1 },
2461     { ISD::FP_TO_UINT, MVT::nxv8i64, MVT::nxv8f16, 10 },
2462     { ISD::FP_TO_UINT, MVT::nxv8i32, MVT::nxv8f16, 4 },
2463     { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f16, 1 },
2464     { ISD::FP_TO_UINT, MVT::nxv8i8,  MVT::nxv8f16, 1 },
2465 
2466     // Complex, from nxv4f16.
2467     { ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f16, 4 },
2468     { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f16, 1 },
2469     { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f16, 1 },
2470     { ISD::FP_TO_SINT, MVT::nxv4i8,  MVT::nxv4f16, 1 },
2471     { ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f16, 4 },
2472     { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f16, 1 },
2473     { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f16, 1 },
2474     { ISD::FP_TO_UINT, MVT::nxv4i8,  MVT::nxv4f16, 1 },
2475 
2476     // Complex, from nxv2f16.
2477     { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f16, 1 },
2478     { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f16, 1 },
2479     { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f16, 1 },
2480     { ISD::FP_TO_SINT, MVT::nxv2i8,  MVT::nxv2f16, 1 },
2481     { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f16, 1 },
2482     { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f16, 1 },
2483     { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f16, 1 },
2484     { ISD::FP_TO_UINT, MVT::nxv2i8,  MVT::nxv2f16, 1 },
2485 
2486     // Truncate from nxvmf32 to nxvmf16.
2487     { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f32, 1 },
2488     { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f32, 1 },
2489     { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f32, 3 },
2490 
2491     // Truncate from nxvmf64 to nxvmf16.
2492     { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f64, 1 },
2493     { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f64, 3 },
2494     { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f64, 7 },
2495 
2496     // Truncate from nxvmf64 to nxvmf32.
2497     { ISD::FP_ROUND, MVT::nxv2f32, MVT::nxv2f64, 1 },
2498     { ISD::FP_ROUND, MVT::nxv4f32, MVT::nxv4f64, 3 },
2499     { ISD::FP_ROUND, MVT::nxv8f32, MVT::nxv8f64, 6 },
2500 
2501     // Extend from nxvmf16 to nxvmf32.
2502     { ISD::FP_EXTEND, MVT::nxv2f32, MVT::nxv2f16, 1},
2503     { ISD::FP_EXTEND, MVT::nxv4f32, MVT::nxv4f16, 1},
2504     { ISD::FP_EXTEND, MVT::nxv8f32, MVT::nxv8f16, 2},
2505 
2506     // Extend from nxvmf16 to nxvmf64.
2507     { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f16, 1},
2508     { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f16, 2},
2509     { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f16, 4},
2510 
2511     // Extend from nxvmf32 to nxvmf64.
2512     { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f32, 1},
2513     { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f32, 2},
2514     { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f32, 6},
2515 
2516     // Bitcasts from integer to float
2517     { ISD::BITCAST, MVT::nxv2f16, MVT::nxv2i16, 0 },
2518     { ISD::BITCAST, MVT::nxv4f16, MVT::nxv4i16, 0 },
2519     { ISD::BITCAST, MVT::nxv2f32, MVT::nxv2i32, 0 },
2520 
2521     // Bitcasts from float to integer
2522     { ISD::BITCAST, MVT::nxv2i16, MVT::nxv2f16, 0 },
2523     { ISD::BITCAST, MVT::nxv4i16, MVT::nxv4f16, 0 },
2524     { ISD::BITCAST, MVT::nxv2i32, MVT::nxv2f32, 0 },
2525 
2526     // Add cost for extending to illegal (too wide) scalable vectors.
2527     // Zero/sign extends are implemented by multiple unpack operations,
2528     // where each operation has a cost of 1.
2529     { ISD::ZERO_EXTEND, MVT::nxv16i16, MVT::nxv16i8, 2},
2530     { ISD::ZERO_EXTEND, MVT::nxv16i32, MVT::nxv16i8, 6},
2531     { ISD::ZERO_EXTEND, MVT::nxv16i64, MVT::nxv16i8, 14},
2532     { ISD::ZERO_EXTEND, MVT::nxv8i32, MVT::nxv8i16, 2},
2533     { ISD::ZERO_EXTEND, MVT::nxv8i64, MVT::nxv8i16, 6},
2534     { ISD::ZERO_EXTEND, MVT::nxv4i64, MVT::nxv4i32, 2},
2535 
2536     { ISD::SIGN_EXTEND, MVT::nxv16i16, MVT::nxv16i8, 2},
2537     { ISD::SIGN_EXTEND, MVT::nxv16i32, MVT::nxv16i8, 6},
2538     { ISD::SIGN_EXTEND, MVT::nxv16i64, MVT::nxv16i8, 14},
2539     { ISD::SIGN_EXTEND, MVT::nxv8i32, MVT::nxv8i16, 2},
2540     { ISD::SIGN_EXTEND, MVT::nxv8i64, MVT::nxv8i16, 6},
2541     { ISD::SIGN_EXTEND, MVT::nxv4i64, MVT::nxv4i32, 2},
2542   };
2543 
2544   // We have to estimate the cost of a fixed-length operation performed on
2545   // SVE registers by the number of SVE registers required to represent
2546   // the fixed-length type.
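       // For example (illustrative): a trunc from v16i32 to v16i16, when SVE
       // is used for fixed-length vectors, is costed as the equivalent
       // nxv4i32 -> nxv4i16 trunc scaled by the legalization cost of the
       // wider type; the 4 lanes come from SVEBitsPerBlock (128) divided by
       // the 32-bit element size of the legalized wider type.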
2547   EVT WiderTy = SrcTy.bitsGT(DstTy) ? SrcTy : DstTy;
2548   if (SrcTy.isFixedLengthVector() && DstTy.isFixedLengthVector() &&
2549       SrcTy.getVectorNumElements() == DstTy.getVectorNumElements() &&
2550       ST->useSVEForFixedLengthVectors(WiderTy)) {
2551     std::pair<InstructionCost, MVT> LT =
2552         getTypeLegalizationCost(WiderTy.getTypeForEVT(Dst->getContext()));
2553     unsigned NumElements = AArch64::SVEBitsPerBlock /
2554                            LT.second.getVectorElementType().getSizeInBits();
2555     return AdjustCost(
2556         LT.first *
2557         getCastInstrCost(
2558             Opcode, ScalableVectorType::get(Dst->getScalarType(), NumElements),
2559             ScalableVectorType::get(Src->getScalarType(), NumElements), CCH,
2560             CostKind, I));
2561   }
2562 
2563   if (const auto *Entry = ConvertCostTableLookup(ConversionTbl, ISD,
2564                                                  DstTy.getSimpleVT(),
2565                                                  SrcTy.getSimpleVT()))
2566     return AdjustCost(Entry->Cost);
2567 
2568   static const TypeConversionCostTblEntry FP16Tbl[] = {
2569       {ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f16, 1}, // fcvtzs
2570       {ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f16, 1},
2571       {ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f16, 1}, // fcvtzs
2572       {ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f16, 1},
2573       {ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f16, 2}, // fcvtl+fcvtzs
2574       {ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f16, 2},
2575       {ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f16, 2}, // fcvtzs+xtn
2576       {ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f16, 2},
2577       {ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f16, 1}, // fcvtzs
2578       {ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f16, 1},
2579       {ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f16, 4}, // 2*fcvtl+2*fcvtzs
2580       {ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f16, 4},
2581       {ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f16, 3}, // 2*fcvtzs+xtn
2582       {ISD::FP_TO_UINT, MVT::v16i8, MVT::v16f16, 3},
2583       {ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f16, 2}, // 2*fcvtzs
2584       {ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f16, 2},
2585       {ISD::FP_TO_SINT, MVT::v16i32, MVT::v16f16, 8}, // 4*fcvtl+4*fcvtzs
2586       {ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f16, 8},
2587       {ISD::UINT_TO_FP, MVT::v8f16, MVT::v8i8, 2},   // ushll + ucvtf
2588       {ISD::SINT_TO_FP, MVT::v8f16, MVT::v8i8, 2},   // sshll + scvtf
2589       {ISD::UINT_TO_FP, MVT::v16f16, MVT::v16i8, 4}, // 2 * ushl(2) + 2 * ucvtf
2590       {ISD::SINT_TO_FP, MVT::v16f16, MVT::v16i8, 4}, // 2 * sshl(2) + 2 * scvtf
2591   };
2592 
2593   if (ST->hasFullFP16())
2594     if (const auto *Entry = ConvertCostTableLookup(
2595             FP16Tbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
2596       return AdjustCost(Entry->Cost);
2597 
2598   if ((ISD == ISD::ZERO_EXTEND || ISD == ISD::SIGN_EXTEND) &&
2599       CCH == TTI::CastContextHint::Masked && ST->hasSVEorSME() &&
2600       TLI->getTypeAction(Src->getContext(), SrcTy) ==
2601           TargetLowering::TypePromoteInteger &&
2602       TLI->getTypeAction(Dst->getContext(), DstTy) ==
2603           TargetLowering::TypeSplitVector) {
2604     // The standard behaviour in the backend for these cases is to split the
2605     // extend up into two parts:
2606     //  1. Perform an extending load or masked load up to the legal type.
2607     //  2. Extend the loaded data to the final type.
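         // For example (illustrative): a masked zext from nxv8i8 to nxv8i64
         // is costed as an extending masked load up to the promoted type
         // nxv8i16 (Part1) plus a normal nxv8i16 -> nxv8i64 extend (Part2).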
2608     std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(Src);
2609     Type *LegalTy = EVT(SrcLT.second).getTypeForEVT(Src->getContext());
2610     InstructionCost Part1 = AArch64TTIImpl::getCastInstrCost(
2611         Opcode, LegalTy, Src, CCH, CostKind, I);
2612     InstructionCost Part2 = AArch64TTIImpl::getCastInstrCost(
2613         Opcode, Dst, LegalTy, TTI::CastContextHint::None, CostKind, I);
2614     return Part1 + Part2;
2615   }
2616 
2617   // The BasicTTIImpl version only deals with CCH==TTI::CastContextHint::Normal,
2618   // but we also want to include the TTI::CastContextHint::Masked case.
2619   if ((ISD == ISD::ZERO_EXTEND || ISD == ISD::SIGN_EXTEND) &&
2620       CCH == TTI::CastContextHint::Masked && ST->hasSVEorSME() &&
2621       TLI->isTypeLegal(DstTy))
2622     CCH = TTI::CastContextHint::Normal;
2623 
2624   return AdjustCost(
2625       BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
2626 }
2627 
2628 InstructionCost AArch64TTIImpl::getExtractWithExtendCost(unsigned Opcode,
2629                                                          Type *Dst,
2630                                                          VectorType *VecTy,
2631                                                          unsigned Index) {
2632 
2633   // Make sure we were given a valid extend opcode.
2634   assert((Opcode == Instruction::SExt || Opcode == Instruction::ZExt) &&
2635          "Invalid opcode");
2636 
2637   // We are extending an element we extract from a vector, so the source type
2638   // of the extend is the element type of the vector.
2639   auto *Src = VecTy->getElementType();
2640 
2641   // Sign- and zero-extends are for integer types only.
2642   assert(isa<IntegerType>(Dst) && isa<IntegerType>(Src) && "Invalid type");
2643 
2644   // Get the cost for the extract. We compute the cost (if any) for the extend
2645   // below.
2646   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
2647   InstructionCost Cost = getVectorInstrCost(Instruction::ExtractElement, VecTy,
2648                                             CostKind, Index, nullptr, nullptr);
2649 
2650   // Legalize the types.
2651   auto VecLT = getTypeLegalizationCost(VecTy);
2652   auto DstVT = TLI->getValueType(DL, Dst);
2653   auto SrcVT = TLI->getValueType(DL, Src);
2654 
2655   // If the resulting type is still a vector and the destination type is legal,
2656   // we may get the extension for free. If not, get the default cost for the
2657   // extend.
2658   if (!VecLT.second.isVector() || !TLI->isTypeLegal(DstVT))
2659     return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
2660                                    CostKind);
2661 
2662   // The destination type should be larger than the element type. If not, get
2663   // the default cost for the extend.
2664   if (DstVT.getFixedSizeInBits() < SrcVT.getFixedSizeInBits())
2665     return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
2666                                    CostKind);
2667 
2668   switch (Opcode) {
2669   default:
2670     llvm_unreachable("Opcode should be either SExt or ZExt");
2671 
2672   // For sign-extends, we only need a smov, which performs the extension
2673   // automatically.
2674   case Instruction::SExt:
2675     return Cost;
2676 
2677   // For zero-extends, the extend is performed automatically by a umov unless
2678   // the destination type is i64 and the element type is i8 or i16.
2679   case Instruction::ZExt:
2680     if (DstVT.getSizeInBits() != 64u || SrcVT.getSizeInBits() == 32u)
2681       return Cost;
2682   }
2683 
2684   // If we are unable to perform the extend for free, get the default cost.
2685   return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
2686                                  CostKind);
2687 }
2688 
2689 InstructionCost AArch64TTIImpl::getCFInstrCost(unsigned Opcode,
2690                                                TTI::TargetCostKind CostKind,
2691                                                const Instruction *I) {
2692   if (CostKind != TTI::TCK_RecipThroughput)
2693     return Opcode == Instruction::PHI ? 0 : 1;
2694   assert(CostKind == TTI::TCK_RecipThroughput && "unexpected CostKind");
2695   // Branches are assumed to be predicted.
2696   return 0;
2697 }
2698 
2699 InstructionCost AArch64TTIImpl::getVectorInstrCostHelper(const Instruction *I,
2700                                                          Type *Val,
2701                                                          unsigned Index,
2702                                                          bool HasRealUse) {
2703   assert(Val->isVectorTy() && "This must be a vector type");
2704 
2705   if (Index != -1U) {
2706     // Legalize the type.
2707     std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Val);
2708 
2709     // This type is legalized to a scalar type.
2710     if (!LT.second.isVector())
2711       return 0;
2712 
2713     // The type may be split. For fixed-width vectors we can normalize the
2714     // index to the new type.
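         // For example (illustrative): extracting element 5 from a v8i32,
         // which is split into two v4i32 registers, becomes an extract of
         // element 5 % 4 == 1 from one of the halves.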
2715     if (LT.second.isFixedLengthVector()) {
2716       unsigned Width = LT.second.getVectorNumElements();
2717       Index = Index % Width;
2718     }
2719 
2720     // The element at index zero is already inside the vector.
2721     // - For a physical (HasRealUse==true) insert-element or extract-element
2722     // instruction that extracts integers, an explicit FPR -> GPR move is
2723     // needed. So it has non-zero cost.
2724     // - For the rest of cases (virtual instruction or element type is float),
2725     // consider the instruction free.
2726     if (Index == 0 && (!HasRealUse || !Val->getScalarType()->isIntegerTy()))
2727       return 0;
2728 
2729     // This recognises an LD1 (single-element structure to one lane of one
2730     // register) instruction. I.e., if this is an `insertelement` instruction
2731     // and its second operand is a load, then we will generate an LD1, which
2732     // is an expensive instruction.
2733     if (I && dyn_cast<LoadInst>(I->getOperand(1)))
2734       return ST->getVectorInsertExtractBaseCost() + 1;
2735 
2736     // i1 inserts and extracts will include an extra cset or cmp of the vector
2737     // value. Increase the cost by 1 to account for this.
2738     if (Val->getScalarSizeInBits() == 1)
2739       return ST->getVectorInsertExtractBaseCost() + 1;
2740 
2741     // FIXME:
2742     // If the extract-element and insert-element instructions could be
2743     // simplified away (e.g., could be combined into users by looking at use-def
2744     // context), they have no cost. This is not done in the first place for
2745     // compile-time considerations.
2746   }
2747 
2748   // All other insert/extracts cost this much.
2749   return ST->getVectorInsertExtractBaseCost();
2750 }
2751 
2752 InstructionCost AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
2753                                                    TTI::TargetCostKind CostKind,
2754                                                    unsigned Index, Value *Op0,
2755                                                    Value *Op1) {
2756   bool HasRealUse =
2757       Opcode == Instruction::InsertElement && Op0 && !isa<UndefValue>(Op0);
2758   return getVectorInstrCostHelper(nullptr, Val, Index, HasRealUse);
2759 }
2760 
2761 InstructionCost AArch64TTIImpl::getVectorInstrCost(const Instruction &I,
2762                                                    Type *Val,
2763                                                    TTI::TargetCostKind CostKind,
2764                                                    unsigned Index) {
2765   return getVectorInstrCostHelper(&I, Val, Index, true /* HasRealUse */);
2766 }
2767 
2768 InstructionCost AArch64TTIImpl::getScalarizationOverhead(
2769     VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
2770     TTI::TargetCostKind CostKind) {
2771   if (isa<ScalableVectorType>(Ty))
2772     return InstructionCost::getInvalid();
2773   if (Ty->getElementType()->isFloatingPointTy())
2774     return BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
2775                                            CostKind);
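       // For integer elements, each demanded element pays the base
       // insert/extract cost once per requested operation, e.g.
       // (illustrative) 4 demanded elements with both Insert and Extract set
       // cost 4 * 2 * the base cost.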
2776   return DemandedElts.popcount() * (Insert + Extract) *
2777          ST->getVectorInsertExtractBaseCost();
2778 }
2779 
2780 InstructionCost AArch64TTIImpl::getArithmeticInstrCost(
2781     unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
2782     TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
2783     ArrayRef<const Value *> Args,
2784     const Instruction *CxtI) {
2785 
2786   // TODO: Handle more cost kinds.
2787   if (CostKind != TTI::TCK_RecipThroughput)
2788     return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
2789                                          Op2Info, Args, CxtI);
2790 
2791   // Legalize the type.
2792   std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
2793   int ISD = TLI->InstructionOpcodeToISD(Opcode);
2794 
2795   switch (ISD) {
2796   default:
2797     return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
2798                                          Op2Info);
2799   case ISD::SDIV:
2800     if (Op2Info.isConstant() && Op2Info.isUniform() && Op2Info.isPowerOf2()) {
2801       // On AArch64, scalar signed division by a power-of-two constant is
2802       // normally expanded to the sequence ADD + CMP + SELECT + SRA.
2803       // The OperandValue properties may not be the same as those of the
2804       // previous operation; conservatively assume OP_None.
2805       InstructionCost Cost = getArithmeticInstrCost(
2806           Instruction::Add, Ty, CostKind,
2807           Op1Info.getNoProps(), Op2Info.getNoProps());
2808       Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind,
2809                                      Op1Info.getNoProps(), Op2Info.getNoProps());
2810       Cost += getArithmeticInstrCost(
2811           Instruction::Select, Ty, CostKind,
2812           Op1Info.getNoProps(), Op2Info.getNoProps());
2813       Cost += getArithmeticInstrCost(Instruction::AShr, Ty, CostKind,
2814                                      Op1Info.getNoProps(), Op2Info.getNoProps());
2815       return Cost;
2816     }
2817     [[fallthrough]];
2818   case ISD::UDIV: {
2819     if (Op2Info.isConstant() && Op2Info.isUniform()) {
2820       auto VT = TLI->getValueType(DL, Ty);
2821       if (TLI->isOperationLegalOrCustom(ISD::MULHU, VT)) {
2822         // Vector signed division by a constant is expanded to the
2823         // sequence MULHS + ADD/SUB + SRA + SRL + ADD, and unsigned division
2824         // to MULHU + SUB + SRL + ADD + SRL.
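             // The return below approximates either sequence as two
             // multiplies, two adds and two shifts, plus 1 (a rough model
             // rather than an exact instruction count).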
2825         InstructionCost MulCost = getArithmeticInstrCost(
2826             Instruction::Mul, Ty, CostKind, Op1Info.getNoProps(), Op2Info.getNoProps());
2827         InstructionCost AddCost = getArithmeticInstrCost(
2828             Instruction::Add, Ty, CostKind, Op1Info.getNoProps(), Op2Info.getNoProps());
2829         InstructionCost ShrCost = getArithmeticInstrCost(
2830             Instruction::AShr, Ty, CostKind, Op1Info.getNoProps(), Op2Info.getNoProps());
2831         return MulCost * 2 + AddCost * 2 + ShrCost * 2 + 1;
2832       }
2833     }
2834 
2835     InstructionCost Cost = BaseT::getArithmeticInstrCost(
2836         Opcode, Ty, CostKind, Op1Info, Op2Info);
2837     if (Ty->isVectorTy()) {
2838       if (TLI->isOperationLegalOrCustom(ISD, LT.second) && ST->hasSVE()) {
2839         // SDIV/UDIV operations are lowered using SVE, so they can have a
2840         // lower cost.
2841         if (isa<FixedVectorType>(Ty) && cast<FixedVectorType>(Ty)
2842                                                 ->getPrimitiveSizeInBits()
2843                                                 .getFixedValue() < 128) {
2844           EVT VT = TLI->getValueType(DL, Ty);
2845           static const CostTblEntry DivTbl[]{
2846               {ISD::SDIV, MVT::v2i8, 5},  {ISD::SDIV, MVT::v4i8, 8},
2847               {ISD::SDIV, MVT::v8i8, 8},  {ISD::SDIV, MVT::v2i16, 5},
2848               {ISD::SDIV, MVT::v4i16, 5}, {ISD::SDIV, MVT::v2i32, 1},
2849               {ISD::UDIV, MVT::v2i8, 5},  {ISD::UDIV, MVT::v4i8, 8},
2850               {ISD::UDIV, MVT::v8i8, 8},  {ISD::UDIV, MVT::v2i16, 5},
2851               {ISD::UDIV, MVT::v4i16, 5}, {ISD::UDIV, MVT::v2i32, 1}};
2852 
2853           const auto *Entry = CostTableLookup(DivTbl, ISD, VT.getSimpleVT());
2854           if (nullptr != Entry)
2855             return Entry->Cost;
2856         }
2857         // For 8/16-bit elements, the cost is higher because the type
2858         // requires promotion and possibly splitting:
2859         if (LT.second.getScalarType() == MVT::i8)
2860           Cost *= 8;
2861         else if (LT.second.getScalarType() == MVT::i16)
2862           Cost *= 4;
2863         return Cost;
2864       } else {
2865         // If one of the operands is a uniform constant then the cost for each
2866         // element is the cost of insertion, extraction and division.
2867         // Insertion cost = 2, extraction cost = 2, division = cost of the
2868         // operation with the scalar type.
2869         if ((Op1Info.isConstant() && Op1Info.isUniform()) ||
2870             (Op2Info.isConstant() && Op2Info.isUniform())) {
2871           if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
2872             InstructionCost DivCost = BaseT::getArithmeticInstrCost(
2873                 Opcode, Ty->getScalarType(), CostKind, Op1Info, Op2Info);
2874             return (4 + DivCost) * VTy->getNumElements();
2875           }
2876         }
2877         // On AArch64, without SVE, vector divisions are expanded
2878         // into scalar divisions of each pair of elements.
2879         Cost += getArithmeticInstrCost(Instruction::ExtractElement, Ty,
2880                                        CostKind, Op1Info, Op2Info);
2881         Cost += getArithmeticInstrCost(Instruction::InsertElement, Ty, CostKind,
2882                                        Op1Info, Op2Info);
2883       }
2884 
2885       // TODO: if one of the arguments is scalar, then it's not necessary to
2886       // double the cost of handling the vector elements.
2887       Cost += Cost;
2888     }
2889     return Cost;
2890   }
2891   case ISD::MUL:
2892     // When SVE is available, we can lower the v2i64 operation using
2893     // the SVE mul instruction, which has a lower cost.
2894     if (LT.second == MVT::v2i64 && ST->hasSVE())
2895       return LT.first;
2896 
2897     // When SVE is not available, there is no MUL.2d instruction,
2898     // which means mul <2 x i64> is expensive as elements are extracted
2899     // from the vectors and the muls scalarized.
2900     // As getScalarizationOverhead is a bit too pessimistic, we
2901     // estimate the cost for a i64 vector directly here, which is:
2902     // - four 2-cost i64 extracts,
2903     // - two 2-cost i64 inserts, and
2904     // - two 1-cost muls.
2905     // So, for a v2i64 with LT.first = 1 the cost is 14, and for a v4i64 with
2906     // LT.first = 2 the cost is 28. If both operands are extensions it will not
2907     // need to scalarize, so the cost can be cheaper (smull or umull).
2909     if (LT.second != MVT::v2i64 || isWideningInstruction(Ty, Opcode, Args))
2910       return LT.first;
2911     return LT.first * 14;
2912   case ISD::ADD:
2913   case ISD::XOR:
2914   case ISD::OR:
2915   case ISD::AND:
2916   case ISD::SRL:
2917   case ISD::SRA:
2918   case ISD::SHL:
2919     // These nodes are marked as 'custom' for combining purposes only.
2920     // We know that they are legal. See LowerAdd in ISelLowering.
2921     return LT.first;
2922 
2923   case ISD::FNEG:
2924   case ISD::FADD:
2925   case ISD::FSUB:
2926     // Increase the cost for half and bfloat types if not architecturally
2927     // supported.
2928     if ((Ty->getScalarType()->isHalfTy() && !ST->hasFullFP16()) ||
2929         (Ty->getScalarType()->isBFloatTy() && !ST->hasBF16()))
2930       return 2 * LT.first;
2931     if (!Ty->getScalarType()->isFP128Ty())
2932       return LT.first;
2933     [[fallthrough]];
2934   case ISD::FMUL:
2935   case ISD::FDIV:
2936     // These nodes are marked as 'custom' just to lower them to SVE.
2937     // We know said lowering will incur no additional cost.
2938     if (!Ty->getScalarType()->isFP128Ty())
2939       return 2 * LT.first;
2940 
2941     return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
2942                                          Op2Info);
2943   }
2944 }
2945 
2946 InstructionCost AArch64TTIImpl::getAddressComputationCost(Type *Ty,
2947                                                           ScalarEvolution *SE,
2948                                                           const SCEV *Ptr) {
2949   // Address computations in vectorized code with non-consecutive addresses will
2950   // likely result in more instructions compared to scalar code where the
2951   // computation can more often be merged into the index mode. The resulting
2952   // extra micro-ops can significantly decrease throughput.
2953   unsigned NumVectorInstToHideOverhead = NeonNonConstStrideOverhead;
2954   int MaxMergeDistance = 64;
2955 
2956   if (Ty->isVectorTy() && SE &&
2957       !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
2958     return NumVectorInstToHideOverhead;
2959 
2960   // In many cases the address computation is not merged into the instruction
2961   // addressing mode.
2962   return 1;
2963 }
2964 
2965 InstructionCost AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
2966                                                    Type *CondTy,
2967                                                    CmpInst::Predicate VecPred,
2968                                                    TTI::TargetCostKind CostKind,
2969                                                    const Instruction *I) {
2970   // TODO: Handle other cost kinds.
2971   if (CostKind != TTI::TCK_RecipThroughput)
2972     return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
2973                                      I);
2974 
2975   int ISD = TLI->InstructionOpcodeToISD(Opcode);
2976   // We don't lower some vector selects well that are wider than the register
2977   // width.
2978   if (isa<FixedVectorType>(ValTy) && ISD == ISD::SELECT) {
2979     // We would need this many instructions to hide the scalarization happening.
2980     const int AmortizationCost = 20;
2981 
2982     // If VecPred is not set, check if we can get a predicate from the context
2983     // instruction, if its type matches the requested ValTy.
2984     if (VecPred == CmpInst::BAD_ICMP_PREDICATE && I && I->getType() == ValTy) {
2985       CmpInst::Predicate CurrentPred;
2986       if (match(I, m_Select(m_Cmp(CurrentPred, m_Value(), m_Value()), m_Value(),
2987                             m_Value())))
2988         VecPred = CurrentPred;
2989     }
2990     // Check if we have a compare/select chain that can be lowered using
2991     // a (F)CMxx & BFI pair.
2992     if (CmpInst::isIntPredicate(VecPred) || VecPred == CmpInst::FCMP_OLE ||
2993         VecPred == CmpInst::FCMP_OLT || VecPred == CmpInst::FCMP_OGT ||
2994         VecPred == CmpInst::FCMP_OGE || VecPred == CmpInst::FCMP_OEQ ||
2995         VecPred == CmpInst::FCMP_UNE) {
2996       static const auto ValidMinMaxTys = {
2997           MVT::v8i8,  MVT::v16i8, MVT::v4i16, MVT::v8i16, MVT::v2i32,
2998           MVT::v4i32, MVT::v2i64, MVT::v2f32, MVT::v4f32, MVT::v2f64};
2999       static const auto ValidFP16MinMaxTys = {MVT::v4f16, MVT::v8f16};
3000 
3001       auto LT = getTypeLegalizationCost(ValTy);
3002       if (any_of(ValidMinMaxTys, [&LT](MVT M) { return M == LT.second; }) ||
3003           (ST->hasFullFP16() &&
3004            any_of(ValidFP16MinMaxTys, [&LT](MVT M) { return M == LT.second; })))
3005         return LT.first;
3006     }
3007 
3008     static const TypeConversionCostTblEntry
3009     VectorSelectTbl[] = {
3010       { ISD::SELECT, MVT::v2i1, MVT::v2f32, 2 },
3011       { ISD::SELECT, MVT::v2i1, MVT::v2f64, 2 },
3012       { ISD::SELECT, MVT::v4i1, MVT::v4f32, 2 },
3013       { ISD::SELECT, MVT::v4i1, MVT::v4f16, 2 },
3014       { ISD::SELECT, MVT::v8i1, MVT::v8f16, 2 },
3015       { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 },
3016       { ISD::SELECT, MVT::v8i1, MVT::v8i32, 8 },
3017       { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 },
3018       { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4 * AmortizationCost },
3019       { ISD::SELECT, MVT::v8i1, MVT::v8i64, 8 * AmortizationCost },
3020       { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost }
3021     };
3022 
3023     EVT SelCondTy = TLI->getValueType(DL, CondTy);
3024     EVT SelValTy = TLI->getValueType(DL, ValTy);
3025     if (SelCondTy.isSimple() && SelValTy.isSimple()) {
3026       if (const auto *Entry = ConvertCostTableLookup(VectorSelectTbl, ISD,
3027                                                      SelCondTy.getSimpleVT(),
3028                                                      SelValTy.getSimpleVT()))
3029         return Entry->Cost;
3030     }
3031   }
3032 
3033   if (isa<FixedVectorType>(ValTy) && ISD == ISD::SETCC) {
3034     auto LT = getTypeLegalizationCost(ValTy);
3035     // Cost v4f16 FCmp without FP16 support via converting to v4f32 and back.
3036     if (LT.second == MVT::v4f16 && !ST->hasFullFP16())
3037       return LT.first * 4; // fcvtl + fcvtl + fcmp + xtn
3038   }
3039 
3040   // Treat the icmp in icmp(and, 0) as free, as we can make use of ands.
3041   // FIXME: This can apply to more conditions and add/sub if it can be shown to
3042   // be profitable.
3043   if (ValTy->isIntegerTy() && ISD == ISD::SETCC && I &&
3044       ICmpInst::isEquality(VecPred) &&
3045       TLI->isTypeLegal(TLI->getValueType(DL, ValTy)) &&
3046       match(I->getOperand(1), m_Zero()) &&
3047       match(I->getOperand(0), m_And(m_Value(), m_Value())))
3048     return 0;
3049 
3050   // The base case handles scalable vectors fine for now, since it treats the
3051   // cost as 1 * legalization cost.
3052   return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
3053 }
3054 
3055 AArch64TTIImpl::TTI::MemCmpExpansionOptions
3056 AArch64TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
3057   TTI::MemCmpExpansionOptions Options;
3058   if (ST->requiresStrictAlign()) {
3059     // TODO: Add cost modeling for strict align. Misaligned loads expand to
3060     // a bunch of instructions when strict align is enabled.
3061     return Options;
3062   }
3063   Options.AllowOverlappingLoads = true;
3064   Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
3065   Options.NumLoadsPerBlock = Options.MaxNumLoads;
3066   // TODO: Though vector loads usually perform well on AArch64, in some targets
3067   // they may wake up the FP unit, which raises the power consumption.  Perhaps
3068   // they could be used with no holds barred (-O3).
3069   Options.LoadSizes = {8, 4, 2, 1};
3070   Options.AllowedTailExpansions = {3, 5, 6};
3071   return Options;
3072 }
3073 
3074 bool AArch64TTIImpl::prefersVectorizedAddressing() const {
3075   return ST->hasSVE();
3076 }
3077 
3078 InstructionCost
3079 AArch64TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
3080                                       Align Alignment, unsigned AddressSpace,
3081                                       TTI::TargetCostKind CostKind) {
3082   if (useNeonVector(Src))
3083     return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
3084                                         CostKind);
3085   auto LT = getTypeLegalizationCost(Src);
3086   if (!LT.first.isValid())
3087     return InstructionCost::getInvalid();
3088 
3089   // The code-generator is currently not able to handle scalable vectors
3090   // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
3091   // it. This change will be removed when code-generation for these types is
3092   // sufficiently reliable.
3093   if (cast<VectorType>(Src)->getElementCount() == ElementCount::getScalable(1))
3094     return InstructionCost::getInvalid();
3095 
3096   return LT.first;
3097 }
3098 
3099 static unsigned getSVEGatherScatterOverhead(unsigned Opcode) {
3100   return Opcode == Instruction::Load ? SVEGatherOverhead : SVEScatterOverhead;
3101 }
3102 
3103 InstructionCost AArch64TTIImpl::getGatherScatterOpCost(
3104     unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
3105     Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
3106   if (useNeonVector(DataTy) || !isLegalMaskedGatherScatter(DataTy))
3107     return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
3108                                          Alignment, CostKind, I);
3109   auto *VT = cast<VectorType>(DataTy);
3110   auto LT = getTypeLegalizationCost(DataTy);
3111   if (!LT.first.isValid())
3112     return InstructionCost::getInvalid();
3113 
3114   if (!LT.second.isVector() ||
3115       !isElementTypeLegalForScalableVector(VT->getElementType()))
3116     return InstructionCost::getInvalid();
3117 
3118   // The code-generator is currently not able to handle scalable vectors
3119   // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
3120   // it. This change will be removed when code-generation for these types is
3121   // sufficiently reliable.
3122   if (cast<VectorType>(DataTy)->getElementCount() ==
3123       ElementCount::getScalable(1))
3124     return InstructionCost::getInvalid();
3125 
3126   ElementCount LegalVF = LT.second.getVectorElementCount();
3127   InstructionCost MemOpCost =
3128       getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind,
3129                       {TTI::OK_AnyValue, TTI::OP_None}, I);
3130   // Add on an overhead cost for using gathers/scatters.
3131   // TODO: At the moment this is applied unilaterally for all CPUs, but at some
3132   // point we may want a per-CPU overhead.
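       // The total cost is then LT.first * (scalar memory-op cost * overhead)
       // scaled by the maximum number of elements the legalized vector can
       // hold, which is an upper bound when the exact vscale is unknown.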
3133   MemOpCost *= getSVEGatherScatterOverhead(Opcode);
3134   return LT.first * MemOpCost * getMaxNumElements(LegalVF);
3135 }
3136 
3137 bool AArch64TTIImpl::useNeonVector(const Type *Ty) const {
3138   return isa<FixedVectorType>(Ty) && !ST->useSVEForFixedLengthVectors();
3139 }
3140 
3141 InstructionCost AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
3142                                                 MaybeAlign Alignment,
3143                                                 unsigned AddressSpace,
3144                                                 TTI::TargetCostKind CostKind,
3145                                                 TTI::OperandValueInfo OpInfo,
3146                                                 const Instruction *I) {
3147   EVT VT = TLI->getValueType(DL, Ty, true);
3148   // Type legalization can't handle structs
3149   if (VT == MVT::Other)
3150     return BaseT::getMemoryOpCost(Opcode, Ty, Alignment, AddressSpace,
3151                                   CostKind);
3152 
3153   auto LT = getTypeLegalizationCost(Ty);
3154   if (!LT.first.isValid())
3155     return InstructionCost::getInvalid();
3156 
3157   // The code-generator is currently not able to handle scalable vectors
3158   // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
3159   // it. This change will be removed when code-generation for these types is
3160   // sufficiently reliable.
3161   if (auto *VTy = dyn_cast<ScalableVectorType>(Ty))
3162     if (VTy->getElementCount() == ElementCount::getScalable(1))
3163       return InstructionCost::getInvalid();
3164 
3165   // TODO: consider latency as well for TCK_SizeAndLatency.
3166   if (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency)
3167     return LT.first;
3168 
3169   if (CostKind != TTI::TCK_RecipThroughput)
3170     return 1;
3171 
3172   if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store &&
3173       LT.second.is128BitVector() && (!Alignment || *Alignment < Align(16))) {
3174     // Unaligned stores are extremely inefficient. We don't split all
3175     // unaligned 128-bit stores because of the negative impact that has been
3176     // shown in practice on inlined block copy code.
3177     // We make such stores expensive so that we will only vectorize if there
3178     // are 6 other instructions getting vectorized.
3179     const int AmortizationCost = 6;
3180 
3181     return LT.first * 2 * AmortizationCost;
3182   }
3183 
3184   // Opaque ptr or ptr vector types are i64s and can be lowered to STP/LDPs.
3185   if (Ty->isPtrOrPtrVectorTy())
3186     return LT.first;
3187 
3188   if (useNeonVector(Ty)) {
3189     // Check truncating stores and extending loads.
3190     if (Ty->getScalarSizeInBits() != LT.second.getScalarSizeInBits()) {
3191       // v4i8 types are lowered to a scalar load/store and sshll/xtn.
3192       if (VT == MVT::v4i8)
3193         return 2;
3194       // Otherwise we need to scalarize.
3195       return cast<FixedVectorType>(Ty)->getNumElements() * 2;
3196     }
3197     EVT EltVT = VT.getVectorElementType();
3198     unsigned EltSize = EltVT.getScalarSizeInBits();
3199     if (!isPowerOf2_32(EltSize) || EltSize < 8 || EltSize > 64 ||
3200         VT.getVectorNumElements() >= (128 / EltSize) || !Alignment ||
3201         *Alignment != Align(1))
3202       return LT.first;
3203     // FIXME: v3i8 lowering currently is very inefficient, due to automatic
3204     // widening to v4i8, which produces suboptimal results.
3205     if (VT.getVectorNumElements() == 3 && EltVT == MVT::i8)
3206       return LT.first;
3207 
3208     // Check non-power-of-2 loads/stores for legal vector element types with
3209     // NEON. Non-power-of-2 memory ops will get broken down to a set of
3210     // operations on smaller power-of-2 ops, including ld1/st1.
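         // For example (illustrative): a v7i8 access is decomposed as
         // v4i8 + (v2i8 + v1i8), giving a cost of 3.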
3211     LLVMContext &C = Ty->getContext();
3212     InstructionCost Cost(0);
3213     SmallVector<EVT> TypeWorklist;
3214     TypeWorklist.push_back(VT);
3215     while (!TypeWorklist.empty()) {
3216       EVT CurrVT = TypeWorklist.pop_back_val();
3217       unsigned CurrNumElements = CurrVT.getVectorNumElements();
3218       if (isPowerOf2_32(CurrNumElements)) {
3219         Cost += 1;
3220         continue;
3221       }
3222 
3223       unsigned PrevPow2 = NextPowerOf2(CurrNumElements) / 2;
3224       TypeWorklist.push_back(EVT::getVectorVT(C, EltVT, PrevPow2));
3225       TypeWorklist.push_back(
3226           EVT::getVectorVT(C, EltVT, CurrNumElements - PrevPow2));
3227     }
3228     return Cost;
3229   }
3230 
3231   return LT.first;
3232 }
3233 
3234 InstructionCost AArch64TTIImpl::getInterleavedMemoryOpCost(
3235     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
3236     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
3237     bool UseMaskForCond, bool UseMaskForGaps) {
3238   assert(Factor >= 2 && "Invalid interleave factor");
3239   auto *VecVTy = cast<VectorType>(VecTy);
3240 
3241   if (VecTy->isScalableTy() && (!ST->hasSVE() || Factor != 2))
3242     return InstructionCost::getInvalid();
3243 
3244   // Vectorization for masked interleaved accesses is only enabled for scalable
3245   // VF.
3246   if (!VecTy->isScalableTy() && (UseMaskForCond || UseMaskForGaps))
3247     return InstructionCost::getInvalid();
3248 
3249   if (!UseMaskForGaps && Factor <= TLI->getMaxSupportedInterleaveFactor()) {
3250     unsigned MinElts = VecVTy->getElementCount().getKnownMinValue();
3251     auto *SubVecTy =
3252         VectorType::get(VecVTy->getElementType(),
3253                         VecVTy->getElementCount().divideCoefficientBy(Factor));
3254 
3255     // ldN/stN only support legal vector types of size 64 or 128 in bits.
3256     // Accesses having vector types that are a multiple of 128 bits can be
3257     // matched to more than one ldN/stN instruction.
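         // For example (illustrative, NEON case): a <16 x i32> access with
         // Factor == 2 uses <8 x i32> sub-vectors; each 256-bit sub-vector
         // needs two 128-bit ld2/st2 accesses, giving a cost of 2 * 2 == 4.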
3258     bool UseScalable;
3259     if (MinElts % Factor == 0 &&
3260         TLI->isLegalInterleavedAccessType(SubVecTy, DL, UseScalable))
3261       return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL, UseScalable);
3262   }
3263 
3264   return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
3265                                            Alignment, AddressSpace, CostKind,
3266                                            UseMaskForCond, UseMaskForGaps);
3267 }
3268 
3269 InstructionCost
3270 AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
3271   InstructionCost Cost = 0;
3272   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
3273   for (auto *I : Tys) {
3274     if (!I->isVectorTy())
3275       continue;
3276     if (I->getScalarSizeInBits() * cast<FixedVectorType>(I)->getNumElements() ==
3277         128)
3278       Cost += getMemoryOpCost(Instruction::Store, I, Align(128), 0, CostKind) +
3279               getMemoryOpCost(Instruction::Load, I, Align(128), 0, CostKind);
3280   }
3281   return Cost;
3282 }
3283 
3284 unsigned AArch64TTIImpl::getMaxInterleaveFactor(ElementCount VF) {
3285   return ST->getMaxInterleaveFactor();
3286 }
3287 
3288 // For Falkor, we want to avoid having too many strided loads in a loop since
3289 // that can exhaust the HW prefetcher resources.  We adjust the unroller
3290 // MaxCount preference below to attempt to ensure unrolling doesn't create too
3291 // many strided loads.
3292 static void
3293 getFalkorUnrollingPreferences(Loop *L, ScalarEvolution &SE,
3294                               TargetTransformInfo::UnrollingPreferences &UP) {
3295   enum { MaxStridedLoads = 7 };
3296   auto countStridedLoads = [](Loop *L, ScalarEvolution &SE) {
3297     int StridedLoads = 0;
3298     // FIXME? We could make this more precise by looking at the CFG and
3299     // e.g. not counting loads in each side of an if-then-else diamond.
3300     for (const auto BB : L->blocks()) {
3301       for (auto &I : *BB) {
3302         LoadInst *LMemI = dyn_cast<LoadInst>(&I);
3303         if (!LMemI)
3304           continue;
3305 
3306         Value *PtrValue = LMemI->getPointerOperand();
3307         if (L->isLoopInvariant(PtrValue))
3308           continue;
3309 
3310         const SCEV *LSCEV = SE.getSCEV(PtrValue);
3311         const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV);
3312         if (!LSCEVAddRec || !LSCEVAddRec->isAffine())
3313           continue;
3314 
3315         // FIXME? We could take pairing of unrolled load copies into account
3316         // by looking at the AddRec, but we would probably have to limit this
3317         // to loops with no stores or other memory optimization barriers.
3318         ++StridedLoads;
3319         // We've seen enough strided loads that seeing more won't make a
3320         // difference.
3321         if (StridedLoads > MaxStridedLoads / 2)
3322           return StridedLoads;
3323       }
3324     }
3325     return StridedLoads;
3326   };
3327 
3328   int StridedLoads = countStridedLoads(L, SE);
3329   LLVM_DEBUG(dbgs() << "falkor-hwpf: detected " << StridedLoads
3330                     << " strided loads\n");
3331   // Pick the largest power of 2 unroll count that won't result in too many
3332   // strided loads.
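       // For example (illustrative): with MaxStridedLoads == 7 and 3 detected
       // strided loads, MaxCount becomes 1 << Log2_32(7 / 3) == 2.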
3333   if (StridedLoads) {
3334     UP.MaxCount = 1 << Log2_32(MaxStridedLoads / StridedLoads);
3335     LLVM_DEBUG(dbgs() << "falkor-hwpf: setting unroll MaxCount to "
3336                       << UP.MaxCount << '\n');
3337   }
3338 }
3339 
3340 void AArch64TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
3341                                              TTI::UnrollingPreferences &UP,
3342                                              OptimizationRemarkEmitter *ORE) {
3343   // Enable partial unrolling and runtime unrolling.
3344   BaseT::getUnrollingPreferences(L, SE, UP, ORE);
3345 
3346   UP.UpperBound = true;
3347 
3348   // Inner loops are more likely to be hot, and the runtime check can be
3349   // hoisted out by the LICM pass, so the overhead is lower; try a larger
3350   // threshold to unroll more loops.
3351   if (L->getLoopDepth() > 1)
3352     UP.PartialThreshold *= 2;
3353 
3354   // Disable partial & runtime unrolling on -Os.
3355   UP.PartialOptSizeThreshold = 0;
3356 
3357   if (ST->getProcFamily() == AArch64Subtarget::Falkor &&
3358       EnableFalkorHWPFUnrollFix)
3359     getFalkorUnrollingPreferences(L, SE, UP);
3360 
3361   // Scan the loop: don't unroll loops with calls as this could prevent
3362   // inlining. Don't unroll vector loops either, as they don't benefit much from
3363   // unrolling.
3364   for (auto *BB : L->getBlocks()) {
3365     for (auto &I : *BB) {
3366       // Don't unroll vectorised loops.
3367       if (I.getType()->isVectorTy())
3368         return;
3369 
3370       if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
3371         if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
3372           if (!isLoweredToCall(F))
3373             continue;
3374         }
3375         return;
3376       }
3377     }
3378   }
3379 
3380   // Enable runtime unrolling for in-order models.
3381   // If mcpu is omitted, getProcFamily() returns AArch64Subtarget::Others, so by
3382   // checking for that case, we can ensure that the default behaviour is
3383   // unchanged.
3384   if (ST->getProcFamily() != AArch64Subtarget::Others &&
3385       !ST->getSchedModel().isOutOfOrder()) {
3386     UP.Runtime = true;
3387     UP.Partial = true;
3388     UP.UnrollRemainder = true;
3389     UP.DefaultUnrollRuntimeCount = 4;
3390 
3391     UP.UnrollAndJam = true;
3392     UP.UnrollAndJamInnerLoopThreshold = 60;
3393   }
3394 }
3395 
3396 void AArch64TTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
3397                                            TTI::PeelingPreferences &PP) {
3398   BaseT::getPeelingPreferences(L, SE, PP);
3399 }
3400 
3401 Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
3402                                                          Type *ExpectedType) {
3403   switch (Inst->getIntrinsicID()) {
3404   default:
3405     return nullptr;
3406   case Intrinsic::aarch64_neon_st2:
3407   case Intrinsic::aarch64_neon_st3:
3408   case Intrinsic::aarch64_neon_st4: {
3409     // Create a struct type
3410     StructType *ST = dyn_cast<StructType>(ExpectedType);
3411     if (!ST)
3412       return nullptr;
3413     unsigned NumElts = Inst->arg_size() - 1;
3414     if (ST->getNumElements() != NumElts)
3415       return nullptr;
3416     for (unsigned i = 0, e = NumElts; i != e; ++i) {
3417       if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
3418         return nullptr;
3419     }
3420     Value *Res = PoisonValue::get(ExpectedType);
3421     IRBuilder<> Builder(Inst);
3422     for (unsigned i = 0, e = NumElts; i != e; ++i) {
3423       Value *L = Inst->getArgOperand(i);
3424       Res = Builder.CreateInsertValue(Res, L, i);
3425     }
3426     return Res;
3427   }
3428   case Intrinsic::aarch64_neon_ld2:
3429   case Intrinsic::aarch64_neon_ld3:
3430   case Intrinsic::aarch64_neon_ld4:
3431     if (Inst->getType() == ExpectedType)
3432       return Inst;
3433     return nullptr;
3434   }
3435 }
3436 
3437 bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
3438                                         MemIntrinsicInfo &Info) {
3439   switch (Inst->getIntrinsicID()) {
3440   default:
3441     break;
3442   case Intrinsic::aarch64_neon_ld2:
3443   case Intrinsic::aarch64_neon_ld3:
3444   case Intrinsic::aarch64_neon_ld4:
3445     Info.ReadMem = true;
3446     Info.WriteMem = false;
3447     Info.PtrVal = Inst->getArgOperand(0);
3448     break;
3449   case Intrinsic::aarch64_neon_st2:
3450   case Intrinsic::aarch64_neon_st3:
3451   case Intrinsic::aarch64_neon_st4:
3452     Info.ReadMem = false;
3453     Info.WriteMem = true;
3454     Info.PtrVal = Inst->getArgOperand(Inst->arg_size() - 1);
3455     break;
3456   }
3457 
3458   switch (Inst->getIntrinsicID()) {
3459   default:
3460     return false;
3461   case Intrinsic::aarch64_neon_ld2:
3462   case Intrinsic::aarch64_neon_st2:
3463     Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
3464     break;
3465   case Intrinsic::aarch64_neon_ld3:
3466   case Intrinsic::aarch64_neon_st3:
3467     Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
3468     break;
3469   case Intrinsic::aarch64_neon_ld4:
3470   case Intrinsic::aarch64_neon_st4:
3471     Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
3472     break;
3473   }
3474   return true;
3475 }
3476 
3477 /// See if \p I should be considered for address type promotion. We check if \p
3478 /// I is a sext with the right type that is used in memory accesses. If it is
3479 /// used in a "complex" getelementptr, we allow it to be promoted without
3480 /// finding other sext instructions that sign extended the same initial value.
3481 /// A getelementptr is considered "complex" if it has more than 2 operands.
3482 bool AArch64TTIImpl::shouldConsiderAddressTypePromotion(
3483     const Instruction &I, bool &AllowPromotionWithoutCommonHeader) {
3484   bool Considerable = false;
3485   AllowPromotionWithoutCommonHeader = false;
3486   if (!isa<SExtInst>(&I))
3487     return false;
3488   Type *ConsideredSExtType =
3489       Type::getInt64Ty(I.getParent()->getParent()->getContext());
3490   if (I.getType() != ConsideredSExtType)
3491     return false;
3492   // See if the sext is the one with the right type and used in at least one
3493   // GetElementPtrInst.
3494   for (const User *U : I.users()) {
3495     if (const GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(U)) {
3496       Considerable = true;
3497       // A getelementptr is considered as "complex" if it has more than 2
3498       // operands. We will promote a SExt used in such a complex GEP, as we
3499       // expect some computation to be merged if it is done on 64 bits.
3500       if (GEPInst->getNumOperands() > 2) {
3501         AllowPromotionWithoutCommonHeader = true;
3502         break;
3503       }
3504     }
3505   }
3506   return Considerable;
3507 }
3508 
3509 bool AArch64TTIImpl::isLegalToVectorizeReduction(
3510     const RecurrenceDescriptor &RdxDesc, ElementCount VF) const {
3511   if (!VF.isScalable())
3512     return true;
3513 
3514   Type *Ty = RdxDesc.getRecurrenceType();
3515   if (Ty->isBFloatTy() || !isElementTypeLegalForScalableVector(Ty))
3516     return false;
3517 
3518   switch (RdxDesc.getRecurrenceKind()) {
3519   case RecurKind::Add:
3520   case RecurKind::FAdd:
3521   case RecurKind::And:
3522   case RecurKind::Or:
3523   case RecurKind::Xor:
3524   case RecurKind::SMin:
3525   case RecurKind::SMax:
3526   case RecurKind::UMin:
3527   case RecurKind::UMax:
3528   case RecurKind::FMin:
3529   case RecurKind::FMax:
3530   case RecurKind::FMulAdd:
3531   case RecurKind::IAnyOf:
3532   case RecurKind::FAnyOf:
3533     return true;
3534   default:
3535     return false;
3536   }
3537 }
3538 
3539 InstructionCost
3540 AArch64TTIImpl::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
3541                                        FastMathFlags FMF,
3542                                        TTI::TargetCostKind CostKind) {
3543   std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
3544 
3545   if (LT.second.getScalarType() == MVT::f16 && !ST->hasFullFP16())
3546     return BaseT::getMinMaxReductionCost(IID, Ty, FMF, CostKind);
3547 
3548   InstructionCost LegalizationCost = 0;
3549   if (LT.first > 1) {
3550     Type *LegalVTy = EVT(LT.second).getTypeForEVT(Ty->getContext());
3551     IntrinsicCostAttributes Attrs(IID, LegalVTy, {LegalVTy, LegalVTy}, FMF);
3552     LegalizationCost = getIntrinsicInstrCost(Attrs, CostKind) * (LT.first - 1);
3553   }
3554 
3555   return LegalizationCost + /*Cost of horizontal reduction*/ 2;
3556 }
3557 
3558 InstructionCost AArch64TTIImpl::getArithmeticReductionCostSVE(
3559     unsigned Opcode, VectorType *ValTy, TTI::TargetCostKind CostKind) {
3560   std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
3561   InstructionCost LegalizationCost = 0;
3562   if (LT.first > 1) {
3563     Type *LegalVTy = EVT(LT.second).getTypeForEVT(ValTy->getContext());
3564     LegalizationCost = getArithmeticInstrCost(Opcode, LegalVTy, CostKind);
3565     LegalizationCost *= LT.first - 1;
3566   }
3567 
3568   int ISD = TLI->InstructionOpcodeToISD(Opcode);
3569   assert(ISD && "Invalid opcode");
3570   // Add the final reduction cost for the legal horizontal reduction
3571   switch (ISD) {
3572   case ISD::ADD:
3573   case ISD::AND:
3574   case ISD::OR:
3575   case ISD::XOR:
3576   case ISD::FADD:
3577     return LegalizationCost + 2;
3578   default:
3579     return InstructionCost::getInvalid();
3580   }
3581 }
3582 
3583 InstructionCost
3584 AArch64TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
3585                                            std::optional<FastMathFlags> FMF,
3586                                            TTI::TargetCostKind CostKind) {
3587   if (TTI::requiresOrderedReduction(FMF)) {
3588     if (auto *FixedVTy = dyn_cast<FixedVectorType>(ValTy)) {
3589       InstructionCost BaseCost =
3590           BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
3591       // Add on extra cost to reflect the extra overhead on some CPUs. We still
3592       // end up vectorizing for more computationally intensive loops.
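      // For example, an ordered <4 x float> fadd reduction is costed as the
      // base strict-reduction cost plus 4 (one per element).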
      return BaseCost + FixedVTy->getNumElements();
    }

    if (Opcode != Instruction::FAdd)
      return InstructionCost::getInvalid();

    auto *VTy = cast<ScalableVectorType>(ValTy);
    InstructionCost Cost =
        getArithmeticInstrCost(Opcode, VTy->getScalarType(), CostKind);
    Cost *= getMaxNumElements(VTy->getElementCount());
    return Cost;
  }

  if (isa<ScalableVectorType>(ValTy))
    return getArithmeticReductionCostSVE(Opcode, ValTy, CostKind);

  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
  MVT MTy = LT.second;
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Horizontal adds can use the 'addv' instruction. We model the cost of these
  // instructions as twice a normal vector add, plus 1 for each legalization
  // step (LT.first). This is the only arithmetic vector reduction operation for
  // which we have an instruction.
  // OR, XOR and AND costs should match the codegen from:
  // OR: llvm/test/CodeGen/AArch64/reduce-or.ll
  // XOR: llvm/test/CodeGen/AArch64/reduce-xor.ll
  // AND: llvm/test/CodeGen/AArch64/reduce-and.ll
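  // For example, an add reduction of <16 x i32> legalizes to four v4i32 parts
  // (LT.first == 4), giving a cost of (4 - 1) + 2 = 5 from the table below.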
  static const CostTblEntry CostTblNoPairwise[]{
      {ISD::ADD, MVT::v8i8,   2},
      {ISD::ADD, MVT::v16i8,  2},
      {ISD::ADD, MVT::v4i16,  2},
      {ISD::ADD, MVT::v8i16,  2},
      {ISD::ADD, MVT::v4i32,  2},
      {ISD::ADD, MVT::v2i64,  2},
      {ISD::OR,  MVT::v8i8,  15},
      {ISD::OR,  MVT::v16i8, 17},
      {ISD::OR,  MVT::v4i16,  7},
      {ISD::OR,  MVT::v8i16,  9},
      {ISD::OR,  MVT::v2i32,  3},
      {ISD::OR,  MVT::v4i32,  5},
      {ISD::OR,  MVT::v2i64,  3},
      {ISD::XOR, MVT::v8i8,  15},
      {ISD::XOR, MVT::v16i8, 17},
      {ISD::XOR, MVT::v4i16,  7},
      {ISD::XOR, MVT::v8i16,  9},
      {ISD::XOR, MVT::v2i32,  3},
      {ISD::XOR, MVT::v4i32,  5},
      {ISD::XOR, MVT::v2i64,  3},
      {ISD::AND, MVT::v8i8,  15},
      {ISD::AND, MVT::v16i8, 17},
      {ISD::AND, MVT::v4i16,  7},
      {ISD::AND, MVT::v8i16,  9},
      {ISD::AND, MVT::v2i32,  3},
      {ISD::AND, MVT::v4i32,  5},
      {ISD::AND, MVT::v2i64,  3},
  };
  switch (ISD) {
  default:
    break;
  case ISD::ADD:
    if (const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy))
      return (LT.first - 1) + Entry->Cost;
    break;
  case ISD::XOR:
  case ISD::AND:
  case ISD::OR:
    const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy);
    if (!Entry)
      break;
    auto *ValVTy = cast<FixedVectorType>(ValTy);
    if (MTy.getVectorNumElements() <= ValVTy->getNumElements() &&
        isPowerOf2_32(ValVTy->getNumElements())) {
      InstructionCost ExtraCost = 0;
      if (LT.first != 1) {
        // Type needs to be split, so there is an extra cost of LT.first - 1
        // arithmetic ops.
        auto *Ty = FixedVectorType::get(ValTy->getElementType(),
                                        MTy.getVectorNumElements());
        ExtraCost = getArithmeticInstrCost(Opcode, Ty, CostKind);
        ExtraCost *= LT.first - 1;
      }
      // All and/or/xor of i1 will be lowered with maxv/minv/addv + fmov
      auto Cost = ValVTy->getElementType()->isIntegerTy(1) ? 2 : Entry->Cost;
      return Cost + ExtraCost;
    }
    break;
  }
  return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
}

InstructionCost AArch64TTIImpl::getSpliceCost(VectorType *Tp, int Index) {
  static const CostTblEntry ShuffleTbl[] = {
      { TTI::SK_Splice, MVT::nxv16i8,  1 },
      { TTI::SK_Splice, MVT::nxv8i16,  1 },
      { TTI::SK_Splice, MVT::nxv4i32,  1 },
      { TTI::SK_Splice, MVT::nxv2i64,  1 },
      { TTI::SK_Splice, MVT::nxv2f16,  1 },
      { TTI::SK_Splice, MVT::nxv4f16,  1 },
      { TTI::SK_Splice, MVT::nxv8f16,  1 },
      { TTI::SK_Splice, MVT::nxv2bf16, 1 },
      { TTI::SK_Splice, MVT::nxv4bf16, 1 },
      { TTI::SK_Splice, MVT::nxv8bf16, 1 },
      { TTI::SK_Splice, MVT::nxv2f32,  1 },
      { TTI::SK_Splice, MVT::nxv4f32,  1 },
      { TTI::SK_Splice, MVT::nxv2f64,  1 },
  };

  // The code-generator is currently not able to handle scalable vectors
  // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
  // it. This change will be removed when code-generation for these types is
  // sufficiently reliable.
  if (Tp->getElementCount() == ElementCount::getScalable(1))
    return InstructionCost::getInvalid();

  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
  Type *LegalVTy = EVT(LT.second).getTypeForEVT(Tp->getContext());
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  EVT PromotedVT = LT.second.getScalarType() == MVT::i1
                       ? TLI->getPromotedVTForPredicate(EVT(LT.second))
                       : LT.second;
  Type *PromotedVTy = EVT(PromotedVT).getTypeForEVT(Tp->getContext());
  InstructionCost LegalizationCost = 0;
  if (Index < 0) {
    LegalizationCost =
        getCmpSelInstrCost(Instruction::ICmp, PromotedVTy, PromotedVTy,
                           CmpInst::BAD_ICMP_PREDICATE, CostKind) +
        getCmpSelInstrCost(Instruction::Select, PromotedVTy, LegalVTy,
                           CmpInst::BAD_ICMP_PREDICATE, CostKind);
  }
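  // A splice with a negative index is modelled above with an extra
  // compare + select on the promoted type.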

  // Predicated splices are promoted when lowering. See AArch64ISelLowering.cpp.
  // The cost is computed on the promoted type.
  if (LT.second.getScalarType() == MVT::i1) {
    LegalizationCost +=
        getCastInstrCost(Instruction::ZExt, PromotedVTy, LegalVTy,
                         TTI::CastContextHint::None, CostKind) +
        getCastInstrCost(Instruction::Trunc, LegalVTy, PromotedVTy,
                         TTI::CastContextHint::None, CostKind);
  }
  const auto *Entry =
      CostTableLookup(ShuffleTbl, TTI::SK_Splice, PromotedVT.getSimpleVT());
  assert(Entry && "Illegal Type for Splice");
  LegalizationCost += Entry->Cost;
  return LegalizationCost * LT.first;
}

InstructionCost AArch64TTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
                                               VectorType *Tp,
                                               ArrayRef<int> Mask,
                                               TTI::TargetCostKind CostKind,
                                               int Index, VectorType *SubTp,
                                               ArrayRef<const Value *> Args) {
  std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
  // If we have a Mask, and the LT is being legalized somehow, split the Mask
  // into smaller vectors and sum the cost of each shuffle.
  if (!Mask.empty() && isa<FixedVectorType>(Tp) && LT.second.isVector() &&
      Tp->getScalarSizeInBits() == LT.second.getScalarSizeInBits() &&
      Mask.size() > LT.second.getVectorNumElements() && !Index && !SubTp) {
    unsigned TpNumElts = Mask.size();
    unsigned LTNumElts = LT.second.getVectorNumElements();
    unsigned NumVecs = (TpNumElts + LTNumElts - 1) / LTNumElts;
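    // For example, an 8-element mask over a type that legalizes to 4-element
    // vectors (LTNumElts == 4) is split into two 4-element sub-shuffles.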
    VectorType *NTp =
        VectorType::get(Tp->getScalarType(), LT.second.getVectorElementCount());
    InstructionCost Cost;
    for (unsigned N = 0; N < NumVecs; N++) {
      SmallVector<int> NMask;
      // Split the existing mask into chunks of size LTNumElts. Track the source
      // sub-vectors to ensure the result has at most 2 inputs.
      unsigned Source1, Source2;
      unsigned NumSources = 0;
      for (unsigned E = 0; E < LTNumElts; E++) {
        int MaskElt = (N * LTNumElts + E < TpNumElts) ? Mask[N * LTNumElts + E]
                                                      : PoisonMaskElem;
        if (MaskElt < 0) {
          NMask.push_back(PoisonMaskElem);
          continue;
        }

        // Calculate which source from the input this comes from and whether it
        // is new to us.
        unsigned Source = MaskElt / LTNumElts;
        if (NumSources == 0) {
          Source1 = Source;
          NumSources = 1;
        } else if (NumSources == 1 && Source != Source1) {
          Source2 = Source;
          NumSources = 2;
        } else if (NumSources >= 2 && Source != Source1 && Source != Source2) {
          NumSources++;
        }

        // Add to the new mask. For the NumSources>2 case these are not correct,
        // but are only used for the modular lane number.
        if (Source == Source1)
          NMask.push_back(MaskElt % LTNumElts);
        else if (Source == Source2)
          NMask.push_back(MaskElt % LTNumElts + LTNumElts);
        else
          NMask.push_back(MaskElt % LTNumElts);
      }
      // If the sub-mask has at most 2 input sub-vectors then re-cost it using
      // getShuffleCost. If not then cost it using the worst case.
      if (NumSources <= 2)
        Cost += getShuffleCost(NumSources <= 1 ? TTI::SK_PermuteSingleSrc
                                               : TTI::SK_PermuteTwoSrc,
                               NTp, NMask, CostKind, 0, nullptr, Args);
      else if (any_of(enumerate(NMask), [&](const auto &ME) {
                 return ME.value() % LTNumElts == ME.index();
               }))
        Cost += LTNumElts - 1;
      else
        Cost += LTNumElts;
    }
    return Cost;
  }

  Kind = improveShuffleKindFromMask(Kind, Mask, Tp, Index, SubTp);

  // Check for broadcast loads, which are supported by the LD1R instruction.
  // In terms of code-size, the shuffle vector is free when a load + dup get
  // folded into a LD1R. That's what we check and return here. For performance
  // and reciprocal throughput, a LD1R is not completely free. In this case, we
  // return the cost for the broadcast below (i.e. 1 for most/all types), so
  // that we model the load + dup sequence slightly higher because LD1R is a
  // high latency instruction.
  if (CostKind == TTI::TCK_CodeSize && Kind == TTI::SK_Broadcast) {
    bool IsLoad = !Args.empty() && isa<LoadInst>(Args[0]);
    if (IsLoad && LT.second.isVector() &&
        isLegalBroadcastLoad(Tp->getElementType(),
                             LT.second.getVectorElementCount()))
      return 0;
  }

  // If we have 4 elements for the shuffle and a Mask, get the cost straight
  // from the perfect shuffle tables.
  if (Mask.size() == 4 && Tp->getElementCount() == ElementCount::getFixed(4) &&
      (Tp->getScalarSizeInBits() == 16 || Tp->getScalarSizeInBits() == 32) &&
      all_of(Mask, [](int E) { return E < 8; }))
    return getPerfectShuffleCost(Mask);

  if (Kind == TTI::SK_Broadcast || Kind == TTI::SK_Transpose ||
      Kind == TTI::SK_Select || Kind == TTI::SK_PermuteSingleSrc ||
      Kind == TTI::SK_Reverse || Kind == TTI::SK_Splice) {
    static const CostTblEntry ShuffleTbl[] = {
        // Broadcast shuffle kinds can be performed with 'dup'.
        {TTI::SK_Broadcast, MVT::v8i8, 1},
        {TTI::SK_Broadcast, MVT::v16i8, 1},
        {TTI::SK_Broadcast, MVT::v4i16, 1},
        {TTI::SK_Broadcast, MVT::v8i16, 1},
        {TTI::SK_Broadcast, MVT::v2i32, 1},
        {TTI::SK_Broadcast, MVT::v4i32, 1},
        {TTI::SK_Broadcast, MVT::v2i64, 1},
        {TTI::SK_Broadcast, MVT::v4f16, 1},
        {TTI::SK_Broadcast, MVT::v8f16, 1},
        {TTI::SK_Broadcast, MVT::v2f32, 1},
        {TTI::SK_Broadcast, MVT::v4f32, 1},
        {TTI::SK_Broadcast, MVT::v2f64, 1},
        // Transpose shuffle kinds can be performed with 'trn1/trn2' and
        // 'zip1/zip2' instructions.
        {TTI::SK_Transpose, MVT::v8i8, 1},
        {TTI::SK_Transpose, MVT::v16i8, 1},
        {TTI::SK_Transpose, MVT::v4i16, 1},
        {TTI::SK_Transpose, MVT::v8i16, 1},
        {TTI::SK_Transpose, MVT::v2i32, 1},
        {TTI::SK_Transpose, MVT::v4i32, 1},
        {TTI::SK_Transpose, MVT::v2i64, 1},
        {TTI::SK_Transpose, MVT::v4f16, 1},
        {TTI::SK_Transpose, MVT::v8f16, 1},
        {TTI::SK_Transpose, MVT::v2f32, 1},
        {TTI::SK_Transpose, MVT::v4f32, 1},
        {TTI::SK_Transpose, MVT::v2f64, 1},
        // Select shuffle kinds.
        // TODO: handle vXi8/vXi16.
        {TTI::SK_Select, MVT::v2i32, 1}, // mov.
        {TTI::SK_Select, MVT::v4i32, 2}, // rev+trn (or similar).
        {TTI::SK_Select, MVT::v2i64, 1}, // mov.
        {TTI::SK_Select, MVT::v2f32, 1}, // mov.
        {TTI::SK_Select, MVT::v4f32, 2}, // rev+trn (or similar).
        {TTI::SK_Select, MVT::v2f64, 1}, // mov.
        // PermuteSingleSrc shuffle kinds.
        {TTI::SK_PermuteSingleSrc, MVT::v2i32, 1}, // mov.
        {TTI::SK_PermuteSingleSrc, MVT::v4i32, 3}, // perfectshuffle worst case.
        {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // mov.
        {TTI::SK_PermuteSingleSrc, MVT::v2f32, 1}, // mov.
        {TTI::SK_PermuteSingleSrc, MVT::v4f32, 3}, // perfectshuffle worst case.
        {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // mov.
        {TTI::SK_PermuteSingleSrc, MVT::v4i16, 3}, // perfectshuffle worst case.
        {TTI::SK_PermuteSingleSrc, MVT::v4f16, 3}, // perfectshuffle worst case.
        {TTI::SK_PermuteSingleSrc, MVT::v4bf16, 3}, // same
        {TTI::SK_PermuteSingleSrc, MVT::v8i16, 8},  // constpool + load + tbl
        {TTI::SK_PermuteSingleSrc, MVT::v8f16, 8},  // constpool + load + tbl
        {TTI::SK_PermuteSingleSrc, MVT::v8bf16, 8}, // constpool + load + tbl
        {TTI::SK_PermuteSingleSrc, MVT::v8i8, 8},   // constpool + load + tbl
        {TTI::SK_PermuteSingleSrc, MVT::v16i8, 8},  // constpool + load + tbl
        // Reverse can be lowered with `rev`.
        {TTI::SK_Reverse, MVT::v2i32, 1}, // REV64
        {TTI::SK_Reverse, MVT::v4i32, 2}, // REV64; EXT
        {TTI::SK_Reverse, MVT::v2i64, 1}, // EXT
        {TTI::SK_Reverse, MVT::v2f32, 1}, // REV64
        {TTI::SK_Reverse, MVT::v4f32, 2}, // REV64; EXT
        {TTI::SK_Reverse, MVT::v2f64, 1}, // EXT
        {TTI::SK_Reverse, MVT::v8f16, 2}, // REV64; EXT
        {TTI::SK_Reverse, MVT::v8i16, 2}, // REV64; EXT
        {TTI::SK_Reverse, MVT::v16i8, 2}, // REV64; EXT
        {TTI::SK_Reverse, MVT::v4f16, 1}, // REV64
        {TTI::SK_Reverse, MVT::v4i16, 1}, // REV64
        {TTI::SK_Reverse, MVT::v8i8, 1},  // REV64
        // Splice can all be lowered as `ext`.
        {TTI::SK_Splice, MVT::v2i32, 1},
        {TTI::SK_Splice, MVT::v4i32, 1},
        {TTI::SK_Splice, MVT::v2i64, 1},
        {TTI::SK_Splice, MVT::v2f32, 1},
        {TTI::SK_Splice, MVT::v4f32, 1},
        {TTI::SK_Splice, MVT::v2f64, 1},
        {TTI::SK_Splice, MVT::v8f16, 1},
        {TTI::SK_Splice, MVT::v8bf16, 1},
        {TTI::SK_Splice, MVT::v8i16, 1},
        {TTI::SK_Splice, MVT::v16i8, 1},
        {TTI::SK_Splice, MVT::v4bf16, 1},
        {TTI::SK_Splice, MVT::v4f16, 1},
        {TTI::SK_Splice, MVT::v4i16, 1},
        {TTI::SK_Splice, MVT::v8i8, 1},
        // Broadcast shuffle kinds for scalable vectors
        {TTI::SK_Broadcast, MVT::nxv16i8, 1},
        {TTI::SK_Broadcast, MVT::nxv8i16, 1},
        {TTI::SK_Broadcast, MVT::nxv4i32, 1},
        {TTI::SK_Broadcast, MVT::nxv2i64, 1},
        {TTI::SK_Broadcast, MVT::nxv2f16, 1},
        {TTI::SK_Broadcast, MVT::nxv4f16, 1},
        {TTI::SK_Broadcast, MVT::nxv8f16, 1},
        {TTI::SK_Broadcast, MVT::nxv2bf16, 1},
        {TTI::SK_Broadcast, MVT::nxv4bf16, 1},
        {TTI::SK_Broadcast, MVT::nxv8bf16, 1},
        {TTI::SK_Broadcast, MVT::nxv2f32, 1},
        {TTI::SK_Broadcast, MVT::nxv4f32, 1},
        {TTI::SK_Broadcast, MVT::nxv2f64, 1},
        {TTI::SK_Broadcast, MVT::nxv16i1, 1},
        {TTI::SK_Broadcast, MVT::nxv8i1, 1},
        {TTI::SK_Broadcast, MVT::nxv4i1, 1},
        {TTI::SK_Broadcast, MVT::nxv2i1, 1},
        // Handle the cases for vector.reverse with scalable vectors
        {TTI::SK_Reverse, MVT::nxv16i8, 1},
        {TTI::SK_Reverse, MVT::nxv8i16, 1},
        {TTI::SK_Reverse, MVT::nxv4i32, 1},
        {TTI::SK_Reverse, MVT::nxv2i64, 1},
        {TTI::SK_Reverse, MVT::nxv2f16, 1},
        {TTI::SK_Reverse, MVT::nxv4f16, 1},
        {TTI::SK_Reverse, MVT::nxv8f16, 1},
        {TTI::SK_Reverse, MVT::nxv2bf16, 1},
        {TTI::SK_Reverse, MVT::nxv4bf16, 1},
        {TTI::SK_Reverse, MVT::nxv8bf16, 1},
        {TTI::SK_Reverse, MVT::nxv2f32, 1},
        {TTI::SK_Reverse, MVT::nxv4f32, 1},
        {TTI::SK_Reverse, MVT::nxv2f64, 1},
        {TTI::SK_Reverse, MVT::nxv16i1, 1},
        {TTI::SK_Reverse, MVT::nxv8i1, 1},
        {TTI::SK_Reverse, MVT::nxv4i1, 1},
        {TTI::SK_Reverse, MVT::nxv2i1, 1},
    };
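    // The table cost is per legal vector; types that need splitting scale
    // linearly via LT.first below. For example, reversing an nxv8i32 (two
    // legal nxv4i32 halves) is costed as 2 * 1 = 2.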
    if (const auto *Entry = CostTableLookup(ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;
  }

  if (Kind == TTI::SK_Splice && isa<ScalableVectorType>(Tp))
    return getSpliceCost(Tp, Index);

  // Inserting a subvector can often be done with either a D, S or H register
  // move, so long as the inserted vector is "aligned".
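  // For example, inserting a <2 x float> subvector into a <4 x float> at
  // index 0 or 2 is a single register move, costed as SubLT.first below.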
  if (Kind == TTI::SK_InsertSubvector && LT.second.isFixedLengthVector() &&
      LT.second.getSizeInBits() <= 128 && SubTp) {
    std::pair<InstructionCost, MVT> SubLT = getTypeLegalizationCost(SubTp);
    if (SubLT.second.isVector()) {
      int NumElts = LT.second.getVectorNumElements();
      int NumSubElts = SubLT.second.getVectorNumElements();
      if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
        return SubLT.first;
    }
  }

  return BaseT::getShuffleCost(Kind, Tp, Mask, CostKind, Index, SubTp);
}

static bool containsDecreasingPointers(Loop *TheLoop,
                                       PredicatedScalarEvolution *PSE) {
  const auto &Strides = DenseMap<Value *, const SCEV *>();
  for (BasicBlock *BB : TheLoop->blocks()) {
    // Scan the instructions in the block and look for addresses that are
    // consecutive and decreasing.
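    // For example, a load of the form A[N - i] with a positive induction
    // variable has a negative stride and is reported here.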
    for (Instruction &I : *BB) {
      if (isa<LoadInst>(&I) || isa<StoreInst>(&I)) {
        Value *Ptr = getLoadStorePointerOperand(&I);
        Type *AccessTy = getLoadStoreType(&I);
        if (getPtrStride(*PSE, AccessTy, Ptr, TheLoop, Strides, /*Assume=*/true,
                         /*ShouldCheckWrap=*/false)
                .value_or(0) < 0)
          return true;
      }
    }
  }
  return false;
}

bool AArch64TTIImpl::preferPredicateOverEpilogue(TailFoldingInfo *TFI) {
  if (!ST->hasSVE())
    return false;

  // We don't currently support vectorisation with interleaving for SVE - with
  // such loops we're better off not using tail-folding. This gives us a chance
  // to fall back on fixed-width vectorisation using NEON's ld2/st2/etc.
  if (TFI->IAI->hasGroups())
    return false;

  TailFoldingOpts Required = TailFoldingOpts::Disabled;
  if (TFI->LVL->getReductionVars().size())
    Required |= TailFoldingOpts::Reductions;
  if (TFI->LVL->getFixedOrderRecurrences().size())
    Required |= TailFoldingOpts::Recurrences;

  // We call this to discover whether any load/store pointers in the loop have
  // negative strides. This will require extra work to reverse the loop
  // predicate, which may be expensive.
  if (containsDecreasingPointers(TFI->LVL->getLoop(),
                                 TFI->LVL->getPredicatedScalarEvolution()))
    Required |= TailFoldingOpts::Reverse;
  if (Required == TailFoldingOpts::Disabled)
    Required |= TailFoldingOpts::Simple;

  if (!TailFoldingOptionLoc.satisfies(ST->getSVETailFoldingDefaultOpts(),
                                      Required))
    return false;

  // Don't tail-fold for tight loops where we would be better off interleaving
  // with an unpredicated loop.
  unsigned NumInsns = 0;
  for (BasicBlock *BB : TFI->LVL->getLoop()->blocks()) {
    NumInsns += BB->sizeWithoutDebug();
  }

  // We expect 4 of these to be an IV PHI, IV add, IV compare and branch.
  return NumInsns >= SVETailFoldInsnThreshold;
}

InstructionCost
AArch64TTIImpl::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                     int64_t BaseOffset, bool HasBaseReg,
                                     int64_t Scale, unsigned AddrSpace) const {
  // Scaling factors are not free at all.
  // Operands                     | Rt Latency
  // -------------------------------------------
  // Rt, [Xn, Xm]                 | 4
  // -------------------------------------------
  // Rt, [Xn, Xm, lsl #imm]       | Rn: 4 Rm: 5
  // Rt, [Xn, Wm, <extend> #imm]  |
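  // For example, "ldr w0, [x1, x2, lsl #2]" uses a scaled register offset
  // (Scale == 4 for a 32-bit access) and is costed as 1 below, whereas an
  // unscaled "[x1, x2]" (Scale == 1) is treated as free.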
  TargetLoweringBase::AddrMode AM;
  AM.BaseGV = BaseGV;
  AM.BaseOffs = BaseOffset;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Scale;
  if (getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace))
    // Scale represents reg2 * scale, thus account for 1 if
    // it is not equal to 0 or 1.
    return AM.Scale != 0 && AM.Scale != 1;
  return -1;
}

bool AArch64TTIImpl::shouldTreatInstructionLikeSelect(const Instruction *I) {
  // For binary operators (e.g. or) we need to be more careful than with
  // selects; here we only transform them if they are already at a natural
  // break point in the code, i.e. the end of a block with an unconditional
  // terminator.
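  // For example (illustrative IR), an "or i1 %a, %b" whose next instruction is
  // an unconditional "br label %exit" qualifies; an "or" in the middle of a
  // block does not.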
  if (EnableOrLikeSelectOpt && I->getOpcode() == Instruction::Or &&
      isa<BranchInst>(I->getNextNode()) &&
      cast<BranchInst>(I->getNextNode())->isUnconditional())
    return true;
  return BaseT::shouldTreatInstructionLikeSelect(I);
}