//===- TargetTransformInfo.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This pass exposes codegen information to IR-level passes. Every
/// transformation that uses codegen information is broken into three parts:
/// 1. The IR-level analysis pass.
/// 2. The IR-level transformation interface which provides the needed
///    information.
/// 3. Codegen-level implementation which uses target-specific hooks.
///
/// This file defines #2, which is the interface that IR-level transformations
/// use for querying the codegen.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
#define LLVM_ANALYSIS_TARGETTRANSFORMINFO_H

#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/InstructionCost.h"
#include <functional>

namespace llvm {

namespace Intrinsic {
typedef unsigned ID;
}

class AssumptionCache;
class BlockFrequencyInfo;
class DominatorTree;
class BranchInst;
class CallBase;
class ExtractElementInst;
class Function;
class GlobalValue;
class InstCombiner;
class IntrinsicInst;
class LoadInst;
class LoopAccessInfo;
class Loop;
class LoopInfo;
class ProfileSummaryInfo;
class SCEV;
class ScalarEvolution;
class StoreInst;
class SwitchInst;
class TargetLibraryInfo;
class Type;
class User;
class Value;
class VPIntrinsic;
struct KnownBits;
template <typename T> class Optional;

/// Information about a load/store intrinsic defined by the target.
struct MemIntrinsicInfo {
  /// This is the pointer that the intrinsic is loading from or storing to.
  /// If this is non-null, then analysis/optimization passes can assume that
  /// this intrinsic is functionally equivalent to a load/store from this
  /// pointer.
  Value *PtrVal = nullptr;

  // Ordering for atomic operations.
  AtomicOrdering Ordering = AtomicOrdering::NotAtomic;

  // Same Id is set by the target for corresponding load/store intrinsics.
  unsigned short MatchingId = 0;

  bool ReadMem = false;
  bool WriteMem = false;
  bool IsVolatile = false;

  bool isUnordered() const {
    return (Ordering == AtomicOrdering::NotAtomic ||
            Ordering == AtomicOrdering::Unordered) &&
           !IsVolatile;
  }
};

/// Attributes of a target dependent hardware loop.
struct HardwareLoopInfo {
  HardwareLoopInfo() = delete;
  HardwareLoopInfo(Loop *L) : L(L) {}
  Loop *L = nullptr;
  BasicBlock *ExitBlock = nullptr;
  BranchInst *ExitBranch = nullptr;
  const SCEV *ExitCount = nullptr;
  IntegerType *CountType = nullptr;
  Value *LoopDecrement = nullptr; // Decrement the loop counter by this
                                  // value in every iteration.
  bool IsNestingLegal = false;    // Can a hardware loop be a parent to
                                  // another hardware loop?
  bool CounterInReg = false;      // Should loop counter be updated in
                                  // the loop via a phi?
  bool PerformEntryTest = false;  // Generate the intrinsic which also performs
                                  // icmp ne zero on the loop counter value and
                                  // produces an i1 to guard the loop entry.
  bool isHardwareLoopCandidate(ScalarEvolution &SE, LoopInfo &LI,
                               DominatorTree &DT, bool ForceNestedLoop = false,
                               bool ForceHardwareLoopPHI = false);
  bool canAnalyze(LoopInfo &LI);
};

class IntrinsicCostAttributes {
  const IntrinsicInst *II = nullptr;
  Type *RetTy = nullptr;
  Intrinsic::ID IID;
  SmallVector<Type *, 4> ParamTys;
  SmallVector<const Value *, 4> Arguments;
  FastMathFlags FMF;
  // If ScalarizationCost is invalid (the default), the cost of scalarizing the
  // arguments and the return value will be computed based on types.
  InstructionCost ScalarizationCost = InstructionCost::getInvalid();

public:
  IntrinsicCostAttributes(
      Intrinsic::ID Id, const CallBase &CI,
      InstructionCost ScalarCost = InstructionCost::getInvalid());

  IntrinsicCostAttributes(
      Intrinsic::ID Id, Type *RTy, ArrayRef<Type *> Tys,
      FastMathFlags Flags = FastMathFlags(), const IntrinsicInst *I = nullptr,
      InstructionCost ScalarCost = InstructionCost::getInvalid());

  IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
                          ArrayRef<const Value *> Args);

  IntrinsicCostAttributes(
      Intrinsic::ID Id, Type *RTy, ArrayRef<const Value *> Args,
      ArrayRef<Type *> Tys, FastMathFlags Flags = FastMathFlags(),
      const IntrinsicInst *I = nullptr,
      InstructionCost ScalarCost = InstructionCost::getInvalid());

  Intrinsic::ID getID() const { return IID; }
  const IntrinsicInst *getInst() const { return II; }
  Type *getReturnType() const { return RetTy; }
  FastMathFlags getFlags() const { return FMF; }
  InstructionCost getScalarizationCost() const { return ScalarizationCost; }
  const SmallVectorImpl<const Value *> &getArgs() const { return Arguments; }
  const SmallVectorImpl<Type *> &getArgTypes() const { return ParamTys; }

  bool isTypeBasedOnly() const {
    return Arguments.empty();
  }

  bool skipScalarizationCost() const { return ScalarizationCost.isValid(); }
};

class TargetTransformInfo;
typedef TargetTransformInfo TTI;

/// This pass provides access to the codegen interfaces that are needed
/// for IR-level transformations.
class TargetTransformInfo {
public:
  /// Construct a TTI object using a type implementing the \c Concept
  /// API below.
  ///
  /// This is used by targets to construct a TTI wrapping their target-specific
  /// implementation that encodes appropriate costs for their target.
  template <typename T> TargetTransformInfo(T Impl);

  /// Construct a baseline TTI object using a minimal implementation of
  /// the \c Concept API below.
  ///
  /// The TTI implementation will reflect the information in the DataLayout
  /// provided if non-null.
  explicit TargetTransformInfo(const DataLayout &DL);

  // Provide move semantics.
  TargetTransformInfo(TargetTransformInfo &&Arg);
  TargetTransformInfo &operator=(TargetTransformInfo &&RHS);

  // We need to define the destructor out-of-line to define our sub-classes
  // out-of-line.
  ~TargetTransformInfo();

  /// Handle the invalidation of this information.
  ///
  /// When used as a result of \c TargetIRAnalysis this method will be called
  /// when the function this was computed for changes. When it returns false,
  /// the information is preserved across those changes.
  bool invalidate(Function &, const PreservedAnalyses &,
                  FunctionAnalysisManager::Invalidator &) {
    // FIXME: We should probably in some way ensure that the subtarget
    // information for a function hasn't changed.
    return false;
  }

  /// \name Generic Target Information
  /// @{

  /// The kind of cost model.
  ///
  /// There are several different cost models that can be customized by the
  /// target. The normalization of each cost model may be target specific.
  enum TargetCostKind {
    TCK_RecipThroughput, ///< Reciprocal throughput.
    TCK_Latency,         ///< The latency of the instruction.
    TCK_CodeSize,        ///< Instruction code size.
    TCK_SizeAndLatency   ///< The weighted sum of size and latency.
  };

  /// Query the cost of a specified instruction.
  ///
  /// Clients should use this interface to query the cost of an existing
  /// instruction. The instruction must have a valid parent (basic block).
  ///
  /// Note, this method does not cache the cost calculation and it
  /// can be expensive in some cases.
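  ///
  /// For example, a sketch of a typical query (assuming \c TTI is a valid
  /// TargetTransformInfo and \c I has a parent block):
  /// \code
  ///   InstructionCost Cost = TTI.getInstructionCost(I, TTI::TCK_CodeSize);
  ///   if (Cost.isValid() && *Cost.getValue() > TTI::TCC_Basic)
  ///     ; // Treat I as more expensive than a simple add.
  /// \endcode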
  InstructionCost getInstructionCost(const Instruction *I,
                                     enum TargetCostKind kind) const {
    InstructionCost Cost;
    switch (kind) {
    case TCK_RecipThroughput:
      Cost = getInstructionThroughput(I);
      break;
    case TCK_Latency:
      Cost = getInstructionLatency(I);
      break;
    case TCK_CodeSize:
    case TCK_SizeAndLatency:
      Cost = getUserCost(I, kind);
      break;
    }
    return Cost;
  }

  /// Underlying constants for 'cost' values in this interface.
  ///
  /// Many APIs in this interface return a cost. This enum defines the
  /// fundamental values that should be used to interpret (and produce) those
  /// costs. The costs are returned as an int rather than a member of this
  /// enumeration because it is expected that the cost of one IR instruction
  /// may have a multiplicative factor to it or otherwise won't fit directly
  /// into the enum. Moreover, it is common to sum or average costs which works
  /// better as simple integral values. Thus this enum only provides constants.
  /// Also note that the returned costs are signed integers to make it natural
  /// to add, subtract, and test with zero (a common boundary condition). It is
  /// not expected that 2^32 is a realistic cost to be modeling at any point.
  ///
  /// Note that these costs should usually reflect the intersection of code-size
  /// cost and execution cost. A free instruction is typically one that folds
  /// into another instruction. For example, reg-to-reg moves can often be
  /// skipped by renaming the registers in the CPU, but they still are encoded
  /// and thus wouldn't be considered 'free' here.
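  ///
  /// For example, a sketch of accumulating costs over a basic block in these
  /// units (assuming \c TTI and \c BB are in scope):
  /// \code
  ///   InstructionCost Total = TTI::TCC_Free; // i.e. 0
  ///   for (const Instruction &I : BB)
  ///     Total += TTI.getUserCost(&I, TTI::TCK_SizeAndLatency);
  /// \endcode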
  enum TargetCostConstants {
    TCC_Free = 0,     ///< Expected to fold away in lowering.
    TCC_Basic = 1,    ///< The cost of a typical 'add' instruction.
    TCC_Expensive = 4 ///< The cost of a 'div' instruction on x86.
  };

  /// Estimate the cost of a GEP operation when lowered.
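  ///
  /// For example, a sketch of querying the cost of an existing GEP
  /// instruction (assuming \c GEP and \c TTI are in scope):
  /// \code
  ///   SmallVector<const Value *, 4> Indices(GEP->idx_begin(), GEP->idx_end());
  ///   InstructionCost Cost = TTI.getGEPCost(GEP->getSourceElementType(),
  ///                                         GEP->getPointerOperand(), Indices);
  /// \endcode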
  InstructionCost
  getGEPCost(Type *PointeeType, const Value *Ptr,
             ArrayRef<const Value *> Operands,
             TargetCostKind CostKind = TCK_SizeAndLatency) const;

  /// \returns A value by which our inlining threshold should be multiplied.
  /// This is primarily used to bump up the inlining threshold wholesale on
  /// targets where calls are unusually expensive.
  ///
  /// TODO: This is a rather blunt instrument. Perhaps altering the costs of
  /// individual classes of instructions would be better.
  unsigned getInliningThresholdMultiplier() const;

  /// \returns A value to be added to the inlining threshold.
  unsigned adjustInliningThreshold(const CallBase *CB) const;

  /// \returns Vector bonus in percent.
  ///
  /// Vector bonuses: We want to more aggressively inline vector-dense kernels
  /// and apply this bonus based on the percentage of vector instructions. A
  /// bonus is applied if the vector instructions exceed 50% and half that
  /// amount is applied if it exceeds 10%. Note that these bonuses are somewhat
  /// arbitrary and evolved over time by accident as much as because they are
  /// principled bonuses.
  /// FIXME: It would be nice to base the bonus values on something more
  /// scientific. A target may have no bonus on vector instructions.
  int getInlinerVectorBonusPercent() const;

  /// \return the expected cost of a memcpy, which could e.g. depend on the
  /// source/destination type and alignment and the number of bytes copied.
  InstructionCost getMemcpyCost(const Instruction *I) const;

  /// \return The estimated number of case clusters when lowering \p 'SI'.
  /// \p JTSize is set to the jump table size only when \p SI is suitable for
  /// a jump table.
  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                            unsigned &JTSize,
                                            ProfileSummaryInfo *PSI,
                                            BlockFrequencyInfo *BFI) const;

  /// Estimate the cost of a given IR user when lowered.
  ///
  /// This can estimate the cost of either a ConstantExpr or Instruction when
  /// lowered.
  ///
  /// \p Operands is a list of operands which can be a result of transformations
  /// of the current operands. The number of operands on the list must be equal
  /// to the number of current operands the IR user has, and their order on the
  /// list must match the order of the current operands.
  ///
  /// The returned cost is defined in terms of \c TargetCostConstants, see its
  /// comments for a detailed explanation of the cost values.
  InstructionCost getUserCost(const User *U, ArrayRef<const Value *> Operands,
                              TargetCostKind CostKind) const;

  /// This is a helper function which calls the two-argument getUserCost
  /// with \p Operands which are the current operands U has.
  InstructionCost getUserCost(const User *U, TargetCostKind CostKind) const {
    SmallVector<const Value *, 4> Operands(U->operand_values());
    return getUserCost(U, Operands, CostKind);
  }

  /// If a branch or a select condition is skewed in one direction by more than
  /// this factor, it is very likely to be predicted correctly.
  BranchProbability getPredictableBranchThreshold() const;

  /// Return true if branch divergence exists.
  ///
  /// Branch divergence has a significantly negative impact on GPU performance
  /// when threads in the same wavefront take different paths due to conditional
  /// branches.
  bool hasBranchDivergence() const;

  /// Return true if the target prefers to use GPU divergence analysis to
  /// replace the legacy version.
  bool useGPUDivergenceAnalysis() const;

  /// Returns whether V is a source of divergence.
  ///
  /// This function provides the target-dependent information for
  /// the target-independent LegacyDivergenceAnalysis. LegacyDivergenceAnalysis
  /// first builds the dependency graph, and then runs the reachability
  /// algorithm starting with the sources of divergence.
  bool isSourceOfDivergence(const Value *V) const;

  // Returns true for the target-specific set of operations which produce a
  // uniform result even when taking non-uniform arguments.
  bool isAlwaysUniform(const Value *V) const;

  /// Returns the address space ID for a target's 'flat' address space. Note
  /// this is not necessarily the same as addrspace(0), which LLVM sometimes
  /// refers to as the generic address space. The flat address space is a
  /// generic address space that can be used to access multiple segments of
  /// memory with different address spaces. Access of a memory location through
  /// a pointer with this address space is expected to be legal but slower
  /// compared to the same memory location accessed through a pointer with a
  /// different address space.
  ///
  /// This is for targets with different pointer representations which can
  /// be converted with the addrspacecast instruction. If a pointer is converted
  /// to this address space, optimizations should attempt to replace the access
  /// with the source address space.
  ///
  /// \returns ~0u if the target does not have such a flat address space to
  /// optimize away.
  unsigned getFlatAddressSpace() const;

  /// Return any intrinsic address operand indexes which may be rewritten if
  /// they use a flat address space pointer.
  ///
  /// \returns true if the intrinsic was handled.
  bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
                                  Intrinsic::ID IID) const;

  bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const;

  unsigned getAssumedAddrSpace(const Value *V) const;

  /// Rewrite intrinsic call \p II such that \p OldV will be replaced with \p
  /// NewV, which has a different address space. This should happen for every
  /// operand index that collectFlatAddressOperands returned for the intrinsic.
  /// \returns nullptr if the intrinsic was not handled. Otherwise, returns the
  /// new value (which may be the original \p II with modified operands).
  Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
                                          Value *NewV) const;

  /// Test whether calls to a function lower to actual program function
  /// calls.
  ///
  /// The idea is to test whether the program is likely to require a 'call'
  /// instruction or equivalent in order to call the given function.
  ///
  /// FIXME: It's not clear that this is a good or useful query API. Clients
  /// should probably move to simpler cost metrics using the above.
  /// Alternatively, we could split the cost interface into distinct code-size
  /// and execution-speed costs. This would allow modelling the core of this
  /// query more accurately as a call is a single small instruction, but
  /// incurs significant execution cost.
  bool isLoweredToCall(const Function *F) const;

  struct LSRCost {
    /// TODO: Some of these could be merged. Also, a lexical ordering
    /// isn't always optimal.
    unsigned Insns;
    unsigned NumRegs;
    unsigned AddRecCost;
    unsigned NumIVMuls;
    unsigned NumBaseAdds;
    unsigned ImmCost;
    unsigned SetupCost;
    unsigned ScaleCost;
  };

  /// Parameters that control the generic loop unrolling transformation.
  struct UnrollingPreferences {
    /// The cost threshold for the unrolled loop. Should be relative to the
    /// getUserCost values returned by this API, and the expectation is that
    /// the unrolled loop's instructions when run through that interface should
    /// not exceed this cost. However, this is only an estimate. Also, specific
    /// loops may be unrolled even with a cost above this threshold if deemed
    /// profitable. Set this to UINT_MAX to disable the loop body cost
    /// restriction.
    unsigned Threshold;
    /// If complete unrolling will reduce the cost of the loop, we will boost
    /// the Threshold by a certain percent to allow more aggressive complete
    /// unrolling. This value provides the maximum boost percentage that we
    /// can apply to Threshold (The value should be no less than 100).
    ///   BoostedThreshold = Threshold * min(RolledCost / UnrolledCost,
    ///                                      MaxPercentThresholdBoost / 100)
    /// E.g. if complete unrolling reduces the loop execution time by 50%
    /// then we boost the threshold by the factor of 2x. If unrolling is not
    /// expected to reduce the running time, then we do not increase the
    /// threshold.
    unsigned MaxPercentThresholdBoost;
    /// The cost threshold for the unrolled loop when optimizing for size (set
    /// to UINT_MAX to disable).
    unsigned OptSizeThreshold;
    /// The cost threshold for the unrolled loop, like Threshold, but used
    /// for partial/runtime unrolling (set to UINT_MAX to disable).
    unsigned PartialThreshold;
    /// The cost threshold for the unrolled loop when optimizing for size, like
    /// OptSizeThreshold, but used for partial/runtime unrolling (set to
    /// UINT_MAX to disable).
    unsigned PartialOptSizeThreshold;
    /// A forced unrolling factor (the number of concatenated bodies of the
    /// original loop in the unrolled loop body). When set to 0, the unrolling
    /// transformation will select an unrolling factor based on the current cost
    /// threshold and other factors.
    unsigned Count;
    /// Default unroll count for loops with run-time trip count.
    unsigned DefaultUnrollRuntimeCount;
    // Set the maximum unrolling factor. The unrolling factor may be selected
    // using the appropriate cost threshold, but may not exceed this number
    // (set to UINT_MAX to disable). This does not apply in cases where the
    // loop is being fully unrolled.
    unsigned MaxCount;
    /// Set the maximum unrolling factor for full unrolling. Like MaxCount, but
    /// applies even if full unrolling is selected. This allows a target to fall
    /// back to Partial unrolling if full unrolling is above FullUnrollMaxCount.
    unsigned FullUnrollMaxCount;
    // Represents number of instructions optimized when "back edge"
    // becomes "fall through" in unrolled loop.
    // For now we count a conditional branch on a backedge and a comparison
    // feeding it.
    unsigned BEInsns;
    /// Allow partial unrolling (unrolling of loops to expand the size of the
    /// loop body, not only to eliminate small constant-trip-count loops).
    bool Partial;
    /// Allow runtime unrolling (unrolling of loops to expand the size of the
    /// loop body even when the number of loop iterations is not known at
    /// compile time).
    bool Runtime;
    /// Allow generation of a loop remainder (extra iterations after unroll).
    bool AllowRemainder;
    /// Allow emitting expensive instructions (such as divisions) when computing
    /// the trip count of a loop for runtime unrolling.
    bool AllowExpensiveTripCount;
    /// Apply loop unroll on any kind of loop
    /// (mainly to loops that fail runtime unrolling).
    bool Force;
    /// Allow using trip count upper bound to unroll loops.
    bool UpperBound;
    /// Allow unrolling of all the iterations of the runtime loop remainder.
    bool UnrollRemainder;
    /// Allow unroll and jam. Used to enable unroll and jam for the target.
    bool UnrollAndJam;
    /// Threshold for unroll and jam, for inner loop size. The 'Threshold'
    /// value above is used during unroll and jam for the outer loop size.
    /// This value is used in the same manner to limit the size of the inner
    /// loop.
    unsigned UnrollAndJamInnerLoopThreshold;
    /// Don't allow loop unrolling to simulate more than this number of
    /// iterations when checking full unroll profitability.
    unsigned MaxIterationsCountToAnalyze;
  };

  /// Get target-customized preferences for the generic loop unrolling
  /// transformation. The caller will initialize UP with the current
  /// target-independent defaults.
  void getUnrollingPreferences(Loop *L, ScalarEvolution &,
                               UnrollingPreferences &UP) const;

  /// Query the target whether it would be profitable to convert the given loop
  /// into a hardware loop.
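  ///
  /// For example, a sketch of how a pass might combine this query with
  /// \c HardwareLoopInfo (assuming the required analyses are in scope):
  /// \code
  ///   HardwareLoopInfo HWLoopInfo(L);
  ///   if (TTI.isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo) &&
  ///       HWLoopInfo.isHardwareLoopCandidate(SE, LI, DT)) {
  ///     // Proceed to emit the hardware-loop intrinsics.
  ///   }
  /// \endcode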
  bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                AssumptionCache &AC, TargetLibraryInfo *LibInfo,
                                HardwareLoopInfo &HWLoopInfo) const;

  /// Query the target whether it would be preferred to create a predicated
  /// vector loop, which can avoid the need to emit a scalar epilogue loop.
  bool preferPredicateOverEpilogue(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
                                   AssumptionCache &AC, TargetLibraryInfo *TLI,
                                   DominatorTree *DT,
                                   const LoopAccessInfo *LAI) const;

  /// Query the target whether lowering of the llvm.get.active.lane.mask
  /// intrinsic is supported.
  bool emitGetActiveLaneMask() const;

  // Parameters that control the loop peeling transformation
  struct PeelingPreferences {
    /// A forced peeling factor (the number of bodies of the original loop
    /// that should be peeled off before the loop body). When set to 0, a
    /// peeling factor is chosen based on profile information and other
    /// factors.
    unsigned PeelCount;
    /// Allow peeling off loop iterations.
    bool AllowPeeling;
    /// Allow peeling off loop iterations for loop nests.
    bool AllowLoopNestsPeeling;
    /// Allow peeling based on profile. Used to enable peeling off all
    /// iterations based on the provided profile.
    /// If the value is true the peeling cost model can decide to peel only
    /// some iterations and in this case it will set this to false.
    bool PeelProfiledIterations;
  };

  /// Get target-customized preferences for the generic loop peeling
  /// transformation. The caller will initialize \p PP with the current
  /// target-independent defaults with information from \p L and \p SE.
  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             PeelingPreferences &PP) const;

  /// Targets can implement their own combinations for target-specific
  /// intrinsics. This function will be called from the InstCombine pass every
  /// time a target-specific intrinsic is encountered.
  ///
  /// \returns None to not do anything target specific or a value that will be
  /// returned from the InstCombiner. It is also possible to return nullptr to
  /// stop further processing of the intrinsic.
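  ///
  /// For example, a sketch of a hypothetical target implementation that folds
  /// an idempotent target intrinsic to its operand (\c
  /// isIdempotentTargetIntrinsic is a hypothetical predicate):
  /// \code
  ///   if (isIdempotentTargetIntrinsic(II))
  ///     return IC.replaceInstUsesWith(II, II.getArgOperand(0));
  ///   return None; // Fall back to the generic handling.
  /// \endcode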
  Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
                                               IntrinsicInst &II) const;
  /// Can be used to implement target-specific instruction combining.
  /// \see instCombineIntrinsic
  Optional<Value *>
  simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
                                   APInt DemandedMask, KnownBits &Known,
                                   bool &KnownBitsComputed) const;
  /// Can be used to implement target-specific instruction combining.
  /// \see instCombineIntrinsic
  Optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
      APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) const;
  /// @}

  /// \name Scalar Target Information
  /// @{

  /// Flags indicating the kind of support for population count.
  ///
  /// Compared to the SW implementation, HW support is supposed to
  /// significantly boost the performance when the population is dense, and it
  /// may or may not degrade performance if the population is sparse. A HW
  /// support is considered as "Fast" if it can outperform, or is on a par
  /// with, SW implementation when the population is sparse; otherwise, it is
  /// considered as "Slow".
  enum PopcntSupportKind { PSK_Software, PSK_SlowHardware, PSK_FastHardware };

  /// Return true if the specified immediate is a legal add immediate, that
  /// is, the target has add instructions which can add a register with the
  /// immediate without having to materialize the immediate into a register.
  bool isLegalAddImmediate(int64_t Imm) const;

  /// Return true if the specified immediate is a legal icmp immediate,
  /// that is, the target has icmp instructions which can compare a register
  /// against the immediate without having to materialize the immediate into a
  /// register.
  bool isLegalICmpImmediate(int64_t Imm) const;

  /// Return true if the addressing mode represented by AM is legal for
  /// this target, for a load/store of the specified type.
  /// The type may be VoidTy, in which case only return true if the addressing
  /// mode is legal for a load/store of any legal type.
  /// If target returns true in LSRWithInstrQueries(), I may be valid.
  /// TODO: Handle pre/postinc as well.
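  ///
  /// For example, a sketch of asking whether a [reg + 4*reg] mode is legal
  /// for an i64 load (assuming \c Int64Ty is in scope):
  /// \code
  ///   bool Legal = TTI.isLegalAddressingMode(Int64Ty, /*BaseGV=*/nullptr,
  ///                                          /*BaseOffset=*/0,
  ///                                          /*HasBaseReg=*/true,
  ///                                          /*Scale=*/4);
  /// \endcode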
  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace = 0,
                             Instruction *I = nullptr) const;

  /// Return true if the LSR cost of C1 is lower than the cost of C2.
  bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                     TargetTransformInfo::LSRCost &C2) const;

  /// Return true if LSR major cost is number of registers. Targets which
  /// implement their own isLSRCostLess and unset number of registers as major
  /// cost should return false, otherwise return true.
  bool isNumRegsMajorCostOfLSR() const;

  /// \returns true if LSR should not optimize a chain that includes \p I.
  bool isProfitableLSRChainElement(Instruction *I) const;

  /// Return true if the target can fuse a compare and branch.
  /// Loop-strength-reduction (LSR) uses that knowledge to adjust its cost
  /// calculation for the instructions in a loop.
  bool canMacroFuseCmp() const;

  /// Return true if the target can save a compare for loop count; for example,
  /// a hardware loop saves a compare.
  bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI,
                  DominatorTree *DT, AssumptionCache *AC,
                  TargetLibraryInfo *LibInfo) const;

  enum AddressingModeKind {
    AMK_PreIndexed,
    AMK_PostIndexed,
    AMK_None
  };

  /// Return the preferred addressing mode LSR should make efforts to generate.
  AddressingModeKind getPreferredAddressingMode(const Loop *L,
                                                ScalarEvolution *SE) const;

  /// Return true if the target supports masked store.
  bool isLegalMaskedStore(Type *DataType, Align Alignment) const;
  /// Return true if the target supports masked load.
  bool isLegalMaskedLoad(Type *DataType, Align Alignment) const;

  /// Return true if the target supports nontemporal store.
  bool isLegalNTStore(Type *DataType, Align Alignment) const;
  /// Return true if the target supports nontemporal load.
  bool isLegalNTLoad(Type *DataType, Align Alignment) const;

  /// Return true if the target supports masked scatter.
  bool isLegalMaskedScatter(Type *DataType, Align Alignment) const;
  /// Return true if the target supports masked gather.
  bool isLegalMaskedGather(Type *DataType, Align Alignment) const;

  /// Return true if the target supports masked compress store.
  bool isLegalMaskedCompressStore(Type *DataType) const;
  /// Return true if the target supports masked expand load.
  bool isLegalMaskedExpandLoad(Type *DataType) const;

  /// Return true if the target has a unified operation to calculate division
  /// and remainder. If so, the additional implicit multiplication and
  /// subtraction required to calculate a remainder from division are free. This
  /// can enable more aggressive transformations for division and remainder than
  /// would typically be allowed using throughput or size cost models.
  bool hasDivRemOp(Type *DataType, bool IsSigned) const;

  /// Return true if the given instruction (assumed to be a memory access
  /// instruction) has a volatile variant. If that's the case then we can avoid
  /// addrspacecast to generic AS for volatile loads/stores. Default
  /// implementation returns false, which prevents address space inference for
  /// volatile loads/stores.
  bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) const;

  /// Return true if the target doesn't mind addresses in vectors.
  bool prefersVectorizedAddressing() const;

  /// Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  /// TODO: Handle pre/postinc as well.
  InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                       int64_t BaseOffset, bool HasBaseReg,
                                       int64_t Scale,
                                       unsigned AddrSpace = 0) const;

  /// Return true if the loop strength reduce pass should make
  /// Instruction* based TTI queries to isLegalAddressingMode(). This is
  /// needed on SystemZ, where e.g. a memcpy can only have a 12 bit unsigned
  /// immediate offset and no index register.
  bool LSRWithInstrQueries() const;

  /// Return true if it's free to truncate a value of type Ty1 to type
  /// Ty2. e.g. On x86 it's free to truncate an i32 value in register EAX to
  /// i16 by referencing its sub-register AX.
  bool isTruncateFree(Type *Ty1, Type *Ty2) const;

  /// Return true if it is profitable to hoist an instruction in the
  /// then/else blocks to before the if.
  bool isProfitableToHoist(Instruction *I) const;

  bool useAA() const;

  /// Return true if this type is legal.
  bool isTypeLegal(Type *Ty) const;

  /// Returns the estimated number of registers required to represent \p Ty.
  InstructionCost getRegUsageForType(Type *Ty) const;

  /// Return true if switches should be turned into lookup tables for the
  /// target.
  bool shouldBuildLookupTables() const;

  /// Return true if switches should be turned into lookup tables
  /// containing this constant value for the target.
  bool shouldBuildLookupTablesForConstant(Constant *C) const;

  /// Return true if lookup tables should be turned into relative lookup tables.
  bool shouldBuildRelLookupTables() const;

  /// Return true if the input function, which is cold at all call sites,
  /// should use the coldcc calling convention.
  bool useColdCCForColdCall(Function &F) const;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the demanded result elements need to be inserted and/or
  /// extracted from vectors.
  InstructionCost getScalarizationOverhead(VectorType *Ty,
                                           const APInt &DemandedElts,
                                           bool Insert, bool Extract) const;

  /// Estimate the overhead of scalarizing an instruction's unique
  /// non-constant operands. The (potentially vector) types to use for each
  /// argument are passed via Tys.
  InstructionCost getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                                   ArrayRef<Type *> Tys) const;

  /// If target has efficient vector element load/store instructions, it can
  /// return true here so that insertion/extraction costs are not added to
  /// the scalarization cost of a load/store.
  bool supportsEfficientVectorElementLoadStore() const;

  /// Don't restrict interleaved unrolling to small loops.
  bool enableAggressiveInterleaving(bool LoopHasReductions) const;

  /// Returns options for expansion of memcmp. IsZeroCmp is
  /// true if this is the expansion of memcmp(p1, p2, s) == 0.
  struct MemCmpExpansionOptions {
    // Return true if memcmp expansion is enabled.
    operator bool() const { return MaxNumLoads > 0; }

    // Maximum number of load operations.
    unsigned MaxNumLoads = 0;

    // The list of available load sizes (in bytes), sorted in decreasing order.
    SmallVector<unsigned, 8> LoadSizes;

    // For memcmp expansion when the memcmp result is only compared equal or
    // not-equal to 0, allow up to this number of load pairs per block. As an
    // example, this may allow 'memcmp(a, b, 3) == 0' in a single block:
    //   a0 = load2bytes &a[0]
    //   b0 = load2bytes &b[0]
    //   a2 = load1byte  &a[2]
    //   b2 = load1byte  &b[2]
    //   r  = cmp eq (a0 ^ b0 | a2 ^ b2), 0
    unsigned NumLoadsPerBlock = 1;

    // Set to true to allow overlapping loads. For example, 7-byte compares can
    // be done with two 4-byte compares instead of 4+2+1-byte compares. This
    // requires all loads in LoadSizes to be doable in an unaligned way.
    bool AllowOverlappingLoads = false;
  };
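
  /// \returns The memcmp expansion options for this target, where a
  /// default-constructed (disabled) result converts to false. For example, a
  /// sketch of a caller (\c expandMemCmpCall is a hypothetical helper):
  /// \code
  ///   if (auto Options = TTI.enableMemCmpExpansion(OptSize, IsZeroCmp))
  ///     expandMemCmpCall(Call, Options);
  /// \endcode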
  MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                               bool IsZeroCmp) const;

  /// Enable matching of interleaved access groups.
  bool enableInterleavedAccessVectorization() const;

  /// Enable matching of interleaved access groups that contain predicated
  /// accesses or gaps and are therefore vectorized using masked
  /// vector loads/stores.
  bool enableMaskedInterleavedAccessVectorization() const;

  /// Indicate that it is potentially unsafe to automatically vectorize
  /// floating-point operations because the semantics of vector and scalar
  /// floating-point operations may differ. For example, ARM NEON v7 SIMD math
  /// does not support IEEE-754 denormal numbers, while depending on the
  /// platform, scalar floating-point math does.
  /// This applies to floating-point math operations and calls, not memory
  /// operations, shuffles, or casts.
  bool isFPVectorizationPotentiallyUnsafe() const;

  /// Determine if the target supports unaligned memory accesses.
  bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
                                      unsigned AddressSpace = 0,
                                      Align Alignment = Align(1),
                                      bool *Fast = nullptr) const;

  /// Return hardware support for population count.
  PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const;

  /// Return true if the hardware has a fast square-root instruction.
  bool haveFastSqrt(Type *Ty) const;

  /// Return true if it is faster to check if a floating-point value is NaN
  /// (or not-NaN) versus a comparison against a constant FP zero value.
  /// Targets should override this if materializing a 0.0 for comparison is
  /// generally as cheap as checking for ordered/unordered.
  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const;

  /// Return the expected cost of supporting the floating point operation
  /// of the specified type.
  InstructionCost getFPOpCost(Type *Ty) const;

  /// Return the expected cost of materializing the given integer
  /// immediate of the specified type.
  InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
                                TargetCostKind CostKind) const;

  /// Return the expected cost of materialization for the given integer
  /// immediate of the specified type for a given instruction. The cost can be
  /// zero if the immediate can be folded into the specified instruction.
  InstructionCost getIntImmCostInst(unsigned Opc, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TargetCostKind CostKind,
                                    Instruction *Inst = nullptr) const;
  InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                      const APInt &Imm, Type *Ty,
                                      TargetCostKind CostKind) const;

  /// Return the expected cost for the given integer when optimising
  /// for size. This is different than the other integer immediate cost
  /// functions in that it is subtarget agnostic. This is useful when you e.g.
  /// target one ISA such as AArch32 but smaller encodings could be possible
  /// with another such as Thumb. This return value is used as a penalty when
  /// the total cost for a constant is calculated (the bigger the cost, the
  /// more beneficial constant hoisting is).
  InstructionCost getIntImmCodeSizeCost(unsigned Opc, unsigned Idx,
                                        const APInt &Imm, Type *Ty) const;
  /// @}

  /// \name Vector Target Information
  /// @{

  /// The various kinds of shuffle patterns for vector queries.
  enum ShuffleKind {
    SK_Broadcast,        ///< Broadcast element 0 to all other elements.
    SK_Reverse,          ///< Reverse the order of the vector.
    SK_Select,           ///< Selects elements from the corresponding lane of
                         ///< either source operand. This is equivalent to a
                         ///< vector select with a constant condition operand.
    SK_Transpose,        ///< Transpose two vectors.
    SK_InsertSubvector,  ///< InsertSubvector. Index indicates start offset.
    SK_ExtractSubvector, ///< ExtractSubvector. Index indicates start offset.
    SK_PermuteTwoSrc,    ///< Merge elements from two source vectors into one
                         ///< with any shuffle mask.
    SK_PermuteSingleSrc, ///< Shuffle elements of single source vector with any
                         ///< shuffle mask.
    SK_Splice            ///< Concatenates elements from the first input vector
                         ///< with elements of the second input vector,
                         ///< returning a vector of the same type as the input
                         ///< vectors.
  };

  /// Additional information about an operand's possible values.
  enum OperandValueKind {
    OK_AnyValue,               // Operand can have any value.
    OK_UniformValue,           // Operand is uniform (splat of a value).
    OK_UniformConstantValue,   // Operand is uniform constant.
    OK_NonUniformConstantValue // Operand is a non uniform constant value.
  };

  /// Additional properties of an operand's values.
  enum OperandValueProperties { OP_None = 0, OP_PowerOf2 = 1 };

  /// \return the number of registers in the target-provided register class.
  unsigned getNumberOfRegisters(unsigned ClassID) const;

  /// \return the target-provided register class ID for the provided type,
  /// accounting for type promotion and other type-legalization techniques that
  /// the target might apply. However, it specifically does not account for the
  /// scalarization or splitting of vector types. Should a vector type require
  /// scalarization or splitting into multiple underlying vector registers, that
  /// type should be mapped to a register class containing no registers.
  /// Specifically, this is designed to provide a simple, high-level view of the
  /// register allocation later performed by the backend. These register classes
  /// don't necessarily map onto the register classes used by the backend.
  /// FIXME: It's not currently possible to determine how many registers
  /// are used by the provided type.
  unsigned getRegisterClassForType(bool Vector, Type *Ty = nullptr) const;

  /// \return the target-provided register class name
  const char *getRegisterClassName(unsigned ClassID) const;

  enum RegisterKind { RGK_Scalar, RGK_FixedWidthVector, RGK_ScalableVector };

  /// \return The width of the largest scalar or vector register type.
  TypeSize getRegisterBitWidth(RegisterKind K) const;

  /// \return The width of the smallest vector register type.
  unsigned getMinVectorRegisterBitWidth() const;

  /// \return The maximum value of vscale if the target specifies an
  /// architectural maximum vector length, and None otherwise.
  Optional<unsigned> getMaxVScale() const;

  /// \return True if the vectorization factor should be chosen to
  /// make the vector of the smallest element type match the size of a
  /// vector register. For wider element types, this could result in
  /// creating vectors that span multiple vector registers.
  /// If false, the vectorization factor will be chosen based on the
  /// size of the widest element type.
  bool shouldMaximizeVectorBandwidth() const;

  /// \return The minimum vectorization factor for types of given element
  /// bit width, or 0 if there is no minimum VF. The returned value only
  /// applies when shouldMaximizeVectorBandwidth returns true.
  /// If IsScalable is true, the returned ElementCount must be a scalable VF.
  ElementCount getMinimumVF(unsigned ElemWidth, bool IsScalable) const;

  /// \return The maximum vectorization factor for types of given element
  /// bit width and opcode, or 0 if there is no maximum VF.
  /// Currently only used by the SLP vectorizer.
  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const;

  /// \return True if it should be considered for address type promotion.
  /// \p AllowPromotionWithoutCommonHeader Set true if promoting \p I is
  /// profitable without finding other extensions fed by the same input.
  bool shouldConsiderAddressTypePromotion(
      const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const;

  /// \return The size of a cache line in bytes.
  unsigned getCacheLineSize() const;

  /// The possible cache levels
  enum class CacheLevel {
    L1D, // The L1 data cache
    L2D, // The L2 data cache

    // We currently do not model L3 caches, as their sizes differ widely between
    // microarchitectures. Also, we currently do not have a use for L3 cache
    // size modeling yet.
  };

  /// \return The size of the cache level in bytes, if available.
  Optional<unsigned> getCacheSize(CacheLevel Level) const;

  /// \return The associativity of the cache level, if available.
  Optional<unsigned> getCacheAssociativity(CacheLevel Level) const;

  /// \return How much before a load we should place the prefetch
  /// instruction. This is currently measured in number of
  /// instructions.
  unsigned getPrefetchDistance() const;

  /// Some HW prefetchers can handle accesses up to a certain constant stride.
  /// Sometimes prefetching is beneficial even below the HW prefetcher limit,
  /// and the arguments provided are meant to serve as a basis for deciding this
  /// for a particular loop.
  ///
  /// \param NumMemAccesses        Number of memory accesses in the loop.
  /// \param NumStridedMemAccesses Number of the memory accesses that
  ///                              ScalarEvolution could find a known stride
  ///                              for.
  /// \param NumPrefetches         Number of software prefetches that will be
  ///                              emitted as determined by the addresses
  ///                              involved and the cache line size.
  /// \param HasCall               True if the loop contains a call.
  ///
  /// \return This is the minimum stride in bytes where it makes sense to start
  ///         adding SW prefetches. The default is 1, i.e. prefetch with any
  ///         stride.
  unsigned getMinPrefetchStride(unsigned NumMemAccesses,
                                unsigned NumStridedMemAccesses,
                                unsigned NumPrefetches, bool HasCall) const;

  /// \return The maximum number of iterations to prefetch ahead. If
  /// the required number of iterations is more than this number, no
  /// prefetching is performed.
  unsigned getMaxPrefetchIterationsAhead() const;

  /// \return True if prefetching should also be done for writes.
  bool enableWritePrefetching() const;

  /// \return The maximum interleave factor that any transform should try to
  /// perform for this target. This number depends on the level of parallelism
  /// and the number of execution units in the CPU.
  unsigned getMaxInterleaveFactor(unsigned VF) const;

  /// Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
  static OperandValueKind getOperandInfo(const Value *V,
                                         OperandValueProperties &OpProps);

  /// This is an approximation of reciprocal throughput of a math/logic op.
  /// A higher cost indicates less expected throughput.
  /// From Agner Fog's guides, reciprocal throughput is "the average number of
  /// clock cycles per instruction when the instructions are not part of a
  /// limiting dependency chain."
  /// Therefore, costs should be scaled to account for multiple execution units
  /// on the target that can process this type of instruction. For example, if
  /// there are 5 scalar integer units and 2 vector integer units that can
  /// calculate an 'add' in a single cycle, this model should indicate that the
  /// cost of the vector add instruction is 2.5 times the cost of the scalar
  /// add instruction.
  /// \p Args is an optional argument which holds the instruction operand
  /// values so the TTI can analyze those values searching for special
  /// cases or optimizations based on those values.
  /// \p CxtI is the optional original context instruction, if one exists, to
  /// provide even more information.
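  ///
  /// For example, a sketch of costing a vector multiply whose second operand
  /// is known to be a uniform power-of-2 constant (assuming \c VecTy is in
  /// scope):
  /// \code
  ///   InstructionCost Cost = TTI.getArithmeticInstrCost(
  ///       Instruction::Mul, VecTy, TTI::TCK_RecipThroughput,
  ///       TTI::OK_AnyValue, TTI::OK_UniformConstantValue, TTI::OP_None,
  ///       TTI::OP_PowerOf2);
  /// \endcode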
  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty,
      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
      OperandValueKind Opd1Info = OK_AnyValue,
      OperandValueKind Opd2Info = OK_AnyValue,
      OperandValueProperties Opd1PropInfo = OP_None,
      OperandValueProperties Opd2PropInfo = OP_None,
      ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
      const Instruction *CxtI = nullptr) const;

  /// \return The cost of a shuffle instruction of kind Kind and of type Tp.
  /// The exact mask may be passed as Mask, or else the array will be empty.
  /// The index and subtype parameters are used by the subvector insertion and
  /// extraction shuffle kinds to show the insert/extract point and the type of
  /// the subvector being inserted/extracted.
  /// NOTE: For subvector extractions Tp represents the source type.
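  ///
  /// For example, a sketch of costing a reversal of a <4 x i32> vector
  /// (assuming \c Ctx is an LLVMContext):
  /// \code
  ///   auto *VTy = FixedVectorType::get(Type::getInt32Ty(Ctx), 4);
  ///   InstructionCost Cost = TTI.getShuffleCost(TTI::SK_Reverse, VTy);
  /// \endcode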
  InstructionCost getShuffleCost(ShuffleKind Kind, VectorType *Tp,
                                 ArrayRef<int> Mask = None, int Index = 0,
                                 VectorType *SubTp = nullptr) const;

  /// Represents a hint about the context in which a cast is used.
  ///
  /// For zext/sext, the context of the cast is the operand, which must be a
  /// load of some kind. For trunc, the context of the cast is the single
  /// user of the instruction, which must be a store of some kind.
  ///
  /// This enum allows the vectorizer to give getCastInstrCost an idea of the
  /// type of cast it's dealing with, as not every cast is equal. For instance,
  /// the zext of a load may be free, but the zext of an interleaving load can
  /// be (very) expensive!
  ///
  /// See \c getCastContextHint to compute a CastContextHint from a cast
  /// Instruction*. Callers can use it if they don't need to override the
  /// context and just want it to be calculated from the instruction.
  ///
  /// FIXME: This handles the types of load/store that the vectorizer can
  /// produce, which are the cases where the context instruction is most
  /// likely to be incorrect. There are other situations where that can happen
  /// too, which might be handled here but in the long run a more general
  /// solution of costing multiple instructions at the same time may be better.
  enum class CastContextHint : uint8_t {
    None,          ///< The cast is not used with a load/store of any kind.
    Normal,        ///< The cast is used with a normal load/store.
    Masked,        ///< The cast is used with a masked load/store.
    GatherScatter, ///< The cast is used with a gather/scatter.
    Interleave,    ///< The cast is used with an interleaved load/store.
    Reversed,      ///< The cast is used with a reversed load/store.
  };

  /// Calculates a CastContextHint from \p I.
  /// This should be used by callers of getCastInstrCost if they wish to
  /// determine the context from some instruction.
  /// \returns the CastContextHint for ZExt/SExt/Trunc, None if \p I is nullptr,
  /// or if it's another type of cast.
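  ///
  /// For example, a sketch of combining it with getCastInstrCost (assuming
  /// \c Cast is a CastInst*):
  /// \code
  ///   TTI::CastContextHint CCH = TTI::getCastContextHint(Cast);
  ///   InstructionCost Cost = TTI.getCastInstrCost(
  ///       Cast->getOpcode(), Cast->getDestTy(), Cast->getSrcTy(), CCH,
  ///       TTI::TCK_RecipThroughput, Cast);
  /// \endcode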
  static CastContextHint getCastContextHint(const Instruction *I);

  /// \return The expected cost of cast instructions, such as bitcast, trunc,
  /// zext, etc. If there is an existing instruction that holds Opcode, it
  /// may be passed in the 'I' parameter.
  InstructionCost
  getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                   TTI::CastContextHint CCH,
                   TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency,
                   const Instruction *I = nullptr) const;

  /// \return The expected cost of a sign- or zero-extended vector extract. Use
  /// -1 to indicate that there is no information about the index value.
  InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                           VectorType *VecTy,
                                           unsigned Index = -1) const;

  /// \return The expected cost of control-flow related instructions such as
  /// Phi, Ret, Br, Switch.
  InstructionCost
  getCFInstrCost(unsigned Opcode,
                 TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency,
                 const Instruction *I = nullptr) const;

  /// \returns The expected cost of compare and select instructions. If there
  /// is an existing instruction that holds Opcode, it may be passed in the
  /// 'I' parameter. The \p VecPred parameter can be used to indicate the select
  /// is using a compare with the specified predicate as condition. When vector
  /// types are passed, \p VecPred must be used for all lanes.
  InstructionCost
  getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy = nullptr,
                     CmpInst::Predicate VecPred = CmpInst::BAD_ICMP_PREDICATE,
                     TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
                     const Instruction *I = nullptr) const;

  /// \return The expected cost of vector Insert and Extract.
  /// Use -1 to indicate that there is no information on the index value.
  InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
                                     unsigned Index = -1) const;

  /// \return The cost of Load and Store instructions.
  InstructionCost
  getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
                  unsigned AddressSpace,
                  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
                  const Instruction *I = nullptr) const;

  /// \return The cost of masked Load and Store instructions.
  InstructionCost getMaskedMemoryOpCost(
      unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;

  /// \return The cost of Gather or Scatter operation
  /// \p Opcode - is a type of memory access Load or Store
  /// \p DataTy - a vector type of the data to be loaded or stored
  /// \p Ptr - pointer [or vector of pointers] - address[es] in memory
  /// \p VariableMask - true when the memory access is predicated with a mask
  ///                   that is not a compile-time constant
  /// \p Alignment - alignment of single element
  /// \p I - the optional original context instruction, if one exists, e.g. the
  ///        load/store to transform or the call to the gather/scatter intrinsic
  InstructionCost getGatherScatterOpCost(
      unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
      Align Alignment, TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
      const Instruction *I = nullptr) const;

  /// \return The cost of the interleaved memory operation.
  /// \p Opcode is the memory operation code
  /// \p VecTy is the vector type of the interleaved access.
  /// \p Factor is the interleave factor
  /// \p Indices is the indices for interleaved load members (as interleaved
  ///    load allows gaps)
  /// \p Alignment is the alignment of the memory operation
  /// \p AddressSpace is address space of the pointer.
  /// \p UseMaskForCond indicates if the memory access is predicated.
  /// \p UseMaskForGaps indicates if gaps should be masked.
  InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
      bool UseMaskForCond = false, bool UseMaskForGaps = false) const;

  /// A helper function to determine the type of reduction algorithm used
  /// for a given \p Opcode and set of FastMathFlags \p FMF.
  static bool requiresOrderedReduction(Optional<FastMathFlags> FMF) {
    return FMF != None && !(*FMF).allowReassoc();
  }

  /// Calculate the cost of vector reduction intrinsics.
  ///
  /// This is the cost of reducing the vector value of type \p Ty to a scalar
  /// value using the operation denoted by \p Opcode. The FastMathFlags
  /// parameter \p FMF indicates what type of reduction we are performing:
  /// 1. Tree-wise. This is the typical 'fast' reduction performed that
  /// involves successively splitting a vector into half and doing the
  /// operation on the pair of halves until you have a scalar value. For
  /// example:
  ///   (v0, v1, v2, v3)
  ///   ((v0+v2), (v1+v3), undef, undef)
  ///   ((v0+v2+v1+v3), undef, undef, undef)
  /// This is the default behaviour for integer operations, whereas for
  /// floating point we only do this if \p FMF indicates that
  /// reassociation is allowed.
  /// 2. Ordered. For a vector with N elements this involves performing N
  /// operations in lane order, starting with an initial scalar value, i.e.
  ///   result = InitVal + v0
  ///   result = result + v1
  ///   result = result + v2
  ///   result = result + v3
  /// This is only the case for FP operations and when reassociation is not
  /// allowed.
  ///
  InstructionCost getArithmeticReductionCost(
      unsigned Opcode, VectorType *Ty, Optional<FastMathFlags> FMF,
      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;

  InstructionCost getMinMaxReductionCost(
      VectorType *Ty, VectorType *CondTy, bool IsUnsigned,
      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;

  /// Calculate the cost of an extended reduction pattern, similar to
  /// getArithmeticReductionCost of an Add reduction with an extension and
  /// optional multiply. This is the cost of:
  ///   ResTy vecreduce.add(ext(Ty A)), or if IsMLA flag is set then:
  ///   ResTy vecreduce.add(mul(ext(Ty A), ext(Ty B))).
  /// The reduction happens on a VectorType with ResTy elements and Ty lanes.
  InstructionCost getExtendedAddReductionCost(
      bool IsMLA, bool IsUnsigned, Type *ResTy, VectorType *Ty,
      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
1196
1197 /// \returns The cost of Intrinsic instructions. Analyses the real arguments.
1198 /// Three cases are handled: 1. scalar instruction 2. vector instruction
1199 /// 3. scalar instruction which is to be vectorized.
1200 InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
1201 TTI::TargetCostKind CostKind) const;
1202
1203 /// \returns The cost of Call instructions.
1204 InstructionCost getCallInstrCost(
1205 Function *F, Type *RetTy, ArrayRef<Type *> Tys,
1206 TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency) const;
1207
1208 /// \returns The number of pieces into which the provided type must be
1209 /// split during legalization. Zero is returned when the answer is unknown.
1210 unsigned getNumberOfParts(Type *Tp) const;
1211
1212 /// \returns The cost of the address computation. For most targets this can be
1213 /// merged into the instruction indexing mode. Some targets might want to
1214 /// distinguish between address computation for memory operations on vector
1215 /// types and scalar types. Such targets should override this function.
1216 /// The 'SE' parameter holds pointer for the scalar evolution object which
1217 /// is used in order to get the Ptr step value in case of constant stride.
1218 /// The 'Ptr' parameter holds SCEV of the access pointer.
1219 InstructionCost getAddressComputationCost(Type *Ty,
1220 ScalarEvolution *SE = nullptr,
1221 const SCEV *Ptr = nullptr) const;
1222
1223 /// \returns The cost, if any, of keeping values of the given types alive
1224 /// over a callsite.
1225 ///
1226 /// Some types may require the use of register classes that do not have
1227 /// any callee-saved registers, so would require a spill and fill.
1228 InstructionCost getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const;
1229
1230 /// \returns True if the intrinsic is a supported memory intrinsic. Info
1231 /// will contain additional information - whether the intrinsic may write
1232 /// or read to memory, volatility and the pointer. Info is undefined
1233 /// if false is returned.
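  /// A short illustrative sketch (assuming \c TTI and an \c IntrinsicInst
  /// \c *II are in scope), in the style of passes such as EarlyCSE that
  /// treat supported target intrinsics like ordinary loads:
  /// \code
  ///   MemIntrinsicInfo Info;
  ///   if (TTI.getTgtMemIntrinsic(II, Info) && Info.ReadMem &&
  ///       Info.isUnordered()) {
  ///     // *II behaves like a simple load from Info.PtrVal.
  ///   }
  /// \endcode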
  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const;

  /// \returns The maximum element size, in bytes, for an element
  /// unordered-atomic memory intrinsic.
  unsigned getAtomicMemIntrinsicMaxElementSize() const;

  /// \returns A value which is the result of the given memory intrinsic. New
  /// instructions may be created to extract the result from the given
  /// intrinsic memory operation. Returns nullptr if the target cannot create
  /// a result from the given intrinsic.
  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                           Type *ExpectedType) const;

  /// \returns The type to use in a loop expansion of a memcpy call.
  Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
                                  unsigned SrcAddrSpace, unsigned DestAddrSpace,
                                  unsigned SrcAlign, unsigned DestAlign) const;

  /// \param[out] OpsOut The operand types to copy RemainingBytes of memory.
  /// \param RemainingBytes The number of bytes to copy.
  ///
  /// Calculates the operand types to use when copying \p RemainingBytes of
  /// memory, where source and destination alignments are \p SrcAlign and
  /// \p DestAlign respectively.
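  /// An illustrative sketch only (the names \c TTI, \c Ctx, \c SrcAS,
  /// \c DstAS, \c SrcAlign and \c DstAlign are assumed to be in scope):
  /// after a memcpy loop has copied whole chunks, the residual bytes are
  /// covered by a sequence of (possibly narrower) operand types.
  /// \code
  ///   SmallVector<Type *, 4> ResidualTys;
  ///   TTI.getMemcpyLoopResidualLoweringType(ResidualTys, Ctx,
  ///                                         /*RemainingBytes=*/7, SrcAS,
  ///                                         DstAS, SrcAlign, DstAlign);
  ///   // A target could cover 7 bytes as { i32, i16, i8 }; the conservative
  ///   // fallback is simply a byte-sized type per remaining byte.
  /// \endcode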
  void getMemcpyLoopResidualLoweringType(
      SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
      unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
      unsigned SrcAlign, unsigned DestAlign) const;

  /// \returns True if the two functions have compatible attributes for
  /// inlining purposes.
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const;

  /// \returns True if the caller and callee agree on how \p Args will be
  /// passed to the callee.
  /// \param[out] Args The list of compatible arguments. The implementation
  /// may filter out any incompatible args from this list.
  bool areFunctionArgsABICompatible(const Function *Caller,
                                    const Function *Callee,
                                    SmallPtrSetImpl<Argument *> &Args) const;

  /// The type of load/store indexing.
  enum MemIndexedMode {
    MIM_Unindexed, ///< No indexing.
    MIM_PreInc,    ///< Pre-incrementing.
    MIM_PreDec,    ///< Pre-decrementing.
    MIM_PostInc,   ///< Post-incrementing.
    MIM_PostDec    ///< Post-decrementing.
  };
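  // For illustration, in C terms a pre-incrementing access corresponds to
  // *++p (the pointer is updated before the access) and a post-incrementing
  // access to *p++ (the pointer is updated after the access). A hedged
  // sketch of querying legality, assuming \c TTI and an element type
  // \c EltTy are in scope:
  //
  // \code
  //   bool CanPostInc =
  //       TTI.isIndexedLoadLegal(TargetTransformInfo::MIM_PostInc, EltTy);
  // \endcode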

  /// \returns True if the specified indexed load for the given type is legal.
  bool isIndexedLoadLegal(enum MemIndexedMode Mode, Type *Ty) const;

  /// \returns True if the specified indexed store for the given type is
  /// legal.
  bool isIndexedStoreLegal(enum MemIndexedMode Mode, Type *Ty) const;

  /// \returns The bitwidth of the largest vector type that should be used to
  /// load/store in the given address space.
  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const;

  /// \returns True if the load instruction is legal to vectorize.
  bool isLegalToVectorizeLoad(LoadInst *LI) const;

  /// \returns True if the store instruction is legal to vectorize.
  bool isLegalToVectorizeStore(StoreInst *SI) const;

  /// \returns True if it is legal to vectorize the given load chain.
  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment,
                                   unsigned AddrSpace) const;

  /// \returns True if it is legal to vectorize the given store chain.
  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment,
                                    unsigned AddrSpace) const;

  /// \returns True if it is legal to vectorize the given reduction kind.
  bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc,
                                   ElementCount VF) const;

  /// \returns True if the given type is supported for scalable vectors.
  bool isElementTypeLegalForScalableVector(Type *Ty) const;

  /// \returns The new vector factor value if the target doesn't support
  /// \p ChainSizeInBytes loads or has a better vector factor.
  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                               unsigned ChainSizeInBytes,
                               VectorType *VecTy) const;

  /// \returns The new vector factor value if the target doesn't support
  /// \p ChainSizeInBytes stores or has a better vector factor.
  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                unsigned ChainSizeInBytes,
                                VectorType *VecTy) const;

  /// Flags describing the kind of vector reduction.
  struct ReductionFlags {
    ReductionFlags() : IsMaxOp(false), IsSigned(false), NoNaN(false) {}
    bool IsMaxOp;  ///< If the op is a min/max kind, true if it's a max
                   ///< operation.
    bool IsSigned; ///< Whether the operation is a signed int reduction.
    bool NoNaN;    ///< If op is an fp min/max, whether NaNs may be present.
  };

  /// \returns True if the target prefers in-loop reductions.
  bool preferInLoopReduction(unsigned Opcode, Type *Ty,
                             ReductionFlags Flags) const;

  /// \returns True if the target prefers the reduction select kept in the
  /// loop when tail folding, i.e.
  ///   loop:
  ///     p = phi (0, s)
  ///     a = add (p, x)
  ///     s = select (mask, a, p)
  ///   vecreduce.add(s)
  ///
  /// As opposed to the normal scheme of p = phi (0, a), which allows the
  /// select to be pulled out of the loop. If the select(.., add, ..) can be
  /// predicated by the target, this can lead to cleaner code generation.
  bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty,
                                       ReductionFlags Flags) const;

  /// \returns True if the target wants to expand the given reduction
  /// intrinsic into a shuffle sequence.
  bool shouldExpandReduction(const IntrinsicInst *II) const;

  /// \returns The size cost of rematerializing a GlobalValue address
  /// relative to a stack reload.
  unsigned getGISelRematGlobalCost() const;

  /// \returns True if the target supports scalable vectors.
  bool supportsScalableVectors() const;

  /// \name Vector Predication Information
  /// @{
  /// Whether the target supports the %evl parameter of VP intrinsics
  /// efficiently in hardware (see LLVM Language Reference, "Vector
  /// Predication Intrinsics"). Use of %evl is discouraged when that is not
  /// the case.
  bool hasActiveVectorLength() const;

  struct VPLegalization {
    enum VPTransform {
      // Keep the predicating parameter.
      Legal = 0,
      // Where legal, discard the predicate parameter.
      Discard = 1,
      // Transform into something else that is also predicating.
      Convert = 2
    };

    // How to transform the EVL parameter.
    // Legal:   keep the EVL parameter as it is.
    // Discard: ignore the EVL parameter where it is safe to do so.
    // Convert: fold the EVL into the mask parameter.
    VPTransform EVLParamStrategy;

    // How to transform the operator.
    // Legal:   the target supports this operator.
    // Convert: convert this to a non-VP operation.
    // The 'Discard' strategy is invalid.
    VPTransform OpStrategy;

    bool shouldDoNothing() const {
      return (EVLParamStrategy == Legal) && (OpStrategy == Legal);
    }
    VPLegalization(VPTransform EVLParamStrategy, VPTransform OpStrategy)
        : EVLParamStrategy(EVLParamStrategy), OpStrategy(OpStrategy) {}
  };

  /// \returns How the target needs this vector-predicated operation to be
  /// transformed.
  VPLegalization getVPLegalizationStrategy(const VPIntrinsic &PI) const;
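  /// An illustrative sketch of how a legalization pass (e.g.
  /// ExpandVectorPredication) might act on this strategy, assuming a
  /// \c VPIntrinsic \c VPI and \c TTI are in scope:
  /// \code
  ///   auto Strategy = TTI.getVPLegalizationStrategy(VPI);
  ///   if (Strategy.shouldDoNothing())
  ///     return; // Both the EVL parameter and the operation are legal.
  ///   // Otherwise, first fold or discard the EVL parameter as requested
  ///   // by Strategy.EVLParamStrategy, then lower the operation itself
  ///   // according to Strategy.OpStrategy.
  /// \endcode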
  /// @}

  /// @}

private:
  /// Estimate the latency of the specified instruction.
  /// Returns 1 as the default value.
  InstructionCost getInstructionLatency(const Instruction *I) const;

  /// Returns the expected throughput cost of the instruction.
  /// Returns -1 if the cost is unknown.
  InstructionCost getInstructionThroughput(const Instruction *I) const;

  /// The abstract base class used to type erase specific TTI
  /// implementations.
  class Concept;

  /// The template model for the base class which wraps a concrete
  /// implementation in a type erased interface.
  template <typename T> class Model;

  std::unique_ptr<Concept> TTIImpl;
};

class TargetTransformInfo::Concept {
public:
  virtual ~Concept() = 0;
  virtual const DataLayout &getDataLayout() const = 0;
  virtual InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr,
                                     ArrayRef<const Value *> Operands,
                                     TTI::TargetCostKind CostKind) = 0;
  virtual unsigned getInliningThresholdMultiplier() = 0;
  virtual unsigned adjustInliningThreshold(const CallBase *CB) = 0;
  virtual int getInlinerVectorBonusPercent() = 0;
  virtual InstructionCost getMemcpyCost(const Instruction *I) = 0;
  virtual unsigned
  getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JTSize,
                                   ProfileSummaryInfo *PSI,
                                   BlockFrequencyInfo *BFI) = 0;
  virtual InstructionCost getUserCost(const User *U,
                                      ArrayRef<const Value *> Operands,
                                      TargetCostKind CostKind) = 0;
  virtual BranchProbability getPredictableBranchThreshold() = 0;
  virtual bool hasBranchDivergence() = 0;
  virtual bool useGPUDivergenceAnalysis() = 0;
  virtual bool isSourceOfDivergence(const Value *V) = 0;
  virtual bool isAlwaysUniform(const Value *V) = 0;
  virtual unsigned getFlatAddressSpace() = 0;
  virtual bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
                                          Intrinsic::ID IID) const = 0;
  virtual bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const = 0;
  virtual unsigned getAssumedAddrSpace(const Value *V) const = 0;
  virtual Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II,
                                                  Value *OldV,
                                                  Value *NewV) const = 0;
  virtual bool isLoweredToCall(const Function *F) = 0;
  virtual void getUnrollingPreferences(Loop *L, ScalarEvolution &,
                                       UnrollingPreferences &UP) = 0;
  virtual void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                     PeelingPreferences &PP) = 0;
  virtual bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                        AssumptionCache &AC,
                                        TargetLibraryInfo *LibInfo,
                                        HardwareLoopInfo &HWLoopInfo) = 0;
  virtual bool
  preferPredicateOverEpilogue(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
                              AssumptionCache &AC, TargetLibraryInfo *TLI,
                              DominatorTree *DT, const LoopAccessInfo *LAI) = 0;
  virtual bool emitGetActiveLaneMask() = 0;
  virtual Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
                                                       IntrinsicInst &II) = 0;
  virtual Optional<Value *>
  simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
                                   APInt DemandedMask, KnownBits &Known,
                                   bool &KnownBitsComputed) = 0;
  virtual Optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
      APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) = 0;
  virtual bool isLegalAddImmediate(int64_t Imm) = 0;
  virtual bool isLegalICmpImmediate(int64_t Imm) = 0;
  virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                     int64_t BaseOffset, bool HasBaseReg,
                                     int64_t Scale, unsigned AddrSpace,
                                     Instruction *I) = 0;
  virtual bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                             TargetTransformInfo::LSRCost &C2) = 0;
  virtual bool isNumRegsMajorCostOfLSR() = 0;
  virtual bool isProfitableLSRChainElement(Instruction *I) = 0;
  virtual bool canMacroFuseCmp() = 0;
  virtual bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
                          LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC,
                          TargetLibraryInfo *LibInfo) = 0;
  virtual AddressingModeKind
  getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const = 0;
  virtual bool isLegalMaskedStore(Type *DataType, Align Alignment) = 0;
  virtual bool isLegalMaskedLoad(Type *DataType, Align Alignment) = 0;
  virtual bool isLegalNTStore(Type *DataType, Align Alignment) = 0;
  virtual bool isLegalNTLoad(Type *DataType, Align Alignment) = 0;
  virtual bool isLegalMaskedScatter(Type *DataType, Align Alignment) = 0;
  virtual bool isLegalMaskedGather(Type *DataType, Align Alignment) = 0;
  virtual bool isLegalMaskedCompressStore(Type *DataType) = 0;
  virtual bool isLegalMaskedExpandLoad(Type *DataType) = 0;
  virtual bool hasDivRemOp(Type *DataType, bool IsSigned) = 0;
  virtual bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) = 0;
  virtual bool prefersVectorizedAddressing() = 0;
  virtual InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                               int64_t BaseOffset,
                                               bool HasBaseReg, int64_t Scale,
                                               unsigned AddrSpace) = 0;
  virtual bool LSRWithInstrQueries() = 0;
  virtual bool isTruncateFree(Type *Ty1, Type *Ty2) = 0;
  virtual bool isProfitableToHoist(Instruction *I) = 0;
  virtual bool useAA() = 0;
  virtual bool isTypeLegal(Type *Ty) = 0;
  virtual InstructionCost getRegUsageForType(Type *Ty) = 0;
  virtual bool shouldBuildLookupTables() = 0;
  virtual bool shouldBuildLookupTablesForConstant(Constant *C) = 0;
  virtual bool shouldBuildRelLookupTables() = 0;
  virtual bool useColdCCForColdCall(Function &F) = 0;
  virtual InstructionCost getScalarizationOverhead(VectorType *Ty,
                                                   const APInt &DemandedElts,
                                                   bool Insert,
                                                   bool Extract) = 0;
  virtual InstructionCost
  getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                   ArrayRef<Type *> Tys) = 0;
  virtual bool supportsEfficientVectorElementLoadStore() = 0;
  virtual bool enableAggressiveInterleaving(bool LoopHasReductions) = 0;
  virtual MemCmpExpansionOptions
  enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const = 0;
  virtual bool enableInterleavedAccessVectorization() = 0;
  virtual bool enableMaskedInterleavedAccessVectorization() = 0;
  virtual bool isFPVectorizationPotentiallyUnsafe() = 0;
  virtual bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                              unsigned BitWidth,
                                              unsigned AddressSpace,
                                              Align Alignment,
                                              bool *Fast) = 0;
  virtual PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) = 0;
  virtual bool haveFastSqrt(Type *Ty) = 0;
  virtual bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) = 0;
  virtual InstructionCost getFPOpCost(Type *Ty) = 0;
  virtual InstructionCost getIntImmCodeSizeCost(unsigned Opc, unsigned Idx,
                                                const APInt &Imm, Type *Ty) = 0;
  virtual InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
                                        TargetCostKind CostKind) = 0;
  virtual InstructionCost getIntImmCostInst(unsigned Opc, unsigned Idx,
                                            const APInt &Imm, Type *Ty,
                                            TargetCostKind CostKind,
                                            Instruction *Inst = nullptr) = 0;
  virtual InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                              const APInt &Imm, Type *Ty,
                                              TargetCostKind CostKind) = 0;
  virtual unsigned getNumberOfRegisters(unsigned ClassID) const = 0;
  virtual unsigned getRegisterClassForType(bool Vector,
                                           Type *Ty = nullptr) const = 0;
  virtual const char *getRegisterClassName(unsigned ClassID) const = 0;
  virtual TypeSize getRegisterBitWidth(RegisterKind K) const = 0;
  virtual unsigned getMinVectorRegisterBitWidth() const = 0;
  virtual Optional<unsigned> getMaxVScale() const = 0;
  virtual bool shouldMaximizeVectorBandwidth() const = 0;
  virtual ElementCount getMinimumVF(unsigned ElemWidth,
                                    bool IsScalable) const = 0;
  virtual unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const = 0;
  virtual bool shouldConsiderAddressTypePromotion(
      const Instruction &I, bool &AllowPromotionWithoutCommonHeader) = 0;
  virtual unsigned getCacheLineSize() const = 0;
  virtual Optional<unsigned> getCacheSize(CacheLevel Level) const = 0;
  virtual Optional<unsigned> getCacheAssociativity(CacheLevel Level) const = 0;

  /// \return How far ahead of a load we should place the prefetch
  /// instruction. This is currently measured in number of
  /// instructions.
  virtual unsigned getPrefetchDistance() const = 0;

  /// \return Some HW prefetchers can handle accesses up to a certain
  /// constant stride. This is the minimum stride in bytes where it
  /// makes sense to start adding SW prefetches. The default is 1,
  /// i.e. prefetch with any stride. Sometimes prefetching is beneficial
  /// even below the HW prefetcher limit, and the arguments provided are
  /// meant to serve as a basis for deciding this for a particular loop.
  virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses,
                                        unsigned NumStridedMemAccesses,
                                        unsigned NumPrefetches,
                                        bool HasCall) const = 0;

  /// \return The maximum number of iterations to prefetch ahead. If
  /// the required number of iterations is more than this number, no
  /// prefetching is performed.
  virtual unsigned getMaxPrefetchIterationsAhead() const = 0;

  /// \return True if prefetching should also be done for writes.
  virtual bool enableWritePrefetching() const = 0;

  virtual unsigned getMaxInterleaveFactor(unsigned VF) = 0;
  virtual InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      OperandValueKind Opd1Info, OperandValueKind Opd2Info,
      OperandValueProperties Opd1PropInfo, OperandValueProperties Opd2PropInfo,
      ArrayRef<const Value *> Args, const Instruction *CxtI = nullptr) = 0;
  virtual InstructionCost getShuffleCost(ShuffleKind Kind, VectorType *Tp,
                                         ArrayRef<int> Mask, int Index,
                                         VectorType *SubTp) = 0;
  virtual InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst,
                                           Type *Src, CastContextHint CCH,
                                           TTI::TargetCostKind CostKind,
                                           const Instruction *I) = 0;
  virtual InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                                   VectorType *VecTy,
                                                   unsigned Index) = 0;
  virtual InstructionCost getCFInstrCost(unsigned Opcode,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I = nullptr) = 0;
  virtual InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                             Type *CondTy,
                                             CmpInst::Predicate VecPred,
                                             TTI::TargetCostKind CostKind,
                                             const Instruction *I) = 0;
  virtual InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
                                             unsigned Index) = 0;
  virtual InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src,
                                          Align Alignment,
                                          unsigned AddressSpace,
                                          TTI::TargetCostKind CostKind,
                                          const Instruction *I) = 0;
  virtual InstructionCost
  getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
                        unsigned AddressSpace,
                        TTI::TargetCostKind CostKind) = 0;
  virtual InstructionCost
  getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr,
                         bool VariableMask, Align Alignment,
                         TTI::TargetCostKind CostKind,
                         const Instruction *I = nullptr) = 0;

  virtual InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
      bool UseMaskForCond = false, bool UseMaskForGaps = false) = 0;
  virtual InstructionCost
  getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                             Optional<FastMathFlags> FMF,
                             TTI::TargetCostKind CostKind) = 0;
  virtual InstructionCost
  getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy, bool IsUnsigned,
                         TTI::TargetCostKind CostKind) = 0;
  virtual InstructionCost getExtendedAddReductionCost(
      bool IsMLA, bool IsUnsigned, Type *ResTy, VectorType *Ty,
      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) = 0;
  virtual InstructionCost
  getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                        TTI::TargetCostKind CostKind) = 0;
  virtual InstructionCost getCallInstrCost(Function *F, Type *RetTy,
                                           ArrayRef<Type *> Tys,
                                           TTI::TargetCostKind CostKind) = 0;
  virtual unsigned getNumberOfParts(Type *Tp) = 0;
  virtual InstructionCost
  getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                            const SCEV *Ptr) = 0;
  virtual InstructionCost
  getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) = 0;
  virtual bool getTgtMemIntrinsic(IntrinsicInst *Inst,
                                  MemIntrinsicInfo &Info) = 0;
  virtual unsigned getAtomicMemIntrinsicMaxElementSize() const = 0;
  virtual Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                                   Type *ExpectedType) = 0;
  virtual Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
                                          unsigned SrcAddrSpace,
                                          unsigned DestAddrSpace,
                                          unsigned SrcAlign,
                                          unsigned DestAlign) const = 0;
  virtual void getMemcpyLoopResidualLoweringType(
      SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
      unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
      unsigned SrcAlign, unsigned DestAlign) const = 0;
  virtual bool areInlineCompatible(const Function *Caller,
                                   const Function *Callee) const = 0;
  virtual bool
  areFunctionArgsABICompatible(const Function *Caller, const Function *Callee,
                               SmallPtrSetImpl<Argument *> &Args) const = 0;
  virtual bool isIndexedLoadLegal(MemIndexedMode Mode, Type *Ty) const = 0;
  virtual bool isIndexedStoreLegal(MemIndexedMode Mode, Type *Ty) const = 0;
  virtual unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const = 0;
  virtual bool isLegalToVectorizeLoad(LoadInst *LI) const = 0;
  virtual bool isLegalToVectorizeStore(StoreInst *SI) const = 0;
  virtual bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                           Align Alignment,
                                           unsigned AddrSpace) const = 0;
  virtual bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                            Align Alignment,
                                            unsigned AddrSpace) const = 0;
  virtual bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc,
                                           ElementCount VF) const = 0;
  virtual bool isElementTypeLegalForScalableVector(Type *Ty) const = 0;
  virtual unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                                       unsigned ChainSizeInBytes,
                                       VectorType *VecTy) const = 0;
  virtual unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                        unsigned ChainSizeInBytes,
                                        VectorType *VecTy) const = 0;
  virtual bool preferInLoopReduction(unsigned Opcode, Type *Ty,
                                     ReductionFlags) const = 0;
  virtual bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty,
                                               ReductionFlags) const = 0;
  virtual bool shouldExpandReduction(const IntrinsicInst *II) const = 0;
  virtual unsigned getGISelRematGlobalCost() const = 0;
  virtual bool supportsScalableVectors() const = 0;
  virtual bool hasActiveVectorLength() const = 0;
  virtual InstructionCost getInstructionLatency(const Instruction *I) = 0;
  virtual VPLegalization
  getVPLegalizationStrategy(const VPIntrinsic &PI) const = 0;
};
1717
1718 template <typename T>
1719 class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
1720 T Impl;
1721
1722 public:
Model(T Impl)1723 Model(T Impl) : Impl(std::move(Impl)) {}
~Model()1724 ~Model() override {}
1725
getDataLayout()1726 const DataLayout &getDataLayout() const override {
1727 return Impl.getDataLayout();
1728 }
1729
1730 InstructionCost
getGEPCost(Type * PointeeType,const Value * Ptr,ArrayRef<const Value * > Operands,enum TargetTransformInfo::TargetCostKind CostKind)1731 getGEPCost(Type *PointeeType, const Value *Ptr,
1732 ArrayRef<const Value *> Operands,
1733 enum TargetTransformInfo::TargetCostKind CostKind) override {
1734 return Impl.getGEPCost(PointeeType, Ptr, Operands);
1735 }
getInliningThresholdMultiplier()1736 unsigned getInliningThresholdMultiplier() override {
1737 return Impl.getInliningThresholdMultiplier();
1738 }
adjustInliningThreshold(const CallBase * CB)1739 unsigned adjustInliningThreshold(const CallBase *CB) override {
1740 return Impl.adjustInliningThreshold(CB);
1741 }
getInlinerVectorBonusPercent()1742 int getInlinerVectorBonusPercent() override {
1743 return Impl.getInlinerVectorBonusPercent();
1744 }
getMemcpyCost(const Instruction * I)1745 InstructionCost getMemcpyCost(const Instruction *I) override {
1746 return Impl.getMemcpyCost(I);
1747 }
getUserCost(const User * U,ArrayRef<const Value * > Operands,TargetCostKind CostKind)1748 InstructionCost getUserCost(const User *U, ArrayRef<const Value *> Operands,
1749 TargetCostKind CostKind) override {
1750 return Impl.getUserCost(U, Operands, CostKind);
1751 }
getPredictableBranchThreshold()1752 BranchProbability getPredictableBranchThreshold() override {
1753 return Impl.getPredictableBranchThreshold();
1754 }
hasBranchDivergence()1755 bool hasBranchDivergence() override { return Impl.hasBranchDivergence(); }
useGPUDivergenceAnalysis()1756 bool useGPUDivergenceAnalysis() override {
1757 return Impl.useGPUDivergenceAnalysis();
1758 }
isSourceOfDivergence(const Value * V)1759 bool isSourceOfDivergence(const Value *V) override {
1760 return Impl.isSourceOfDivergence(V);
1761 }
1762
isAlwaysUniform(const Value * V)1763 bool isAlwaysUniform(const Value *V) override {
1764 return Impl.isAlwaysUniform(V);
1765 }
1766
getFlatAddressSpace()1767 unsigned getFlatAddressSpace() override { return Impl.getFlatAddressSpace(); }
1768
collectFlatAddressOperands(SmallVectorImpl<int> & OpIndexes,Intrinsic::ID IID)1769 bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
1770 Intrinsic::ID IID) const override {
1771 return Impl.collectFlatAddressOperands(OpIndexes, IID);
1772 }
1773
isNoopAddrSpaceCast(unsigned FromAS,unsigned ToAS)1774 bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override {
1775 return Impl.isNoopAddrSpaceCast(FromAS, ToAS);
1776 }
1777
getAssumedAddrSpace(const Value * V)1778 unsigned getAssumedAddrSpace(const Value *V) const override {
1779 return Impl.getAssumedAddrSpace(V);
1780 }
1781
rewriteIntrinsicWithAddressSpace(IntrinsicInst * II,Value * OldV,Value * NewV)1782 Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
1783 Value *NewV) const override {
1784 return Impl.rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
1785 }
1786
isLoweredToCall(const Function * F)1787 bool isLoweredToCall(const Function *F) override {
1788 return Impl.isLoweredToCall(F);
1789 }
getUnrollingPreferences(Loop * L,ScalarEvolution & SE,UnrollingPreferences & UP)1790 void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
1791 UnrollingPreferences &UP) override {
1792 return Impl.getUnrollingPreferences(L, SE, UP);
1793 }
getPeelingPreferences(Loop * L,ScalarEvolution & SE,PeelingPreferences & PP)1794 void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
1795 PeelingPreferences &PP) override {
1796 return Impl.getPeelingPreferences(L, SE, PP);
1797 }
isHardwareLoopProfitable(Loop * L,ScalarEvolution & SE,AssumptionCache & AC,TargetLibraryInfo * LibInfo,HardwareLoopInfo & HWLoopInfo)1798 bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
1799 AssumptionCache &AC, TargetLibraryInfo *LibInfo,
1800 HardwareLoopInfo &HWLoopInfo) override {
1801 return Impl.isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
1802 }
preferPredicateOverEpilogue(Loop * L,LoopInfo * LI,ScalarEvolution & SE,AssumptionCache & AC,TargetLibraryInfo * TLI,DominatorTree * DT,const LoopAccessInfo * LAI)1803 bool preferPredicateOverEpilogue(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
1804 AssumptionCache &AC, TargetLibraryInfo *TLI,
1805 DominatorTree *DT,
1806 const LoopAccessInfo *LAI) override {
1807 return Impl.preferPredicateOverEpilogue(L, LI, SE, AC, TLI, DT, LAI);
1808 }
emitGetActiveLaneMask()1809 bool emitGetActiveLaneMask() override {
1810 return Impl.emitGetActiveLaneMask();
1811 }
instCombineIntrinsic(InstCombiner & IC,IntrinsicInst & II)1812 Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
1813 IntrinsicInst &II) override {
1814 return Impl.instCombineIntrinsic(IC, II);
1815 }
1816 Optional<Value *>
simplifyDemandedUseBitsIntrinsic(InstCombiner & IC,IntrinsicInst & II,APInt DemandedMask,KnownBits & Known,bool & KnownBitsComputed)1817 simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
1818 APInt DemandedMask, KnownBits &Known,
1819 bool &KnownBitsComputed) override {
1820 return Impl.simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
1821 KnownBitsComputed);
1822 }
simplifyDemandedVectorEltsIntrinsic(InstCombiner & IC,IntrinsicInst & II,APInt DemandedElts,APInt & UndefElts,APInt & UndefElts2,APInt & UndefElts3,std::function<void (Instruction *,unsigned,APInt,APInt &)> SimplifyAndSetOp)1823 Optional<Value *> simplifyDemandedVectorEltsIntrinsic(
1824 InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
1825 APInt &UndefElts2, APInt &UndefElts3,
1826 std::function<void(Instruction *, unsigned, APInt, APInt &)>
1827 SimplifyAndSetOp) override {
1828 return Impl.simplifyDemandedVectorEltsIntrinsic(
1829 IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
1830 SimplifyAndSetOp);
1831 }
isLegalAddImmediate(int64_t Imm)1832 bool isLegalAddImmediate(int64_t Imm) override {
1833 return Impl.isLegalAddImmediate(Imm);
1834 }
isLegalICmpImmediate(int64_t Imm)1835 bool isLegalICmpImmediate(int64_t Imm) override {
1836 return Impl.isLegalICmpImmediate(Imm);
1837 }
isLegalAddressingMode(Type * Ty,GlobalValue * BaseGV,int64_t BaseOffset,bool HasBaseReg,int64_t Scale,unsigned AddrSpace,Instruction * I)1838 bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
1839 bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
1840 Instruction *I) override {
1841 return Impl.isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg, Scale,
1842 AddrSpace, I);
1843 }
isLSRCostLess(TargetTransformInfo::LSRCost & C1,TargetTransformInfo::LSRCost & C2)1844 bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
1845 TargetTransformInfo::LSRCost &C2) override {
1846 return Impl.isLSRCostLess(C1, C2);
1847 }
isNumRegsMajorCostOfLSR()1848 bool isNumRegsMajorCostOfLSR() override {
1849 return Impl.isNumRegsMajorCostOfLSR();
1850 }
isProfitableLSRChainElement(Instruction * I)1851 bool isProfitableLSRChainElement(Instruction *I) override {
1852 return Impl.isProfitableLSRChainElement(I);
1853 }
canMacroFuseCmp()1854 bool canMacroFuseCmp() override { return Impl.canMacroFuseCmp(); }
canSaveCmp(Loop * L,BranchInst ** BI,ScalarEvolution * SE,LoopInfo * LI,DominatorTree * DT,AssumptionCache * AC,TargetLibraryInfo * LibInfo)1855 bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI,
1856 DominatorTree *DT, AssumptionCache *AC,
1857 TargetLibraryInfo *LibInfo) override {
1858 return Impl.canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);
1859 }
1860 AddressingModeKind
getPreferredAddressingMode(const Loop * L,ScalarEvolution * SE)1861 getPreferredAddressingMode(const Loop *L,
1862 ScalarEvolution *SE) const override {
1863 return Impl.getPreferredAddressingMode(L, SE);
1864 }
isLegalMaskedStore(Type * DataType,Align Alignment)1865 bool isLegalMaskedStore(Type *DataType, Align Alignment) override {
1866 return Impl.isLegalMaskedStore(DataType, Alignment);
1867 }
isLegalMaskedLoad(Type * DataType,Align Alignment)1868 bool isLegalMaskedLoad(Type *DataType, Align Alignment) override {
1869 return Impl.isLegalMaskedLoad(DataType, Alignment);
1870 }
isLegalNTStore(Type * DataType,Align Alignment)1871 bool isLegalNTStore(Type *DataType, Align Alignment) override {
1872 return Impl.isLegalNTStore(DataType, Alignment);
1873 }
isLegalNTLoad(Type * DataType,Align Alignment)1874 bool isLegalNTLoad(Type *DataType, Align Alignment) override {
1875 return Impl.isLegalNTLoad(DataType, Alignment);
1876 }
isLegalMaskedScatter(Type * DataType,Align Alignment)1877 bool isLegalMaskedScatter(Type *DataType, Align Alignment) override {
1878 return Impl.isLegalMaskedScatter(DataType, Alignment);
1879 }
isLegalMaskedGather(Type * DataType,Align Alignment)1880 bool isLegalMaskedGather(Type *DataType, Align Alignment) override {
1881 return Impl.isLegalMaskedGather(DataType, Alignment);
1882 }
isLegalMaskedCompressStore(Type * DataType)1883 bool isLegalMaskedCompressStore(Type *DataType) override {
1884 return Impl.isLegalMaskedCompressStore(DataType);
1885 }
isLegalMaskedExpandLoad(Type * DataType)1886 bool isLegalMaskedExpandLoad(Type *DataType) override {
1887 return Impl.isLegalMaskedExpandLoad(DataType);
1888 }
hasDivRemOp(Type * DataType,bool IsSigned)1889 bool hasDivRemOp(Type *DataType, bool IsSigned) override {
1890 return Impl.hasDivRemOp(DataType, IsSigned);
1891 }
hasVolatileVariant(Instruction * I,unsigned AddrSpace)1892 bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) override {
1893 return Impl.hasVolatileVariant(I, AddrSpace);
1894 }
prefersVectorizedAddressing()1895 bool prefersVectorizedAddressing() override {
1896 return Impl.prefersVectorizedAddressing();
1897 }
getScalingFactorCost(Type * Ty,GlobalValue * BaseGV,int64_t BaseOffset,bool HasBaseReg,int64_t Scale,unsigned AddrSpace)1898 InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
1899 int64_t BaseOffset, bool HasBaseReg,
1900 int64_t Scale,
1901 unsigned AddrSpace) override {
1902 return Impl.getScalingFactorCost(Ty, BaseGV, BaseOffset, HasBaseReg, Scale,
1903 AddrSpace);
1904 }
LSRWithInstrQueries()1905 bool LSRWithInstrQueries() override { return Impl.LSRWithInstrQueries(); }
isTruncateFree(Type * Ty1,Type * Ty2)1906 bool isTruncateFree(Type *Ty1, Type *Ty2) override {
1907 return Impl.isTruncateFree(Ty1, Ty2);
1908 }
isProfitableToHoist(Instruction * I)1909 bool isProfitableToHoist(Instruction *I) override {
1910 return Impl.isProfitableToHoist(I);
1911 }
useAA()1912 bool useAA() override { return Impl.useAA(); }
isTypeLegal(Type * Ty)1913 bool isTypeLegal(Type *Ty) override { return Impl.isTypeLegal(Ty); }
getRegUsageForType(Type * Ty)1914 InstructionCost getRegUsageForType(Type *Ty) override {
1915 return Impl.getRegUsageForType(Ty);
1916 }
shouldBuildLookupTables()1917 bool shouldBuildLookupTables() override {
1918 return Impl.shouldBuildLookupTables();
1919 }
shouldBuildLookupTablesForConstant(Constant * C)1920 bool shouldBuildLookupTablesForConstant(Constant *C) override {
1921 return Impl.shouldBuildLookupTablesForConstant(C);
1922 }
shouldBuildRelLookupTables()1923 bool shouldBuildRelLookupTables() override {
1924 return Impl.shouldBuildRelLookupTables();
1925 }
useColdCCForColdCall(Function & F)1926 bool useColdCCForColdCall(Function &F) override {
1927 return Impl.useColdCCForColdCall(F);
1928 }
1929
getScalarizationOverhead(VectorType * Ty,const APInt & DemandedElts,bool Insert,bool Extract)1930 InstructionCost getScalarizationOverhead(VectorType *Ty,
1931 const APInt &DemandedElts,
1932 bool Insert, bool Extract) override {
1933 return Impl.getScalarizationOverhead(Ty, DemandedElts, Insert, Extract);
1934 }
1935 InstructionCost
getOperandsScalarizationOverhead(ArrayRef<const Value * > Args,ArrayRef<Type * > Tys)1936 getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
1937 ArrayRef<Type *> Tys) override {
1938 return Impl.getOperandsScalarizationOverhead(Args, Tys);
1939 }
1940
supportsEfficientVectorElementLoadStore()1941 bool supportsEfficientVectorElementLoadStore() override {
1942 return Impl.supportsEfficientVectorElementLoadStore();
1943 }
1944
enableAggressiveInterleaving(bool LoopHasReductions)1945 bool enableAggressiveInterleaving(bool LoopHasReductions) override {
1946 return Impl.enableAggressiveInterleaving(LoopHasReductions);
1947 }
enableMemCmpExpansion(bool OptSize,bool IsZeroCmp)1948 MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
1949 bool IsZeroCmp) const override {
1950 return Impl.enableMemCmpExpansion(OptSize, IsZeroCmp);
1951 }
enableInterleavedAccessVectorization()1952 bool enableInterleavedAccessVectorization() override {
1953 return Impl.enableInterleavedAccessVectorization();
1954 }
enableMaskedInterleavedAccessVectorization()1955 bool enableMaskedInterleavedAccessVectorization() override {
1956 return Impl.enableMaskedInterleavedAccessVectorization();
1957 }
isFPVectorizationPotentiallyUnsafe()1958 bool isFPVectorizationPotentiallyUnsafe() override {
1959 return Impl.isFPVectorizationPotentiallyUnsafe();
1960 }
allowsMisalignedMemoryAccesses(LLVMContext & Context,unsigned BitWidth,unsigned AddressSpace,Align Alignment,bool * Fast)1961 bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
1962 unsigned AddressSpace, Align Alignment,
1963 bool *Fast) override {
1964 return Impl.allowsMisalignedMemoryAccesses(Context, BitWidth, AddressSpace,
1965 Alignment, Fast);
1966 }
getPopcntSupport(unsigned IntTyWidthInBit)1967 PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) override {
1968 return Impl.getPopcntSupport(IntTyWidthInBit);
1969 }
haveFastSqrt(Type * Ty)1970 bool haveFastSqrt(Type *Ty) override { return Impl.haveFastSqrt(Ty); }
1971
isFCmpOrdCheaperThanFCmpZero(Type * Ty)1972 bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) override {
1973 return Impl.isFCmpOrdCheaperThanFCmpZero(Ty);
1974 }
1975
getFPOpCost(Type * Ty)1976 InstructionCost getFPOpCost(Type *Ty) override {
1977 return Impl.getFPOpCost(Ty);
1978 }
1979
getIntImmCodeSizeCost(unsigned Opc,unsigned Idx,const APInt & Imm,Type * Ty)1980 InstructionCost getIntImmCodeSizeCost(unsigned Opc, unsigned Idx,
1981 const APInt &Imm, Type *Ty) override {
1982 return Impl.getIntImmCodeSizeCost(Opc, Idx, Imm, Ty);
1983 }
getIntImmCost(const APInt & Imm,Type * Ty,TargetCostKind CostKind)1984 InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
1985 TargetCostKind CostKind) override {
1986 return Impl.getIntImmCost(Imm, Ty, CostKind);
1987 }
1988 InstructionCost getIntImmCostInst(unsigned Opc, unsigned Idx,
1989 const APInt &Imm, Type *Ty,
1990 TargetCostKind CostKind,
1991 Instruction *Inst = nullptr) override {
1992 return Impl.getIntImmCostInst(Opc, Idx, Imm, Ty, CostKind, Inst);
1993 }
getIntImmCostIntrin(Intrinsic::ID IID,unsigned Idx,const APInt & Imm,Type * Ty,TargetCostKind CostKind)1994 InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
1995 const APInt &Imm, Type *Ty,
1996 TargetCostKind CostKind) override {
1997 return Impl.getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);
1998 }
getNumberOfRegisters(unsigned ClassID)1999 unsigned getNumberOfRegisters(unsigned ClassID) const override {
2000 return Impl.getNumberOfRegisters(ClassID);
2001 }
2002 unsigned getRegisterClassForType(bool Vector,
2003 Type *Ty = nullptr) const override {
2004 return Impl.getRegisterClassForType(Vector, Ty);
2005 }
getRegisterClassName(unsigned ClassID)2006 const char *getRegisterClassName(unsigned ClassID) const override {
2007 return Impl.getRegisterClassName(ClassID);
2008 }
getRegisterBitWidth(RegisterKind K)2009 TypeSize getRegisterBitWidth(RegisterKind K) const override {
2010 return Impl.getRegisterBitWidth(K);
2011 }
getMinVectorRegisterBitWidth()2012 unsigned getMinVectorRegisterBitWidth() const override {
2013 return Impl.getMinVectorRegisterBitWidth();
2014 }
getMaxVScale()2015 Optional<unsigned> getMaxVScale() const override {
2016 return Impl.getMaxVScale();
2017 }
shouldMaximizeVectorBandwidth()2018 bool shouldMaximizeVectorBandwidth() const override {
2019 return Impl.shouldMaximizeVectorBandwidth();
2020 }
getMinimumVF(unsigned ElemWidth,bool IsScalable)2021 ElementCount getMinimumVF(unsigned ElemWidth,
2022 bool IsScalable) const override {
2023 return Impl.getMinimumVF(ElemWidth, IsScalable);
2024 }
getMaximumVF(unsigned ElemWidth,unsigned Opcode)2025 unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const override {
2026 return Impl.getMaximumVF(ElemWidth, Opcode);
2027 }
shouldConsiderAddressTypePromotion(const Instruction & I,bool & AllowPromotionWithoutCommonHeader)2028 bool shouldConsiderAddressTypePromotion(
2029 const Instruction &I, bool &AllowPromotionWithoutCommonHeader) override {
2030 return Impl.shouldConsiderAddressTypePromotion(
2031 I, AllowPromotionWithoutCommonHeader);
2032 }
getCacheLineSize()2033 unsigned getCacheLineSize() const override { return Impl.getCacheLineSize(); }
getCacheSize(CacheLevel Level)2034 Optional<unsigned> getCacheSize(CacheLevel Level) const override {
2035 return Impl.getCacheSize(Level);
2036 }
getCacheAssociativity(CacheLevel Level)2037 Optional<unsigned> getCacheAssociativity(CacheLevel Level) const override {
2038 return Impl.getCacheAssociativity(Level);
2039 }
2040
2041 /// Return the preferred prefetch distance in terms of instructions.
2042 ///
getPrefetchDistance()2043 unsigned getPrefetchDistance() const override {
2044 return Impl.getPrefetchDistance();
2045 }
2046
2047 /// Return the minimum stride necessary to trigger software
2048 /// prefetching.
2049 ///
getMinPrefetchStride(unsigned NumMemAccesses,unsigned NumStridedMemAccesses,unsigned NumPrefetches,bool HasCall)2050 unsigned getMinPrefetchStride(unsigned NumMemAccesses,
2051 unsigned NumStridedMemAccesses,
2052 unsigned NumPrefetches,
2053 bool HasCall) const override {
2054 return Impl.getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
2055 NumPrefetches, HasCall);
2056 }
2057
2058 /// Return the maximum prefetch distance in terms of loop
2059 /// iterations.
2060 ///
getMaxPrefetchIterationsAhead()2061 unsigned getMaxPrefetchIterationsAhead() const override {
2062 return Impl.getMaxPrefetchIterationsAhead();
2063 }
2064
2065 /// \return True if prefetching should also be done for writes.
enableWritePrefetching()2066 bool enableWritePrefetching() const override {
2067 return Impl.enableWritePrefetching();
2068 }
2069
getMaxInterleaveFactor(unsigned VF)2070 unsigned getMaxInterleaveFactor(unsigned VF) override {
2071 return Impl.getMaxInterleaveFactor(VF);
2072 }
getEstimatedNumberOfCaseClusters(const SwitchInst & SI,unsigned & JTSize,ProfileSummaryInfo * PSI,BlockFrequencyInfo * BFI)2073 unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
2074 unsigned &JTSize,
2075 ProfileSummaryInfo *PSI,
2076 BlockFrequencyInfo *BFI) override {
2077 return Impl.getEstimatedNumberOfCaseClusters(SI, JTSize, PSI, BFI);
2078 }
2079 InstructionCost getArithmeticInstrCost(
2080 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
2081 OperandValueKind Opd1Info, OperandValueKind Opd2Info,
2082 OperandValueProperties Opd1PropInfo, OperandValueProperties Opd2PropInfo,
2083 ArrayRef<const Value *> Args,
2084 const Instruction *CxtI = nullptr) override {
2085 return Impl.getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info, Opd2Info,
2086 Opd1PropInfo, Opd2PropInfo, Args, CxtI);
2087 }
getShuffleCost(ShuffleKind Kind,VectorType * Tp,ArrayRef<int> Mask,int Index,VectorType * SubTp)2088 InstructionCost getShuffleCost(ShuffleKind Kind, VectorType *Tp,
2089 ArrayRef<int> Mask, int Index,
2090 VectorType *SubTp) override {
2091 return Impl.getShuffleCost(Kind, Tp, Mask, Index, SubTp);
2092 }
getCastInstrCost(unsigned Opcode,Type * Dst,Type * Src,CastContextHint CCH,TTI::TargetCostKind CostKind,const Instruction * I)2093 InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
2094 CastContextHint CCH,
2095 TTI::TargetCostKind CostKind,
2096 const Instruction *I) override {
2097 return Impl.getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
2098 }
getExtractWithExtendCost(unsigned Opcode,Type * Dst,VectorType * VecTy,unsigned Index)2099 InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
2100 VectorType *VecTy,
2101 unsigned Index) override {
2102 return Impl.getExtractWithExtendCost(Opcode, Dst, VecTy, Index);
2103 }
2104 InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
2105 const Instruction *I = nullptr) override {
2106 return Impl.getCFInstrCost(Opcode, CostKind, I);
2107 }
getCmpSelInstrCost(unsigned Opcode,Type * ValTy,Type * CondTy,CmpInst::Predicate VecPred,TTI::TargetCostKind CostKind,const Instruction * I)2108 InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
2109 CmpInst::Predicate VecPred,
2110 TTI::TargetCostKind CostKind,
2111 const Instruction *I) override {
2112 return Impl.getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
2113 }
getVectorInstrCost(unsigned Opcode,Type * Val,unsigned Index)2114 InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
2115 unsigned Index) override {
2116 return Impl.getVectorInstrCost(Opcode, Val, Index);
2117 }
getMemoryOpCost(unsigned Opcode,Type * Src,Align Alignment,unsigned AddressSpace,TTI::TargetCostKind CostKind,const Instruction * I)2118 InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
2119 unsigned AddressSpace,
2120 TTI::TargetCostKind CostKind,
2121 const Instruction *I) override {
2122 return Impl.getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
2123 CostKind, I);
2124 }
getMaskedMemoryOpCost(unsigned Opcode,Type * Src,Align Alignment,unsigned AddressSpace,TTI::TargetCostKind CostKind)2125 InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
2126 Align Alignment, unsigned AddressSpace,
2127 TTI::TargetCostKind CostKind) override {
2128 return Impl.getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
2129 CostKind);
2130 }
2131 InstructionCost
2132 getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr,
2133 bool VariableMask, Align Alignment,
2134 TTI::TargetCostKind CostKind,
2135 const Instruction *I = nullptr) override {
2136 return Impl.getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
2137 Alignment, CostKind, I);
2138 }
getInterleavedMemoryOpCost(unsigned Opcode,Type * VecTy,unsigned Factor,ArrayRef<unsigned> Indices,Align Alignment,unsigned AddressSpace,TTI::TargetCostKind CostKind,bool UseMaskForCond,bool UseMaskForGaps)2139 InstructionCost getInterleavedMemoryOpCost(
2140 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
2141 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
2142 bool UseMaskForCond, bool UseMaskForGaps) override {
2143 return Impl.getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
2144 Alignment, AddressSpace, CostKind,
2145 UseMaskForCond, UseMaskForGaps);
2146 }
2147 InstructionCost
getArithmeticReductionCost(unsigned Opcode,VectorType * Ty,Optional<FastMathFlags> FMF,TTI::TargetCostKind CostKind)2148 getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
2149 Optional<FastMathFlags> FMF,
2150 TTI::TargetCostKind CostKind) override {
2151 return Impl.getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
2152 }
2153 InstructionCost
getMinMaxReductionCost(VectorType * Ty,VectorType * CondTy,bool IsUnsigned,TTI::TargetCostKind CostKind)2154 getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy, bool IsUnsigned,
2155 TTI::TargetCostKind CostKind) override {
2156 return Impl.getMinMaxReductionCost(Ty, CondTy, IsUnsigned, CostKind);
2157 }
2158 InstructionCost getExtendedAddReductionCost(
2159 bool IsMLA, bool IsUnsigned, Type *ResTy, VectorType *Ty,
2160 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) override {
2161 return Impl.getExtendedAddReductionCost(IsMLA, IsUnsigned, ResTy, Ty,
2162 CostKind);
2163 }
getIntrinsicInstrCost(const IntrinsicCostAttributes & ICA,TTI::TargetCostKind CostKind)2164 InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
2165 TTI::TargetCostKind CostKind) override {
2166 return Impl.getIntrinsicInstrCost(ICA, CostKind);
2167 }
getCallInstrCost(Function * F,Type * RetTy,ArrayRef<Type * > Tys,TTI::TargetCostKind CostKind)2168 InstructionCost getCallInstrCost(Function *F, Type *RetTy,
2169 ArrayRef<Type *> Tys,
2170 TTI::TargetCostKind CostKind) override {
2171 return Impl.getCallInstrCost(F, RetTy, Tys, CostKind);
2172 }
getNumberOfParts(Type * Tp)2173 unsigned getNumberOfParts(Type *Tp) override {
2174 return Impl.getNumberOfParts(Tp);
2175 }
getAddressComputationCost(Type * Ty,ScalarEvolution * SE,const SCEV * Ptr)2176 InstructionCost getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
2177 const SCEV *Ptr) override {
2178 return Impl.getAddressComputationCost(Ty, SE, Ptr);
2179 }
getCostOfKeepingLiveOverCall(ArrayRef<Type * > Tys)2180 InstructionCost getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) override {
2181 return Impl.getCostOfKeepingLiveOverCall(Tys);
2182 }
getTgtMemIntrinsic(IntrinsicInst * Inst,MemIntrinsicInfo & Info)2183 bool getTgtMemIntrinsic(IntrinsicInst *Inst,
2184 MemIntrinsicInfo &Info) override {
2185 return Impl.getTgtMemIntrinsic(Inst, Info);
2186 }
getAtomicMemIntrinsicMaxElementSize()2187 unsigned getAtomicMemIntrinsicMaxElementSize() const override {
2188 return Impl.getAtomicMemIntrinsicMaxElementSize();
2189 }
getOrCreateResultFromMemIntrinsic(IntrinsicInst * Inst,Type * ExpectedType)2190 Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
2191 Type *ExpectedType) override {
2192 return Impl.getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
2193 }
getMemcpyLoopLoweringType(LLVMContext & Context,Value * Length,unsigned SrcAddrSpace,unsigned DestAddrSpace,unsigned SrcAlign,unsigned DestAlign)2194 Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
2195 unsigned SrcAddrSpace, unsigned DestAddrSpace,
2196 unsigned SrcAlign,
2197 unsigned DestAlign) const override {
2198 return Impl.getMemcpyLoopLoweringType(Context, Length, SrcAddrSpace,
2199 DestAddrSpace, SrcAlign, DestAlign);
2200 }
getMemcpyLoopResidualLoweringType(SmallVectorImpl<Type * > & OpsOut,LLVMContext & Context,unsigned RemainingBytes,unsigned SrcAddrSpace,unsigned DestAddrSpace,unsigned SrcAlign,unsigned DestAlign)2201 void getMemcpyLoopResidualLoweringType(
2202 SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
2203 unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
2204 unsigned SrcAlign, unsigned DestAlign) const override {
2205 Impl.getMemcpyLoopResidualLoweringType(OpsOut, Context, RemainingBytes,
2206 SrcAddrSpace, DestAddrSpace,
2207 SrcAlign, DestAlign);
2208 }
areInlineCompatible(const Function * Caller,const Function * Callee)2209 bool areInlineCompatible(const Function *Caller,
2210 const Function *Callee) const override {
2211 return Impl.areInlineCompatible(Caller, Callee);
2212 }
areFunctionArgsABICompatible(const Function * Caller,const Function * Callee,SmallPtrSetImpl<Argument * > & Args)2213 bool areFunctionArgsABICompatible(
2214 const Function *Caller, const Function *Callee,
2215 SmallPtrSetImpl<Argument *> &Args) const override {
2216 return Impl.areFunctionArgsABICompatible(Caller, Callee, Args);
2217 }
isIndexedLoadLegal(MemIndexedMode Mode,Type * Ty)2218 bool isIndexedLoadLegal(MemIndexedMode Mode, Type *Ty) const override {
2219 return Impl.isIndexedLoadLegal(Mode, Ty, getDataLayout());
2220 }
isIndexedStoreLegal(MemIndexedMode Mode,Type * Ty)2221 bool isIndexedStoreLegal(MemIndexedMode Mode, Type *Ty) const override {
2222 return Impl.isIndexedStoreLegal(Mode, Ty, getDataLayout());
2223 }
  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const override {
    return Impl.getLoadStoreVecRegBitWidth(AddrSpace);
  }
  bool isLegalToVectorizeLoad(LoadInst *LI) const override {
    return Impl.isLegalToVectorizeLoad(LI);
  }
  bool isLegalToVectorizeStore(StoreInst *SI) const override {
    return Impl.isLegalToVectorizeStore(SI);
  }
  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment,
                                   unsigned AddrSpace) const override {
    return Impl.isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
                                            AddrSpace);
  }
  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment,
                                    unsigned AddrSpace) const override {
    return Impl.isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
                                             AddrSpace);
  }
  bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc,
                                   ElementCount VF) const override {
    return Impl.isLegalToVectorizeReduction(RdxDesc, VF);
  }
  bool isElementTypeLegalForScalableVector(Type *Ty) const override {
    return Impl.isElementTypeLegalForScalableVector(Ty);
  }
  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                               unsigned ChainSizeInBytes,
                               VectorType *VecTy) const override {
    return Impl.getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);
  }
  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                unsigned ChainSizeInBytes,
                                VectorType *VecTy) const override {
    return Impl.getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
  }
  bool preferInLoopReduction(unsigned Opcode, Type *Ty,
                             ReductionFlags Flags) const override {
    return Impl.preferInLoopReduction(Opcode, Ty, Flags);
  }
  bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty,
                                       ReductionFlags Flags) const override {
    return Impl.preferPredicatedReductionSelect(Opcode, Ty, Flags);
  }
  bool shouldExpandReduction(const IntrinsicInst *II) const override {
    return Impl.shouldExpandReduction(II);
  }

  unsigned getGISelRematGlobalCost() const override {
    return Impl.getGISelRematGlobalCost();
  }

  bool supportsScalableVectors() const override {
    return Impl.supportsScalableVectors();
  }

  bool hasActiveVectorLength() const override {
    return Impl.hasActiveVectorLength();
  }

  InstructionCost getInstructionLatency(const Instruction *I) override {
    return Impl.getInstructionLatency(I);
  }

  VPLegalization
  getVPLegalizationStrategy(const VPIntrinsic &PI) const override {
    return Impl.getVPLegalizationStrategy(PI);
  }
};

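/// Type-erasing constructor: the concrete TTI implementation \p Impl is
/// wrapped in a \c Model object, so clients of \c TargetTransformInfo only
/// ever interact with the abstract interface.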
template <typename T>
TargetTransformInfo::TargetTransformInfo(T Impl)
    : TTIImpl(new Model<T>(Impl)) {}

/// Analysis pass providing the \c TargetTransformInfo.
///
/// The core idea of the TargetIRAnalysis is to expose an interface through
/// which LLVM targets can analyze and provide information about the middle
/// end's target-independent IR. This supports use cases such as target-aware
/// cost modeling of IR constructs.
///
/// This is a function analysis because much of the cost modeling for targets
/// is done in a subtarget specific way and LLVM supports compiling different
/// functions targeting different subtargets in order to support runtime
/// dispatch according to the observed subtarget.
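///
/// A minimal sketch of querying this analysis under the new pass manager
/// (illustrative only; \c F is assumed to be a \c Function, and real setups
/// usually obtain the analysis from a \c TargetMachine rather than default
/// constructing it):
/// \code
///   FunctionAnalysisManager FAM;
///   FAM.registerPass([] { return TargetIRAnalysis(); });
///   TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
/// \endcode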
class TargetIRAnalysis : public AnalysisInfoMixin<TargetIRAnalysis> {
public:
  typedef TargetTransformInfo Result;

  /// Default construct a target IR analysis.
  ///
  /// This will use the module's datalayout to construct a baseline
  /// conservative TTI result.
  TargetIRAnalysis();

  /// Construct an IR analysis pass around a target-provided callback.
  ///
  /// The callback will be called with a particular function for which the TTI
  /// is needed and must return a TTI object for that function.
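  ///
  /// For example, a target might route its own TTI through this hook (a
  /// sketch; \c TM is assumed to be a \c TargetMachine pointer that outlives
  /// the callback):
  /// \code
  ///   TargetIRAnalysis TIRA([TM](const Function &F) {
  ///     return TM->getTargetTransformInfo(F);
  ///   });
  /// \endcode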
  TargetIRAnalysis(std::function<Result(const Function &)> TTICallback);

  // Value semantics. We spell out the constructors for MSVC.
  TargetIRAnalysis(const TargetIRAnalysis &Arg)
      : TTICallback(Arg.TTICallback) {}
  TargetIRAnalysis(TargetIRAnalysis &&Arg)
      : TTICallback(std::move(Arg.TTICallback)) {}
  TargetIRAnalysis &operator=(const TargetIRAnalysis &RHS) {
    TTICallback = RHS.TTICallback;
    return *this;
  }
  TargetIRAnalysis &operator=(TargetIRAnalysis &&RHS) {
    TTICallback = std::move(RHS.TTICallback);
    return *this;
  }

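  /// Produce the TTI result for \p F by invoking the stored callback.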
  Result run(const Function &F, FunctionAnalysisManager &);

private:
  friend AnalysisInfoMixin<TargetIRAnalysis>;
  static AnalysisKey Key;

  /// The callback used to produce a result.
  ///
  /// We use a completely opaque callback so that targets can provide whatever
  /// mechanism they desire for constructing the TTI for a given function.
  ///
  /// FIXME: Should we really use std::function? It's relatively inefficient.
  /// It might be possible to arrange for even stateful callbacks to outlive
  /// the analysis and thus use a function_ref which would be lighter weight.
  /// This may also be less error prone as the callback is likely to reference
  /// the external TargetMachine, and that reference needs to never dangle.
  std::function<Result(const Function &)> TTICallback;

  /// Helper function used as the callback in the default constructor.
  static Result getDefaultTTI(const Function &F);
};

/// Wrapper pass for TargetTransformInfo.
///
/// This pass can be constructed from a TTI object, which it stores internally
/// and makes available to any pass that queries it.
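///
/// Legacy pass manager clients typically declare a dependency on this pass
/// and then query it per function, e.g.:
/// \code
///   TargetTransformInfo &TTI =
///       getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
/// \endcode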
class TargetTransformInfoWrapperPass : public ImmutablePass {
  TargetIRAnalysis TIRA;
  Optional<TargetTransformInfo> TTI;

  virtual void anchor();

public:
  static char ID;

  /// We must provide a default constructor for the pass but it should
  /// never be used.
  ///
  /// Use the constructor below or call one of the creation routines.
  TargetTransformInfoWrapperPass();

  explicit TargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);

  TargetTransformInfo &getTTI(const Function &F);
};

/// Create an analysis pass wrapper around a TTI object.
///
/// This analysis pass just holds the TTI instance and makes it available to
/// clients.
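///
/// A typical legacy pipeline setup might look like the following (a sketch;
/// \c TM is assumed to be a \c TargetMachine pointer):
/// \code
///   legacy::PassManager PM;
///   PM.add(createTargetTransformInfoWrapperPass(TM->getTargetIRAnalysis()));
/// \endcode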
ImmutablePass *createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);

} // namespace llvm

#endif