1 //===- TargetTransformInfo.h ------------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This pass exposes codegen information to IR-level passes. Every
10 /// transformation that uses codegen information is broken into three parts:
11 /// 1. The IR-level analysis pass.
12 /// 2. The IR-level transformation interface which provides the needed
13 /// information.
14 /// 3. Codegen-level implementation which uses target-specific hooks.
15 ///
16 /// This file defines #2, which is the interface that IR-level transformations
17 /// use for querying the codegen.
18 ///
19 //===----------------------------------------------------------------------===//
20
21 #ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
22 #define LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
23
24 #include "llvm/IR/InstrTypes.h"
25 #include "llvm/IR/Operator.h"
26 #include "llvm/IR/PassManager.h"
27 #include "llvm/Pass.h"
28 #include "llvm/Support/AtomicOrdering.h"
29 #include "llvm/Support/DataTypes.h"
30 #include "llvm/Support/InstructionCost.h"
31 #include <functional>
32
33 namespace llvm {
34
35 namespace Intrinsic {
36 typedef unsigned ID;
37 }
38
39 class AssumptionCache;
40 class BlockFrequencyInfo;
41 class DominatorTree;
42 class BranchInst;
43 class CallBase;
44 class ExtractElementInst;
45 class Function;
46 class GlobalValue;
47 class InstCombiner;
48 class IntrinsicInst;
49 class LoadInst;
50 class LoopAccessInfo;
51 class Loop;
52 class LoopInfo;
53 class ProfileSummaryInfo;
54 class SCEV;
55 class ScalarEvolution;
56 class StoreInst;
57 class SwitchInst;
58 class TargetLibraryInfo;
59 class Type;
60 class User;
61 class Value;
62 struct KnownBits;
63 template <typename T> class Optional;
64
65 /// Information about a load/store intrinsic defined by the target.
66 struct MemIntrinsicInfo {
67 /// This is the pointer that the intrinsic is loading from or storing to.
68 /// If this is non-null, then analysis/optimization passes can assume that
69 /// this intrinsic is functionally equivalent to a load/store from this
70 /// pointer.
71 Value *PtrVal = nullptr;
72
73 // Ordering for atomic operations.
74 AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
75
76 // Same Id is set by the target for corresponding load/store intrinsics.
77 unsigned short MatchingId = 0;
78
79 bool ReadMem = false;
80 bool WriteMem = false;
81 bool IsVolatile = false;
82
  bool isUnordered() const {
    return (Ordering == AtomicOrdering::NotAtomic ||
            Ordering == AtomicOrdering::Unordered) &&
           !IsVolatile;
  }
88 };
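// Illustrative sketch only (not part of the interface): a target's
// getTgtMemIntrinsic() implementation, declared further down in this file,
// might populate this struct roughly as follows. The intrinsic name
// "mytarget_load" and the field values are assumptions for illustration, not
// a real target hook.
//
//   bool MyTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
//                                      MemIntrinsicInfo &Info) {
//     switch (Inst->getIntrinsicID()) {
//     case Intrinsic::mytarget_load: // hypothetical intrinsic
//       Info.PtrVal = Inst->getArgOperand(0);
//       Info.ReadMem = true;
//       Info.WriteMem = false;
//       Info.IsVolatile = false;
//       return true;
//     default:
//       return false;
//     }
//   }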
89
90 /// Attributes of a target dependent hardware loop.
91 struct HardwareLoopInfo {
92 HardwareLoopInfo() = delete;
  HardwareLoopInfo(Loop *L) : L(L) {}
94 Loop *L = nullptr;
95 BasicBlock *ExitBlock = nullptr;
96 BranchInst *ExitBranch = nullptr;
97 const SCEV *ExitCount = nullptr;
98 const SCEV *TripCount = nullptr;
99 IntegerType *CountType = nullptr;
100 Value *LoopDecrement = nullptr; // Decrement the loop counter by this
101 // value in every iteration.
102 bool IsNestingLegal = false; // Can a hardware loop be a parent to
103 // another hardware loop?
104 bool CounterInReg = false; // Should loop counter be updated in
105 // the loop via a phi?
106 bool PerformEntryTest = false; // Generate the intrinsic which also performs
107 // icmp ne zero on the loop counter value and
108 // produces an i1 to guard the loop entry.
109 bool isHardwareLoopCandidate(ScalarEvolution &SE, LoopInfo &LI,
110 DominatorTree &DT, bool ForceNestedLoop = false,
111 bool ForceHardwareLoopPHI = false);
112 bool canAnalyze(LoopInfo &LI);
113 };
114
115 class IntrinsicCostAttributes {
116 const IntrinsicInst *II = nullptr;
117 Type *RetTy = nullptr;
118 Intrinsic::ID IID;
119 SmallVector<Type *, 4> ParamTys;
120 SmallVector<const Value *, 4> Arguments;
121 FastMathFlags FMF;
122 ElementCount VF = ElementCount::getFixed(1);
123 // If ScalarizationCost is UINT_MAX, the cost of scalarizing the
124 // arguments and the return value will be computed based on types.
125 unsigned ScalarizationCost = std::numeric_limits<unsigned>::max();
126
127 public:
128 IntrinsicCostAttributes(const IntrinsicInst &I);
129
130 IntrinsicCostAttributes(Intrinsic::ID Id, const CallBase &CI);
131
132 IntrinsicCostAttributes(Intrinsic::ID Id, const CallBase &CI,
133 ElementCount Factor);
134
135 IntrinsicCostAttributes(Intrinsic::ID Id, const CallBase &CI,
136 ElementCount Factor, unsigned ScalarCost);
137
138 IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
139 ArrayRef<Type *> Tys, FastMathFlags Flags);
140
141 IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
142 ArrayRef<Type *> Tys, FastMathFlags Flags,
143 unsigned ScalarCost);
144
145 IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
146 ArrayRef<Type *> Tys, FastMathFlags Flags,
147 unsigned ScalarCost,
148 const IntrinsicInst *I);
149
150 IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
151 ArrayRef<Type *> Tys);
152
153 IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
154 ArrayRef<const Value *> Args);
155
  Intrinsic::ID getID() const { return IID; }
  const IntrinsicInst *getInst() const { return II; }
  Type *getReturnType() const { return RetTy; }
  ElementCount getVectorFactor() const { return VF; }
  FastMathFlags getFlags() const { return FMF; }
  unsigned getScalarizationCost() const { return ScalarizationCost; }
  const SmallVectorImpl<const Value *> &getArgs() const { return Arguments; }
  const SmallVectorImpl<Type *> &getArgTypes() const { return ParamTys; }
164
  bool isTypeBasedOnly() const {
    return Arguments.empty();
  }
168
  bool skipScalarizationCost() const {
    return ScalarizationCost != std::numeric_limits<unsigned>::max();
  }
172 };
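// Example (a minimal sketch): clients that already hold an intrinsic call can
// build the attributes directly from it and hand them to
// TargetTransformInfo::getIntrinsicInstrCost(), declared below. "TTI" and "II"
// (an IntrinsicInst) are assumed to be in scope.
//
//   IntrinsicCostAttributes ICA(II);
//   int Cost = TTI.getIntrinsicInstrCost(
//       ICA, TargetTransformInfo::TCK_RecipThroughput);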
173
174 class TargetTransformInfo;
175 typedef TargetTransformInfo TTI;
176
177 /// This pass provides access to the codegen interfaces that are needed
178 /// for IR-level transformations.
179 class TargetTransformInfo {
180 public:
181 /// Construct a TTI object using a type implementing the \c Concept
182 /// API below.
183 ///
184 /// This is used by targets to construct a TTI wrapping their target-specific
185 /// implementation that encodes appropriate costs for their target.
186 template <typename T> TargetTransformInfo(T Impl);
187
188 /// Construct a baseline TTI object using a minimal implementation of
189 /// the \c Concept API below.
190 ///
191 /// The TTI implementation will reflect the information in the DataLayout
192 /// provided if non-null.
193 explicit TargetTransformInfo(const DataLayout &DL);
194
195 // Provide move semantics.
196 TargetTransformInfo(TargetTransformInfo &&Arg);
197 TargetTransformInfo &operator=(TargetTransformInfo &&RHS);
198
199 // We need to define the destructor out-of-line to define our sub-classes
200 // out-of-line.
201 ~TargetTransformInfo();
202
203 /// Handle the invalidation of this information.
204 ///
205 /// When used as a result of \c TargetIRAnalysis this method will be called
206 /// when the function this was computed for changes. When it returns false,
207 /// the information is preserved across those changes.
  bool invalidate(Function &, const PreservedAnalyses &,
                  FunctionAnalysisManager::Invalidator &) {
    // FIXME: We should probably in some way ensure that the subtarget
    // information for a function hasn't changed.
    return false;
  }
214
215 /// \name Generic Target Information
216 /// @{
217
218 /// The kind of cost model.
219 ///
220 /// There are several different cost models that can be customized by the
221 /// target. The normalization of each cost model may be target specific.
222 enum TargetCostKind {
223 TCK_RecipThroughput, ///< Reciprocal throughput.
224 TCK_Latency, ///< The latency of instruction.
225 TCK_CodeSize, ///< Instruction code size.
226 TCK_SizeAndLatency ///< The weighted sum of size and latency.
227 };
228
229 /// Query the cost of a specified instruction.
230 ///
231 /// Clients should use this interface to query the cost of an existing
232 /// instruction. The instruction must have a valid parent (basic block).
233 ///
234 /// Note, this method does not cache the cost calculation and it
235 /// can be expensive in some cases.
  InstructionCost getInstructionCost(const Instruction *I,
                                     enum TargetCostKind kind) const {
    InstructionCost Cost;
    switch (kind) {
    case TCK_RecipThroughput:
      Cost = getInstructionThroughput(I);
      break;
    case TCK_Latency:
      Cost = getInstructionLatency(I);
      break;
    case TCK_CodeSize:
    case TCK_SizeAndLatency:
      Cost = getUserCost(I, kind);
      break;
    }
    if (Cost == -1)
      Cost.setInvalid();
    return Cost;
  }
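  // Example (sketch, assuming a Function &F and a TargetTransformInfo &TTI are
  // in scope): summing per-instruction costs is a common way clients consume
  // this query; an invalid total can be detected via InstructionCost::isValid().
  //
  //   InstructionCost Total = 0;
  //   for (const Instruction &I : instructions(F)) // llvm/IR/InstIterator.h
  //     Total += TTI.getInstructionCost(&I, TargetTransformInfo::TCK_CodeSize);
  //   if (!Total.isValid())
  //     ; // at least one instruction had no meaningful cost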
255
256 /// Underlying constants for 'cost' values in this interface.
257 ///
258 /// Many APIs in this interface return a cost. This enum defines the
259 /// fundamental values that should be used to interpret (and produce) those
260 /// costs. The costs are returned as an int rather than a member of this
261 /// enumeration because it is expected that the cost of one IR instruction
262 /// may have a multiplicative factor to it or otherwise won't fit directly
263 /// into the enum. Moreover, it is common to sum or average costs which works
264 /// better as simple integral values. Thus this enum only provides constants.
265 /// Also note that the returned costs are signed integers to make it natural
266 /// to add, subtract, and test with zero (a common boundary condition). It is
267 /// not expected that 2^32 is a realistic cost to be modeling at any point.
268 ///
269 /// Note that these costs should usually reflect the intersection of code-size
270 /// cost and execution cost. A free instruction is typically one that folds
271 /// into another instruction. For example, reg-to-reg moves can often be
272 /// skipped by renaming the registers in the CPU, but they still are encoded
273 /// and thus wouldn't be considered 'free' here.
274 enum TargetCostConstants {
275 TCC_Free = 0, ///< Expected to fold away in lowering.
276 TCC_Basic = 1, ///< The cost of a typical 'add' instruction.
277 TCC_Expensive = 4 ///< The cost of a 'div' instruction on x86.
278 };
279
280 /// Estimate the cost of a GEP operation when lowered.
281 int getGEPCost(Type *PointeeType, const Value *Ptr,
282 ArrayRef<const Value *> Operands,
283 TargetCostKind CostKind = TCK_SizeAndLatency) const;
284
285 /// \returns A value by which our inlining threshold should be multiplied.
286 /// This is primarily used to bump up the inlining threshold wholesale on
287 /// targets where calls are unusually expensive.
288 ///
289 /// TODO: This is a rather blunt instrument. Perhaps altering the costs of
290 /// individual classes of instructions would be better.
291 unsigned getInliningThresholdMultiplier() const;
292
293 /// \returns A value to be added to the inlining threshold.
294 unsigned adjustInliningThreshold(const CallBase *CB) const;
295
296 /// \returns Vector bonus in percent.
297 ///
298 /// Vector bonuses: We want to more aggressively inline vector-dense kernels
299 /// and apply this bonus based on the percentage of vector instructions. A
300 /// bonus is applied if the vector instructions exceed 50% and half that
  /// amount is applied if it exceeds 10%. Note that these bonuses are somewhat
302 /// arbitrary and evolved over time by accident as much as because they are
303 /// principled bonuses.
304 /// FIXME: It would be nice to base the bonus values on something more
  /// scientific. A target may have no bonus on vector instructions.
306 int getInlinerVectorBonusPercent() const;
307
308 /// \return the expected cost of a memcpy, which could e.g. depend on the
309 /// source/destination type and alignment and the number of bytes copied.
310 int getMemcpyCost(const Instruction *I) const;
311
312 /// \return The estimated number of case clusters when lowering \p 'SI'.
313 /// \p JTSize Set a jump table size only when \p SI is suitable for a jump
314 /// table.
315 unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
316 unsigned &JTSize,
317 ProfileSummaryInfo *PSI,
318 BlockFrequencyInfo *BFI) const;
319
320 /// Estimate the cost of a given IR user when lowered.
321 ///
322 /// This can estimate the cost of either a ConstantExpr or Instruction when
323 /// lowered.
324 ///
325 /// \p Operands is a list of operands which can be a result of transformations
  /// of the current operands. The number of operands on the list must be equal
  /// to the number of current operands the IR user has. Their order on the
328 /// list must be the same as the order of the current operands the IR user
329 /// has.
330 ///
331 /// The returned cost is defined in terms of \c TargetCostConstants, see its
332 /// comments for a detailed explanation of the cost values.
333 int getUserCost(const User *U, ArrayRef<const Value *> Operands,
334 TargetCostKind CostKind) const;
335
336 /// This is a helper function which calls the two-argument getUserCost
337 /// with \p Operands which are the current operands U has.
  int getUserCost(const User *U, TargetCostKind CostKind) const {
    SmallVector<const Value *, 4> Operands(U->operand_values());
    return getUserCost(U, Operands, CostKind);
  }
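  // Example (hedged sketch): a client about to rewrite an operand can pass the
  // prospective operand list to the overload above that takes an explicit
  // operand list, pricing the user as it would look *after* the
  // transformation. "GEP" and "NewIdx" are assumed values used only to
  // illustrate the idea.
  //
  //   SmallVector<const Value *, 4> Ops(GEP->operand_values());
  //   Ops.back() = NewIdx; // cost the GEP as if its last index were NewIdx
  //   int Cost =
  //       TTI.getUserCost(GEP, Ops, TargetTransformInfo::TCK_SizeAndLatency);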
342
343 /// Return true if branch divergence exists.
344 ///
345 /// Branch divergence has a significantly negative impact on GPU performance
346 /// when threads in the same wavefront take different paths due to conditional
347 /// branches.
348 bool hasBranchDivergence() const;
349
350 /// Return true if the target prefers to use GPU divergence analysis to
351 /// replace the legacy version.
352 bool useGPUDivergenceAnalysis() const;
353
354 /// Returns whether V is a source of divergence.
355 ///
356 /// This function provides the target-dependent information for
357 /// the target-independent LegacyDivergenceAnalysis. LegacyDivergenceAnalysis
358 /// first builds the dependency graph, and then runs the reachability
359 /// algorithm starting with the sources of divergence.
360 bool isSourceOfDivergence(const Value *V) const;
361
362 // Returns true for the target specific
363 // set of operations which produce uniform result
364 // even taking non-uniform arguments
365 bool isAlwaysUniform(const Value *V) const;
366
367 /// Returns the address space ID for a target's 'flat' address space. Note
368 /// this is not necessarily the same as addrspace(0), which LLVM sometimes
369 /// refers to as the generic address space. The flat address space is a
  /// generic address space that can be used to access multiple segments of memory
371 /// with different address spaces. Access of a memory location through a
372 /// pointer with this address space is expected to be legal but slower
373 /// compared to the same memory location accessed through a pointer with a
374 /// different address space.
375 //
376 /// This is for targets with different pointer representations which can
377 /// be converted with the addrspacecast instruction. If a pointer is converted
378 /// to this address space, optimizations should attempt to replace the access
379 /// with the source address space.
380 ///
381 /// \returns ~0u if the target does not have such a flat address space to
382 /// optimize away.
383 unsigned getFlatAddressSpace() const;
384
385 /// Return any intrinsic address operand indexes which may be rewritten if
386 /// they use a flat address space pointer.
387 ///
388 /// \returns true if the intrinsic was handled.
389 bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
390 Intrinsic::ID IID) const;
391
392 bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const;
393
394 unsigned getAssumedAddrSpace(const Value *V) const;
395
396 /// Rewrite intrinsic call \p II such that \p OldV will be replaced with \p
397 /// NewV, which has a different address space. This should happen for every
398 /// operand index that collectFlatAddressOperands returned for the intrinsic.
399 /// \returns nullptr if the intrinsic was not handled. Otherwise, returns the
400 /// new value (which may be the original \p II with modified operands).
401 Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
402 Value *NewV) const;
403
404 /// Test whether calls to a function lower to actual program function
405 /// calls.
406 ///
407 /// The idea is to test whether the program is likely to require a 'call'
408 /// instruction or equivalent in order to call the given function.
409 ///
  /// FIXME: It's not clear that this is a good or useful query API. Clients
411 /// should probably move to simpler cost metrics using the above.
412 /// Alternatively, we could split the cost interface into distinct code-size
413 /// and execution-speed costs. This would allow modelling the core of this
414 /// query more accurately as a call is a single small instruction, but
415 /// incurs significant execution cost.
416 bool isLoweredToCall(const Function *F) const;
417
418 struct LSRCost {
419 /// TODO: Some of these could be merged. Also, a lexical ordering
420 /// isn't always optimal.
421 unsigned Insns;
422 unsigned NumRegs;
423 unsigned AddRecCost;
424 unsigned NumIVMuls;
425 unsigned NumBaseAdds;
426 unsigned ImmCost;
427 unsigned SetupCost;
428 unsigned ScaleCost;
429 };
430
431 /// Parameters that control the generic loop unrolling transformation.
432 struct UnrollingPreferences {
433 /// The cost threshold for the unrolled loop. Should be relative to the
434 /// getUserCost values returned by this API, and the expectation is that
435 /// the unrolled loop's instructions when run through that interface should
436 /// not exceed this cost. However, this is only an estimate. Also, specific
437 /// loops may be unrolled even with a cost above this threshold if deemed
438 /// profitable. Set this to UINT_MAX to disable the loop body cost
439 /// restriction.
440 unsigned Threshold;
441 /// If complete unrolling will reduce the cost of the loop, we will boost
442 /// the Threshold by a certain percent to allow more aggressive complete
443 /// unrolling. This value provides the maximum boost percentage that we
444 /// can apply to Threshold (The value should be no less than 100).
445 /// BoostedThreshold = Threshold * min(RolledCost / UnrolledCost,
446 /// MaxPercentThresholdBoost / 100)
447 /// E.g. if complete unrolling reduces the loop execution time by 50%
448 /// then we boost the threshold by the factor of 2x. If unrolling is not
449 /// expected to reduce the running time, then we do not increase the
450 /// threshold.
451 unsigned MaxPercentThresholdBoost;
452 /// The cost threshold for the unrolled loop when optimizing for size (set
453 /// to UINT_MAX to disable).
454 unsigned OptSizeThreshold;
455 /// The cost threshold for the unrolled loop, like Threshold, but used
456 /// for partial/runtime unrolling (set to UINT_MAX to disable).
457 unsigned PartialThreshold;
458 /// The cost threshold for the unrolled loop when optimizing for size, like
459 /// OptSizeThreshold, but used for partial/runtime unrolling (set to
460 /// UINT_MAX to disable).
461 unsigned PartialOptSizeThreshold;
462 /// A forced unrolling factor (the number of concatenated bodies of the
463 /// original loop in the unrolled loop body). When set to 0, the unrolling
464 /// transformation will select an unrolling factor based on the current cost
465 /// threshold and other factors.
466 unsigned Count;
467 /// Default unroll count for loops with run-time trip count.
468 unsigned DefaultUnrollRuntimeCount;
469 // Set the maximum unrolling factor. The unrolling factor may be selected
470 // using the appropriate cost threshold, but may not exceed this number
471 // (set to UINT_MAX to disable). This does not apply in cases where the
472 // loop is being fully unrolled.
473 unsigned MaxCount;
474 /// Set the maximum unrolling factor for full unrolling. Like MaxCount, but
475 /// applies even if full unrolling is selected. This allows a target to fall
476 /// back to Partial unrolling if full unrolling is above FullUnrollMaxCount.
477 unsigned FullUnrollMaxCount;
478 // Represents number of instructions optimized when "back edge"
479 // becomes "fall through" in unrolled loop.
480 // For now we count a conditional branch on a backedge and a comparison
481 // feeding it.
482 unsigned BEInsns;
483 /// Allow partial unrolling (unrolling of loops to expand the size of the
484 /// loop body, not only to eliminate small constant-trip-count loops).
485 bool Partial;
486 /// Allow runtime unrolling (unrolling of loops to expand the size of the
487 /// loop body even when the number of loop iterations is not known at
488 /// compile time).
489 bool Runtime;
490 /// Allow generation of a loop remainder (extra iterations after unroll).
491 bool AllowRemainder;
492 /// Allow emitting expensive instructions (such as divisions) when computing
493 /// the trip count of a loop for runtime unrolling.
494 bool AllowExpensiveTripCount;
495 /// Apply loop unroll on any kind of loop
496 /// (mainly to loops that fail runtime unrolling).
497 bool Force;
498 /// Allow using trip count upper bound to unroll loops.
499 bool UpperBound;
500 /// Allow unrolling of all the iterations of the runtime loop remainder.
501 bool UnrollRemainder;
502 /// Allow unroll and jam. Used to enable unroll and jam for the target.
503 bool UnrollAndJam;
504 /// Threshold for unroll and jam, for inner loop size. The 'Threshold'
505 /// value above is used during unroll and jam for the outer loop size.
506 /// This value is used in the same manner to limit the size of the inner
507 /// loop.
508 unsigned UnrollAndJamInnerLoopThreshold;
509 /// Don't allow loop unrolling to simulate more than this number of
510 /// iterations when checking full unroll profitability
511 unsigned MaxIterationsCountToAnalyze;
512 };
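  // Illustrative sketch (hypothetical target, not a real backend): a
  // getUnrollingPreferences() override typically starts from the defaults the
  // caller filled in and only adjusts a few fields, e.g.:
  //
  //   void MyTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
  //                                           TTI::UnrollingPreferences &UP) {
  //     UP.Partial = true;                       // allow partial unrolling
  //     UP.Runtime = true;                       // allow runtime unrolling
  //     UP.MaxCount = 4;                         // never unroll by more than 4
  //     UP.PartialThreshold = UP.Threshold / 2;  // be more conservative
  //   }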
513
514 /// Get target-customized preferences for the generic loop unrolling
515 /// transformation. The caller will initialize UP with the current
516 /// target-independent defaults.
517 void getUnrollingPreferences(Loop *L, ScalarEvolution &,
518 UnrollingPreferences &UP) const;
519
520 /// Query the target whether it would be profitable to convert the given loop
521 /// into a hardware loop.
522 bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
523 AssumptionCache &AC, TargetLibraryInfo *LibInfo,
524 HardwareLoopInfo &HWLoopInfo) const;
525
  /// Query the target whether it would be preferred to create a predicated
527 /// vector loop, which can avoid the need to emit a scalar epilogue loop.
528 bool preferPredicateOverEpilogue(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
529 AssumptionCache &AC, TargetLibraryInfo *TLI,
530 DominatorTree *DT,
531 const LoopAccessInfo *LAI) const;
532
533 /// Query the target whether lowering of the llvm.get.active.lane.mask
534 /// intrinsic is supported.
535 bool emitGetActiveLaneMask() const;
536
537 // Parameters that control the loop peeling transformation
538 struct PeelingPreferences {
    /// A forced peeling factor (the number of bodies of the original loop
    /// that should be peeled off before the loop body). When set to 0, a
    /// peeling factor is chosen based on profile information and other factors.
542 unsigned PeelCount;
543 /// Allow peeling off loop iterations.
544 bool AllowPeeling;
545 /// Allow peeling off loop iterations for loop nests.
546 bool AllowLoopNestsPeeling;
    /// Allow peeling based on profile. Used to enable peeling off all
    /// iterations based on the provided profile.
549 /// If the value is true the peeling cost model can decide to peel only
550 /// some iterations and in this case it will set this to false.
551 bool PeelProfiledIterations;
552 };
553
554 /// Get target-customized preferences for the generic loop peeling
555 /// transformation. The caller will initialize \p PP with the current
556 /// target-independent defaults with information from \p L and \p SE.
557 void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
558 PeelingPreferences &PP) const;
559
560 /// Targets can implement their own combinations for target-specific
561 /// intrinsics. This function will be called from the InstCombine pass every
562 /// time a target-specific intrinsic is encountered.
563 ///
564 /// \returns None to not do anything target specific or a value that will be
  /// returned from the InstCombiner. It is possible to stop further
  /// processing of the intrinsic by returning nullptr.
567 Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
568 IntrinsicInst &II) const;
569 /// Can be used to implement target-specific instruction combining.
570 /// \see instCombineIntrinsic
571 Optional<Value *>
572 simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
573 APInt DemandedMask, KnownBits &Known,
574 bool &KnownBitsComputed) const;
575 /// Can be used to implement target-specific instruction combining.
576 /// \see instCombineIntrinsic
577 Optional<Value *> simplifyDemandedVectorEltsIntrinsic(
578 InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
579 APInt &UndefElts2, APInt &UndefElts3,
580 std::function<void(Instruction *, unsigned, APInt, APInt &)>
581 SimplifyAndSetOp) const;
582 /// @}
583
584 /// \name Scalar Target Information
585 /// @{
586
587 /// Flags indicating the kind of support for population count.
588 ///
589 /// Compared to the SW implementation, HW support is supposed to
590 /// significantly boost the performance when the population is dense, and it
  /// may or may not degrade performance if the population is sparse. HW
  /// support is considered "Fast" if it can outperform, or is on a par
  /// with, the SW implementation when the population is sparse; otherwise, it
  /// is considered "Slow".
595 enum PopcntSupportKind { PSK_Software, PSK_SlowHardware, PSK_FastHardware };
596
  /// Return true if the specified immediate is a legal add immediate, that
598 /// is the target has add instructions which can add a register with the
599 /// immediate without having to materialize the immediate into a register.
600 bool isLegalAddImmediate(int64_t Imm) const;
601
  /// Return true if the specified immediate is a legal icmp immediate,
603 /// that is the target has icmp instructions which can compare a register
604 /// against the immediate without having to materialize the immediate into a
605 /// register.
606 bool isLegalICmpImmediate(int64_t Imm) const;
607
608 /// Return true if the addressing mode represented by AM is legal for
609 /// this target, for a load/store of the specified type.
610 /// The type may be VoidTy, in which case only return true if the addressing
611 /// mode is legal for a load/store of any legal type.
612 /// If target returns true in LSRWithInstrQueries(), I may be valid.
613 /// TODO: Handle pre/postinc as well.
614 bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
615 bool HasBaseReg, int64_t Scale,
616 unsigned AddrSpace = 0,
617 Instruction *I = nullptr) const;
618
  /// Return true if the LSR cost of C1 is lower than the cost of C2.
620 bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
621 TargetTransformInfo::LSRCost &C2) const;
622
623 /// Return true if LSR major cost is number of registers. Targets which
624 /// implement their own isLSRCostLess and unset number of registers as major
625 /// cost should return false, otherwise return true.
626 bool isNumRegsMajorCostOfLSR() const;
627
628 /// \returns true if LSR should not optimize a chain that includes \p I.
629 bool isProfitableLSRChainElement(Instruction *I) const;
630
631 /// Return true if the target can fuse a compare and branch.
632 /// Loop-strength-reduction (LSR) uses that knowledge to adjust its cost
633 /// calculation for the instructions in a loop.
634 bool canMacroFuseCmp() const;
635
636 /// Return true if the target can save a compare for loop count, for example
  /// a hardware loop saves a compare.
638 bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI,
639 DominatorTree *DT, AssumptionCache *AC,
640 TargetLibraryInfo *LibInfo) const;
641
  /// \return True if LSR should make efforts to create/preserve post-inc
643 /// addressing mode expressions.
644 bool shouldFavorPostInc() const;
645
646 /// Return true if LSR should make efforts to generate indexed addressing
647 /// modes that operate across loop iterations.
648 bool shouldFavorBackedgeIndex(const Loop *L) const;
649
650 /// Return true if the target supports masked store.
651 bool isLegalMaskedStore(Type *DataType, Align Alignment) const;
652 /// Return true if the target supports masked load.
653 bool isLegalMaskedLoad(Type *DataType, Align Alignment) const;
654
655 /// Return true if the target supports nontemporal store.
656 bool isLegalNTStore(Type *DataType, Align Alignment) const;
657 /// Return true if the target supports nontemporal load.
658 bool isLegalNTLoad(Type *DataType, Align Alignment) const;
659
660 /// Return true if the target supports masked scatter.
661 bool isLegalMaskedScatter(Type *DataType, Align Alignment) const;
662 /// Return true if the target supports masked gather.
663 bool isLegalMaskedGather(Type *DataType, Align Alignment) const;
664
665 /// Return true if the target supports masked compress store.
666 bool isLegalMaskedCompressStore(Type *DataType) const;
667 /// Return true if the target supports masked expand load.
668 bool isLegalMaskedExpandLoad(Type *DataType) const;
669
670 /// Return true if the target has a unified operation to calculate division
671 /// and remainder. If so, the additional implicit multiplication and
672 /// subtraction required to calculate a remainder from division are free. This
673 /// can enable more aggressive transformations for division and remainder than
674 /// would typically be allowed using throughput or size cost models.
675 bool hasDivRemOp(Type *DataType, bool IsSigned) const;
676
677 /// Return true if the given instruction (assumed to be a memory access
678 /// instruction) has a volatile variant. If that's the case then we can avoid
679 /// addrspacecast to generic AS for volatile loads/stores. Default
680 /// implementation returns false, which prevents address space inference for
681 /// volatile loads/stores.
682 bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) const;
683
684 /// Return true if target doesn't mind addresses in vectors.
685 bool prefersVectorizedAddressing() const;
686
687 /// Return the cost of the scaling factor used in the addressing
688 /// mode represented by AM for this target, for a load/store
689 /// of the specified type.
690 /// If the AM is supported, the return value must be >= 0.
691 /// If the AM is not supported, it returns a negative value.
692 /// TODO: Handle pre/postinc as well.
693 int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
694 bool HasBaseReg, int64_t Scale,
695 unsigned AddrSpace = 0) const;
696
697 /// Return true if the loop strength reduce pass should make
698 /// Instruction* based TTI queries to isLegalAddressingMode(). This is
699 /// needed on SystemZ, where e.g. a memcpy can only have a 12 bit unsigned
700 /// immediate offset and no index register.
701 bool LSRWithInstrQueries() const;
702
703 /// Return true if it's free to truncate a value of type Ty1 to type
  /// Ty2. e.g. On x86 it's free to truncate an i32 value in register EAX to i16
705 /// by referencing its sub-register AX.
706 bool isTruncateFree(Type *Ty1, Type *Ty2) const;
707
  /// Return true if it is profitable to hoist an instruction in the
  /// then/else blocks to before the if.
710 bool isProfitableToHoist(Instruction *I) const;
711
712 bool useAA() const;
713
714 /// Return true if this type is legal.
715 bool isTypeLegal(Type *Ty) const;
716
717 /// Returns the estimated number of registers required to represent \p Ty.
718 unsigned getRegUsageForType(Type *Ty) const;
719
720 /// Return true if switches should be turned into lookup tables for the
721 /// target.
722 bool shouldBuildLookupTables() const;
723
724 /// Return true if switches should be turned into lookup tables
725 /// containing this constant value for the target.
726 bool shouldBuildLookupTablesForConstant(Constant *C) const;
727
  /// Return true if the input function, which is cold at all call sites,
  /// should use the coldcc calling convention.
730 bool useColdCCForColdCall(Function &F) const;
731
732 /// Estimate the overhead of scalarizing an instruction. Insert and Extract
733 /// are set if the demanded result elements need to be inserted and/or
734 /// extracted from vectors.
735 unsigned getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts,
736 bool Insert, bool Extract) const;
737
  /// Estimate the overhead of scalarizing an instruction's unique
  /// non-constant operands. The types of the arguments are ordinarily
  /// scalar, in which case the costs are multiplied by VF.
741 unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
742 unsigned VF) const;
743
744 /// If target has efficient vector element load/store instructions, it can
745 /// return true here so that insertion/extraction costs are not added to
746 /// the scalarization cost of a load/store.
747 bool supportsEfficientVectorElementLoadStore() const;
748
749 /// Don't restrict interleaved unrolling to small loops.
750 bool enableAggressiveInterleaving(bool LoopHasReductions) const;
751
  /// Returns options for expansion of memcmp. IsZeroCmp is
  /// true if this is the expansion of memcmp(p1, p2, s) == 0.
754 struct MemCmpExpansionOptions {
755 // Return true if memcmp expansion is enabled.
756 operator bool() const { return MaxNumLoads > 0; }
757
758 // Maximum number of load operations.
759 unsigned MaxNumLoads = 0;
760
761 // The list of available load sizes (in bytes), sorted in decreasing order.
762 SmallVector<unsigned, 8> LoadSizes;
763
764 // For memcmp expansion when the memcmp result is only compared equal or
765 // not-equal to 0, allow up to this number of load pairs per block. As an
766 // example, this may allow 'memcmp(a, b, 3) == 0' in a single block:
767 // a0 = load2bytes &a[0]
768 // b0 = load2bytes &b[0]
769 // a2 = load1byte &a[2]
770 // b2 = load1byte &b[2]
771 // r = cmp eq (a0 ^ b0 | a2 ^ b2), 0
772 unsigned NumLoadsPerBlock = 1;
773
774 // Set to true to allow overlapping loads. For example, 7-byte compares can
775 // be done with two 4-byte compares instead of 4+2+1-byte compares. This
776 // requires all loads in LoadSizes to be doable in an unaligned way.
777 bool AllowOverlappingLoads = false;
778 };
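  // Example (a sketch under assumptions, not a real target): a target that can
  // perform unaligned 8/4/2/1-byte loads might enable memcmp expansion roughly
  // like this; the concrete numbers are illustrative only.
  //
  //   TTI::MemCmpExpansionOptions
  //   MyTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  //     MemCmpExpansionOptions Options;
  //     Options.MaxNumLoads = OptSize ? 2 : 8;
  //     Options.LoadSizes = {8, 4, 2, 1};        // decreasing order, in bytes
  //     Options.NumLoadsPerBlock = IsZeroCmp ? 2 : 1;
  //     Options.AllowOverlappingLoads = true;
  //     return Options;
  //   }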
779 MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
780 bool IsZeroCmp) const;
781
782 /// Enable matching of interleaved access groups.
783 bool enableInterleavedAccessVectorization() const;
784
785 /// Enable matching of interleaved access groups that contain predicated
  /// accesses or gaps and are therefore vectorized using masked
787 /// vector loads/stores.
788 bool enableMaskedInterleavedAccessVectorization() const;
789
790 /// Indicate that it is potentially unsafe to automatically vectorize
  /// floating-point operations because vector and scalar floating-point
  /// semantics may differ. For example, ARM NEON v7 SIMD math
793 /// does not support IEEE-754 denormal numbers, while depending on the
794 /// platform, scalar floating-point math does.
795 /// This applies to floating-point math operations and calls, not memory
796 /// operations, shuffles, or casts.
797 bool isFPVectorizationPotentiallyUnsafe() const;
798
799 /// Determine if the target supports unaligned memory accesses.
800 bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
801 unsigned AddressSpace = 0,
802 unsigned Alignment = 1,
803 bool *Fast = nullptr) const;
804
805 /// Return hardware support for population count.
806 PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const;
807
808 /// Return true if the hardware has a fast square-root instruction.
809 bool haveFastSqrt(Type *Ty) const;
810
811 /// Return true if it is faster to check if a floating-point value is NaN
812 /// (or not-NaN) versus a comparison against a constant FP zero value.
813 /// Targets should override this if materializing a 0.0 for comparison is
814 /// generally as cheap as checking for ordered/unordered.
815 bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const;
816
817 /// Return the expected cost of supporting the floating point operation
818 /// of the specified type.
819 int getFPOpCost(Type *Ty) const;
820
821 /// Return the expected cost of materializing for the given integer
822 /// immediate of the specified type.
823 int getIntImmCost(const APInt &Imm, Type *Ty, TargetCostKind CostKind) const;
824
825 /// Return the expected cost of materialization for the given integer
826 /// immediate of the specified type for a given instruction. The cost can be
827 /// zero if the immediate can be folded into the specified instruction.
828 int getIntImmCostInst(unsigned Opc, unsigned Idx, const APInt &Imm, Type *Ty,
829 TargetCostKind CostKind,
830 Instruction *Inst = nullptr) const;
831 int getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
832 Type *Ty, TargetCostKind CostKind) const;
833
834 /// Return the expected cost for the given integer when optimising
835 /// for size. This is different than the other integer immediate cost
836 /// functions in that it is subtarget agnostic. This is useful when you e.g.
  /// target one ISA such as AArch32 but smaller encodings could be possible
  /// with another such as Thumb. This return value is used as a penalty when
  /// the total cost for a constant is calculated (the bigger the cost, the
840 /// more beneficial constant hoisting is).
841 int getIntImmCodeSizeCost(unsigned Opc, unsigned Idx, const APInt &Imm,
842 Type *Ty) const;
843 /// @}
844
845 /// \name Vector Target Information
846 /// @{
847
848 /// The various kinds of shuffle patterns for vector queries.
849 enum ShuffleKind {
850 SK_Broadcast, ///< Broadcast element 0 to all other elements.
851 SK_Reverse, ///< Reverse the order of the vector.
852 SK_Select, ///< Selects elements from the corresponding lane of
853 ///< either source operand. This is equivalent to a
854 ///< vector select with a constant condition operand.
855 SK_Transpose, ///< Transpose two vectors.
856 SK_InsertSubvector, ///< InsertSubvector. Index indicates start offset.
857 SK_ExtractSubvector, ///< ExtractSubvector Index indicates start offset.
858 SK_PermuteTwoSrc, ///< Merge elements from two source vectors into one
859 ///< with any shuffle mask.
860 SK_PermuteSingleSrc ///< Shuffle elements of single source vector with any
861 ///< shuffle mask.
862 };
863
864 /// Kind of the reduction data.
865 enum ReductionKind {
    RK_None,           ///< Not a reduction.
    RK_Arithmetic,     ///< Binary reduction data.
    RK_MinMax,         ///< Min/max reduction data.
    RK_UnsignedMinMax, ///< Unsigned min/max reduction data.
870 };
871
872 /// Contains opcode + LHS/RHS parts of the reduction operations.
873 struct ReductionData {
874 ReductionData() = delete;
    ReductionData(ReductionKind Kind, unsigned Opcode, Value *LHS, Value *RHS)
        : Opcode(Opcode), LHS(LHS), RHS(RHS), Kind(Kind) {
      assert(Kind != RK_None && "expected binary or min/max reduction only.");
    }
879 unsigned Opcode = 0;
880 Value *LHS = nullptr;
881 Value *RHS = nullptr;
882 ReductionKind Kind = RK_None;
    bool hasSameData(ReductionData &RD) const {
      return Kind == RD.Kind && Opcode == RD.Opcode;
    }
886 };
887
888 static ReductionKind matchPairwiseReduction(
889 const ExtractElementInst *ReduxRoot, unsigned &Opcode, VectorType *&Ty);
890
891 static ReductionKind matchVectorSplittingReduction(
892 const ExtractElementInst *ReduxRoot, unsigned &Opcode, VectorType *&Ty);
893
894 static ReductionKind matchVectorReduction(const ExtractElementInst *ReduxRoot,
895 unsigned &Opcode, VectorType *&Ty,
896 bool &IsPairwise);
897
898 /// Additional information about an operand's possible values.
899 enum OperandValueKind {
900 OK_AnyValue, // Operand can have any value.
901 OK_UniformValue, // Operand is uniform (splat of a value).
902 OK_UniformConstantValue, // Operand is uniform constant.
903 OK_NonUniformConstantValue // Operand is a non uniform constant value.
904 };
905
906 /// Additional properties of an operand's values.
907 enum OperandValueProperties { OP_None = 0, OP_PowerOf2 = 1 };
908
909 /// \return the number of registers in the target-provided register class.
910 unsigned getNumberOfRegisters(unsigned ClassID) const;
911
912 /// \return the target-provided register class ID for the provided type,
913 /// accounting for type promotion and other type-legalization techniques that
914 /// the target might apply. However, it specifically does not account for the
915 /// scalarization or splitting of vector types. Should a vector type require
916 /// scalarization or splitting into multiple underlying vector registers, that
917 /// type should be mapped to a register class containing no registers.
918 /// Specifically, this is designed to provide a simple, high-level view of the
919 /// register allocation later performed by the backend. These register classes
920 /// don't necessarily map onto the register classes used by the backend.
921 /// FIXME: It's not currently possible to determine how many registers
922 /// are used by the provided type.
923 unsigned getRegisterClassForType(bool Vector, Type *Ty = nullptr) const;
924
925 /// \return the target-provided register class name
926 const char *getRegisterClassName(unsigned ClassID) const;
927
928 /// \return The width of the largest scalar or vector register type.
929 unsigned getRegisterBitWidth(bool Vector) const;
930
931 /// \return The width of the smallest vector register type.
932 unsigned getMinVectorRegisterBitWidth() const;
933
934 /// \return The maximum value of vscale if the target specifies an
935 /// architectural maximum vector length, and None otherwise.
936 Optional<unsigned> getMaxVScale() const;
937
938 /// \return True if the vectorization factor should be chosen to
939 /// make the vector of the smallest element type match the size of a
940 /// vector register. For wider element types, this could result in
941 /// creating vectors that span multiple vector registers.
942 /// If false, the vectorization factor will be chosen based on the
943 /// size of the widest element type.
944 bool shouldMaximizeVectorBandwidth(bool OptSize) const;
945
946 /// \return The minimum vectorization factor for types of given element
947 /// bit width, or 0 if there is no minimum VF. The returned value only
948 /// applies when shouldMaximizeVectorBandwidth returns true.
949 unsigned getMinimumVF(unsigned ElemWidth) const;
950
951 /// \return The maximum vectorization factor for types of given element
952 /// bit width and opcode, or 0 if there is no maximum VF.
953 /// Currently only used by the SLP vectorizer.
954 unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const;
955
956 /// \return True if it should be considered for address type promotion.
957 /// \p AllowPromotionWithoutCommonHeader Set true if promoting \p I is
958 /// profitable without finding other extensions fed by the same input.
959 bool shouldConsiderAddressTypePromotion(
960 const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const;
961
962 /// \return The size of a cache line in bytes.
963 unsigned getCacheLineSize() const;
964
965 /// The possible cache levels
966 enum class CacheLevel {
967 L1D, // The L1 data cache
968 L2D, // The L2 data cache
969
970 // We currently do not model L3 caches, as their sizes differ widely between
971 // microarchitectures. Also, we currently do not have a use for L3 cache
972 // size modeling yet.
973 };
974
975 /// \return The size of the cache level in bytes, if available.
976 Optional<unsigned> getCacheSize(CacheLevel Level) const;
977
978 /// \return The associativity of the cache level, if available.
979 Optional<unsigned> getCacheAssociativity(CacheLevel Level) const;
980
981 /// \return How much before a load we should place the prefetch
982 /// instruction. This is currently measured in number of
983 /// instructions.
984 unsigned getPrefetchDistance() const;
985
986 /// Some HW prefetchers can handle accesses up to a certain constant stride.
987 /// Sometimes prefetching is beneficial even below the HW prefetcher limit,
988 /// and the arguments provided are meant to serve as a basis for deciding this
989 /// for a particular loop.
990 ///
991 /// \param NumMemAccesses Number of memory accesses in the loop.
992 /// \param NumStridedMemAccesses Number of the memory accesses that
993 /// ScalarEvolution could find a known stride
994 /// for.
995 /// \param NumPrefetches Number of software prefetches that will be
996 /// emitted as determined by the addresses
997 /// involved and the cache line size.
998 /// \param HasCall True if the loop contains a call.
999 ///
1000 /// \return This is the minimum stride in bytes where it makes sense to start
1001 /// adding SW prefetches. The default is 1, i.e. prefetch with any
1002 /// stride.
1003 unsigned getMinPrefetchStride(unsigned NumMemAccesses,
1004 unsigned NumStridedMemAccesses,
1005 unsigned NumPrefetches, bool HasCall) const;
1006
1007 /// \return The maximum number of iterations to prefetch ahead. If
1008 /// the required number of iterations is more than this number, no
1009 /// prefetching is performed.
1010 unsigned getMaxPrefetchIterationsAhead() const;
1011
1012 /// \return True if prefetching should also be done for writes.
1013 bool enableWritePrefetching() const;
1014
1015 /// \return The maximum interleave factor that any transform should try to
1016 /// perform for this target. This number depends on the level of parallelism
1017 /// and the number of execution units in the CPU.
1018 unsigned getMaxInterleaveFactor(unsigned VF) const;
1019
1020 /// Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
1021 static OperandValueKind getOperandInfo(const Value *V,
1022 OperandValueProperties &OpProps);
1023
1024 /// This is an approximation of reciprocal throughput of a math/logic op.
1025 /// A higher cost indicates less expected throughput.
1026 /// From Agner Fog's guides, reciprocal throughput is "the average number of
1027 /// clock cycles per instruction when the instructions are not part of a
1028 /// limiting dependency chain."
1029 /// Therefore, costs should be scaled to account for multiple execution units
1030 /// on the target that can process this type of instruction. For example, if
1031 /// there are 5 scalar integer units and 2 vector integer units that can
1032 /// calculate an 'add' in a single cycle, this model should indicate that the
1033 /// cost of the vector add instruction is 2.5 times the cost of the scalar
1034 /// add instruction.
1035 /// \p Args is an optional argument which holds the instruction operands
1036 /// values so the TTI can analyze those values searching for special
1037 /// cases or optimizations based on those values.
1038 /// \p CxtI is the optional original context instruction, if one exists, to
1039 /// provide even more information.
1040 int getArithmeticInstrCost(
1041 unsigned Opcode, Type *Ty,
1042 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
1043 OperandValueKind Opd1Info = OK_AnyValue,
1044 OperandValueKind Opd2Info = OK_AnyValue,
1045 OperandValueProperties Opd1PropInfo = OP_None,
1046 OperandValueProperties Opd2PropInfo = OP_None,
1047 ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
1048 const Instruction *CxtI = nullptr) const;
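  // Example (sketch): pricing a <4 x i32> multiply where the second operand is
  // known to be a uniform constant. The vector type and operand kinds are
  // chosen purely for illustration; "Ctx" is an LLVMContext assumed in scope.
  //
  //   auto *VecTy = FixedVectorType::get(Type::getInt32Ty(Ctx), 4);
  //   int Cost = TTI.getArithmeticInstrCost(
  //       Instruction::Mul, VecTy, TTI::TCK_RecipThroughput,
  //       TTI::OK_AnyValue, TTI::OK_UniformConstantValue);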
1049
1050 /// \return The cost of a shuffle instruction of kind Kind and of type Tp.
1051 /// The index and subtype parameters are used by the subvector insertion and
1052 /// extraction shuffle kinds to show the insert/extract point and the type of
1053 /// the subvector being inserted/extracted.
1054 /// NOTE: For subvector extractions Tp represents the source type.
1055 int getShuffleCost(ShuffleKind Kind, VectorType *Tp, int Index = 0,
1056 VectorType *SubTp = nullptr) const;
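  // For instance (illustrative only), the cost of broadcasting lane 0 of a
  // <8 x i16> vector could be queried as:
  //
  //   int C = TTI.getShuffleCost(
  //       TTI::SK_Broadcast, FixedVectorType::get(Type::getInt16Ty(Ctx), 8));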
1057
1058 /// Represents a hint about the context in which a cast is used.
1059 ///
1060 /// For zext/sext, the context of the cast is the operand, which must be a
  /// load of some kind. For trunc, the context of the cast is the single
1062 /// user of the instruction, which must be a store of some kind.
1063 ///
1064 /// This enum allows the vectorizer to give getCastInstrCost an idea of the
1065 /// type of cast it's dealing with, as not every cast is equal. For instance,
  /// the zext of a load may be free, but the zext of an interleaving load can
  /// be (very) expensive!
1068 ///
1069 /// See \c getCastContextHint to compute a CastContextHint from a cast
1070 /// Instruction*. Callers can use it if they don't need to override the
1071 /// context and just want it to be calculated from the instruction.
1072 ///
1073 /// FIXME: This handles the types of load/store that the vectorizer can
1074 /// produce, which are the cases where the context instruction is most
1075 /// likely to be incorrect. There are other situations where that can happen
1076 /// too, which might be handled here but in the long run a more general
  /// solution of costing multiple instructions at the same time may be better.
1078 enum class CastContextHint : uint8_t {
1079 None, ///< The cast is not used with a load/store of any kind.
1080 Normal, ///< The cast is used with a normal load/store.
1081 Masked, ///< The cast is used with a masked load/store.
1082 GatherScatter, ///< The cast is used with a gather/scatter.
1083 Interleave, ///< The cast is used with an interleaved load/store.
1084 Reversed, ///< The cast is used with a reversed load/store.
1085 };
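  // Example (minimal sketch): cost a zext whose context is derived from the
  // original IR instruction rather than overridden by the caller. "ZExt" is an
  // assumed ZExtInst *; Dst and Src are its result and operand types.
  //
  //   TTI::CastContextHint CCH = TTI::getCastContextHint(ZExt);
  //   int Cost = TTI.getCastInstrCost(Instruction::ZExt, Dst, Src, CCH,
  //                                   TTI::TCK_RecipThroughput, ZExt);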
1086
1087 /// Calculates a CastContextHint from \p I.
1088 /// This should be used by callers of getCastInstrCost if they wish to
1089 /// determine the context from some instruction.
1090 /// \returns the CastContextHint for ZExt/SExt/Trunc, None if \p I is nullptr,
1091 /// or if it's another type of cast.
1092 static CastContextHint getCastContextHint(const Instruction *I);
1093
1094 /// \return The expected cost of cast instructions, such as bitcast, trunc,
1095 /// zext, etc. If there is an existing instruction that holds Opcode, it
1096 /// may be passed in the 'I' parameter.
1097 int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
1098 TTI::CastContextHint CCH,
1099 TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency,
1100 const Instruction *I = nullptr) const;
1101
1102 /// \return The expected cost of a sign- or zero-extended vector extract. Use
1103 /// -1 to indicate that there is no information about the index value.
1104 int getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy,
1105 unsigned Index = -1) const;
1106
1107 /// \return The expected cost of control-flow related instructions such as
1108 /// Phi, Ret, Br.
1109 int getCFInstrCost(unsigned Opcode,
1110 TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency) const;
1111
1112 /// \returns The expected cost of compare and select instructions. If there
1113 /// is an existing instruction that holds Opcode, it may be passed in the
1114 /// 'I' parameter. The \p VecPred parameter can be used to indicate the select
1115 /// is using a compare with the specified predicate as condition. When vector
1116 /// types are passed, \p VecPred must be used for all lanes.
1117 int getCmpSelInstrCost(
1118 unsigned Opcode, Type *ValTy, Type *CondTy = nullptr,
1119 CmpInst::Predicate VecPred = CmpInst::BAD_ICMP_PREDICATE,
1120 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
1121 const Instruction *I = nullptr) const;
1122
1123 /// \return The expected cost of vector Insert and Extract.
1124 /// Use -1 to indicate that there is no information on the index value.
1125 int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index = -1) const;
1126
1127 /// \return The cost of Load and Store instructions.
1128 int getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
1129 unsigned AddressSpace,
1130 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
1131 const Instruction *I = nullptr) const;
1132
1133 /// \return The cost of masked Load and Store instructions.
1134 int getMaskedMemoryOpCost(
1135 unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
1136 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
1137
1138 /// \return The cost of Gather or Scatter operation
1139 /// \p Opcode - is a type of memory access Load or Store
1140 /// \p DataTy - a vector type of the data to be loaded or stored
1141 /// \p Ptr - pointer [or vector of pointers] - address[es] in memory
1142 /// \p VariableMask - true when the memory access is predicated with a mask
1143 /// that is not a compile-time constant
1144 /// \p Alignment - alignment of single element
1145 /// \p I - the optional original context instruction, if one exists, e.g. the
1146 /// load/store to transform or the call to the gather/scatter intrinsic
1147 int getGatherScatterOpCost(
1148 unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
1149 Align Alignment, TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
1150 const Instruction *I = nullptr) const;
1151
1152 /// \return The cost of the interleaved memory operation.
1153 /// \p Opcode is the memory operation code
1154 /// \p VecTy is the vector type of the interleaved access.
1155 /// \p Factor is the interleave factor
1156 /// \p Indices is the indices for interleaved load members (as interleaved
1157 /// load allows gaps)
1158 /// \p Alignment is the alignment of the memory operation
1159 /// \p AddressSpace is address space of the pointer.
1160 /// \p UseMaskForCond indicates if the memory access is predicated.
1161 /// \p UseMaskForGaps indicates if gaps should be masked.
1162 int getInterleavedMemoryOpCost(
1163 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1164 Align Alignment, unsigned AddressSpace,
1165 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
1166 bool UseMaskForCond = false, bool UseMaskForGaps = false) const;
1167
1168 /// Calculate the cost of performing a vector reduction.
1169 ///
1170 /// This is the cost of reducing the vector value of type \p Ty to a scalar
1171 /// value using the operation denoted by \p Opcode. The form of the reduction
1172 /// can either be a pairwise reduction or a reduction that splits the vector
1173 /// at every reduction level.
1174 ///
1175 /// Pairwise:
1176 /// (v0, v1, v2, v3)
1177 /// ((v0+v1), (v2+v3), undef, undef)
1178 /// Split:
1179 /// (v0, v1, v2, v3)
1180 /// ((v0+v2), (v1+v3), undef, undef)
1181 int getArithmeticReductionCost(
1182 unsigned Opcode, VectorType *Ty, bool IsPairwiseForm,
1183 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
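  // Example (sketch): comparing the split form against the pairwise form of a
  // <4 x float> fadd reduction; the type is illustrative, and "Ctx" is an
  // assumed LLVMContext.
  //
  //   auto *VTy = FixedVectorType::get(Type::getFloatTy(Ctx), 4);
  //   int SplitCost = TTI.getArithmeticReductionCost(Instruction::FAdd, VTy,
  //                                                  /*IsPairwiseForm=*/false);
  //   int PairCost = TTI.getArithmeticReductionCost(Instruction::FAdd, VTy,
  //                                                 /*IsPairwiseForm=*/true);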
1184
1185 int getMinMaxReductionCost(
1186 VectorType *Ty, VectorType *CondTy, bool IsPairwiseForm, bool IsUnsigned,
1187 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
1188
1189 /// Calculate the cost of an extended reduction pattern, similar to
1190 /// getArithmeticReductionCost of an Add reduction with an extension and
  /// optional multiply. This is the cost of:
  /// ResTy vecreduce.add(ext(Ty A)), or if IsMLA flag is set then:
  /// ResTy vecreduce.add(mul(ext(Ty A), ext(Ty B))). The reduction happens
1194 /// on a VectorType with ResTy elements and Ty lanes.
1195 InstructionCost getExtendedAddReductionCost(
1196 bool IsMLA, bool IsUnsigned, Type *ResTy, VectorType *Ty,
1197 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
1198
1199 /// \returns The cost of Intrinsic instructions. Analyses the real arguments.
1200 /// Three cases are handled: 1. scalar instruction 2. vector instruction
1201 /// 3. scalar instruction which is to be vectorized.
1202 int getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
1203 TTI::TargetCostKind CostKind) const;
1204
1205 /// \returns The cost of Call instructions.
1206 int getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys,
1207 TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency) const;
1208
1209 /// \returns The number of pieces into which the provided type must be
1210 /// split during legalization. Zero is returned when the answer is unknown.
1211 unsigned getNumberOfParts(Type *Tp) const;
1212
1213 /// \returns The cost of the address computation. For most targets this can be
1214 /// merged into the instruction indexing mode. Some targets might want to
1215 /// distinguish between address computation for memory operations on vector
1216 /// types and scalar types. Such targets should override this function.
1217 /// The 'SE' parameter holds a pointer to the ScalarEvolution object, which is
1218 /// used to obtain the step value of 'Ptr' in the case of a constant stride.
1219 /// The 'Ptr' parameter holds the SCEV of the access pointer.
1220 int getAddressComputationCost(Type *Ty, ScalarEvolution *SE = nullptr,
1221 const SCEV *Ptr = nullptr) const;
1222
1223 /// \returns The cost, if any, of keeping values of the given types alive
1224 /// over a callsite.
1225 ///
1226 /// Some types may require the use of register classes that do not have
1227 /// any callee-saved registers, so would require a spill and fill.
1228 unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const;
1229
1230 /// \returns True if the intrinsic is a supported memory intrinsic. Info
1231 /// will contain additional information - whether the intrinsic may read from
1232 /// or write to memory, its volatility, and the pointer. Info is undefined
1233 /// if false is returned.
1234 bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const;
1235
1236 /// \returns The maximum element size, in bytes, for an element
1237 /// unordered-atomic memory intrinsic.
1238 unsigned getAtomicMemIntrinsicMaxElementSize() const;
1239
1240 /// \returns A value which is the result of the given memory intrinsic. New
1241 /// instructions may be created to extract the result from the given intrinsic
1242 /// memory operation. Returns nullptr if the target cannot create a result
1243 /// from the given intrinsic.
1244 Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
1245 Type *ExpectedType) const;
1246
1247 /// \returns The type to use in a loop expansion of a memcpy call.
1248 Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
1249 unsigned SrcAddrSpace, unsigned DestAddrSpace,
1250 unsigned SrcAlign, unsigned DestAlign) const;
1251
1252 /// \param[out] OpsOut The operand types to copy RemainingBytes of memory.
1253 /// \param RemainingBytes The number of bytes to copy.
1254 ///
1255 /// Calculates the operand types to use when copying \p RemainingBytes of
1256 /// memory, where source and destination alignments are \p SrcAlign and
1257 /// \p DestAlign respectively.
1258 void getMemcpyLoopResidualLoweringType(
1259 SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
1260 unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
1261 unsigned SrcAlign, unsigned DestAlign) const;
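  // A hedged sketch of how a memcpy expansion might consult these two hooks
  // (TTI, Ctx and Length are assumed; the address spaces, alignments and byte
  // counts below are illustrative):
  //
  //   Type *LoopOpTy = TTI.getMemcpyLoopLoweringType(Ctx, Length,
  //                                                  /*SrcAddrSpace=*/0,
  //                                                  /*DestAddrSpace=*/0,
  //                                                  /*SrcAlign=*/4,
  //                                                  /*DestAlign=*/4);
  //   SmallVector<Type *, 4> ResidualTys;
  //   TTI.getMemcpyLoopResidualLoweringType(ResidualTys, Ctx,
  //                                         /*RemainingBytes=*/7, 0, 0, 4, 4);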
1262
1263 /// \returns True if the two functions have compatible attributes for inlining
1264 /// purposes.
1265 bool areInlineCompatible(const Function *Caller,
1266 const Function *Callee) const;
1267
1268 /// \returns True if the caller and callee agree on how \p Args will be passed
1269 /// to the callee.
1270 /// \param[out] Args The list of compatible arguments. The implementation may
1271 /// filter out any incompatible args from this list.
1272 bool areFunctionArgsABICompatible(const Function *Caller,
1273 const Function *Callee,
1274 SmallPtrSetImpl<Argument *> &Args) const;
1275
1276 /// The type of load/store indexing.
1277 enum MemIndexedMode {
1278 MIM_Unindexed, ///< No indexing.
1279 MIM_PreInc, ///< Pre-incrementing.
1280 MIM_PreDec, ///< Pre-decrementing.
1281 MIM_PostInc, ///< Post-incrementing.
1282 MIM_PostDec ///< Post-decrementing.
1283 };
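  // Illustration of the indexed modes (a sketch, not tied to any target): for
  // an access through pointer P with offset Off,
  //   MIM_PreInc  : P += Off; use *P;    // updated address feeds the access
  //   MIM_PostInc : use *P;   P += Off;  // original address feeds the access
  // The *Dec forms are the same with the offset subtracted.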
1284
1285 /// \returns True if the specified indexed load for the given type is legal.
1286 bool isIndexedLoadLegal(enum MemIndexedMode Mode, Type *Ty) const;
1287
1288 /// \returns True if the specified indexed store for the given type is legal.
1289 bool isIndexedStoreLegal(enum MemIndexedMode Mode, Type *Ty) const;
1290
1291 /// \returns The bitwidth of the largest vector type that should be used to
1292 /// load/store in the given address space.
1293 unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const;
1294
1295 /// \returns True if the load instruction is legal to vectorize.
1296 bool isLegalToVectorizeLoad(LoadInst *LI) const;
1297
1298 /// \returns True if the store instruction is legal to vectorize.
1299 bool isLegalToVectorizeStore(StoreInst *SI) const;
1300
1301 /// \returns True if it is legal to vectorize the given load chain.
1302 bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment,
1303 unsigned AddrSpace) const;
1304
1305 /// \returns True if it is legal to vectorize the given store chain.
1306 bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment,
1307 unsigned AddrSpace) const;
1308
1309 /// \returns The new vector factor value if the target doesn't support a load
1310 /// of \p ChainSizeInBytes bytes or has a better vector factor.
1311 unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
1312 unsigned ChainSizeInBytes,
1313 VectorType *VecTy) const;
1314
1315 /// \returns The new vector factor value if the target doesn't support a store
1316 /// of \p ChainSizeInBytes bytes or has a better vector factor.
1317 unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
1318 unsigned ChainSizeInBytes,
1319 VectorType *VecTy) const;
1320
1321 /// Flags describing the kind of vector reduction.
1322 struct ReductionFlags {
1323 ReductionFlags() : IsMaxOp(false), IsSigned(false), NoNaN(false) {}
1324 bool IsMaxOp; ///< If the op is a min/max kind, true if it's a max operation.
1325 bool IsSigned; ///< Whether the operation is a signed int reduction.
1326 bool NoNaN; ///< If op is an fp min/max, whether NaNs may be present.
1327 };
1328
1329 /// \returns True if the target wants to handle the given reduction idiom in
1330 /// the intrinsics form instead of the shuffle form.
1331 bool useReductionIntrinsic(unsigned Opcode, Type *Ty,
1332 ReductionFlags Flags) const;
1333
1334 /// \returns True if the target prefers reductions to be performed in-loop.
1335 bool preferInLoopReduction(unsigned Opcode, Type *Ty,
1336 ReductionFlags Flags) const;
1337
1338 /// \returns True if the target prefers the reduction select to be kept in the
1339 /// loop when tail folding, i.e.
1340 /// loop:
1341 /// p = phi (0, s)
1342 /// a = add (p, x)
1343 /// s = select (mask, a, p)
1344 /// vecreduce.add(s)
1345 ///
1346 /// As opposed to the normal scheme of p = phi (0, a) which allows the select
1347 /// to be pulled out of the loop. If the select(.., add, ..) can be predicated
1348 /// by the target, this can lead to cleaner code generation.
1349 bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty,
1350 ReductionFlags Flags) const;
1351
1352 /// \returns True if the target wants to expand the given reduction intrinsic
1353 /// into a shuffle sequence.
1354 bool shouldExpandReduction(const IntrinsicInst *II) const;
1355
1356 /// \returns the size cost of rematerializing a GlobalValue address relative
1357 /// to a stack reload.
1358 unsigned getGISelRematGlobalCost() const;
1359
1360 /// \returns True if the target supports scalable vectors.
1361 bool supportsScalableVectors() const;
1362
1363 /// \name Vector Predication Information
1364 /// @{
1365 /// Whether the target supports the %evl parameter of VP intrinsics efficiently
1366 /// in hardware (see the LLVM Language Reference, "Vector Predication
1367 /// Intrinsics"). Use of %evl is discouraged when that is not the case.
1368 bool hasActiveVectorLength() const;
1369
1370 /// @}
1371
1372 /// @}
1373
1374 private:
1375 /// Estimate the latency of the specified instruction.
1376 /// Returns 1 as the default value.
1377 int getInstructionLatency(const Instruction *I) const;
1378
1379 /// Returns the expected throughput cost of the instruction.
1380 /// Returns -1 if the cost is unknown.
1381 int getInstructionThroughput(const Instruction *I) const;
1382
1383 /// The abstract base class used to type erase specific TTI
1384 /// implementations.
1385 class Concept;
1386
1387 /// The template model for the base class which wraps a concrete
1388 /// implementation in a type erased interface.
1389 template <typename T> class Model;
1390
1391 std::unique_ptr<Concept> TTIImpl;
1392 };
1393
1394 class TargetTransformInfo::Concept {
1395 public:
1396 virtual ~Concept() = 0;
1397 virtual const DataLayout &getDataLayout() const = 0;
1398 virtual int getGEPCost(Type *PointeeType, const Value *Ptr,
1399 ArrayRef<const Value *> Operands,
1400 TTI::TargetCostKind CostKind) = 0;
1401 virtual unsigned getInliningThresholdMultiplier() = 0;
1402 virtual unsigned adjustInliningThreshold(const CallBase *CB) = 0;
1403 virtual int getInlinerVectorBonusPercent() = 0;
1404 virtual int getMemcpyCost(const Instruction *I) = 0;
1405 virtual unsigned
1406 getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JTSize,
1407 ProfileSummaryInfo *PSI,
1408 BlockFrequencyInfo *BFI) = 0;
1409 virtual int getUserCost(const User *U, ArrayRef<const Value *> Operands,
1410 TargetCostKind CostKind) = 0;
1411 virtual bool hasBranchDivergence() = 0;
1412 virtual bool useGPUDivergenceAnalysis() = 0;
1413 virtual bool isSourceOfDivergence(const Value *V) = 0;
1414 virtual bool isAlwaysUniform(const Value *V) = 0;
1415 virtual unsigned getFlatAddressSpace() = 0;
1416 virtual bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
1417 Intrinsic::ID IID) const = 0;
1418 virtual bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const = 0;
1419 virtual unsigned getAssumedAddrSpace(const Value *V) const = 0;
1420 virtual Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II,
1421 Value *OldV,
1422 Value *NewV) const = 0;
1423 virtual bool isLoweredToCall(const Function *F) = 0;
1424 virtual void getUnrollingPreferences(Loop *L, ScalarEvolution &,
1425 UnrollingPreferences &UP) = 0;
1426 virtual void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
1427 PeelingPreferences &PP) = 0;
1428 virtual bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
1429 AssumptionCache &AC,
1430 TargetLibraryInfo *LibInfo,
1431 HardwareLoopInfo &HWLoopInfo) = 0;
1432 virtual bool
1433 preferPredicateOverEpilogue(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
1434 AssumptionCache &AC, TargetLibraryInfo *TLI,
1435 DominatorTree *DT, const LoopAccessInfo *LAI) = 0;
1436 virtual bool emitGetActiveLaneMask() = 0;
1437 virtual Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
1438 IntrinsicInst &II) = 0;
1439 virtual Optional<Value *>
1440 simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
1441 APInt DemandedMask, KnownBits &Known,
1442 bool &KnownBitsComputed) = 0;
1443 virtual Optional<Value *> simplifyDemandedVectorEltsIntrinsic(
1444 InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
1445 APInt &UndefElts2, APInt &UndefElts3,
1446 std::function<void(Instruction *, unsigned, APInt, APInt &)>
1447 SimplifyAndSetOp) = 0;
1448 virtual bool isLegalAddImmediate(int64_t Imm) = 0;
1449 virtual bool isLegalICmpImmediate(int64_t Imm) = 0;
1450 virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
1451 int64_t BaseOffset, bool HasBaseReg,
1452 int64_t Scale, unsigned AddrSpace,
1453 Instruction *I) = 0;
1454 virtual bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
1455 TargetTransformInfo::LSRCost &C2) = 0;
1456 virtual bool isNumRegsMajorCostOfLSR() = 0;
1457 virtual bool isProfitableLSRChainElement(Instruction *I) = 0;
1458 virtual bool canMacroFuseCmp() = 0;
1459 virtual bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
1460 LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC,
1461 TargetLibraryInfo *LibInfo) = 0;
1462 virtual bool shouldFavorPostInc() const = 0;
1463 virtual bool shouldFavorBackedgeIndex(const Loop *L) const = 0;
1464 virtual bool isLegalMaskedStore(Type *DataType, Align Alignment) = 0;
1465 virtual bool isLegalMaskedLoad(Type *DataType, Align Alignment) = 0;
1466 virtual bool isLegalNTStore(Type *DataType, Align Alignment) = 0;
1467 virtual bool isLegalNTLoad(Type *DataType, Align Alignment) = 0;
1468 virtual bool isLegalMaskedScatter(Type *DataType, Align Alignment) = 0;
1469 virtual bool isLegalMaskedGather(Type *DataType, Align Alignment) = 0;
1470 virtual bool isLegalMaskedCompressStore(Type *DataType) = 0;
1471 virtual bool isLegalMaskedExpandLoad(Type *DataType) = 0;
1472 virtual bool hasDivRemOp(Type *DataType, bool IsSigned) = 0;
1473 virtual bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) = 0;
1474 virtual bool prefersVectorizedAddressing() = 0;
1475 virtual int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
1476 int64_t BaseOffset, bool HasBaseReg,
1477 int64_t Scale, unsigned AddrSpace) = 0;
1478 virtual bool LSRWithInstrQueries() = 0;
1479 virtual bool isTruncateFree(Type *Ty1, Type *Ty2) = 0;
1480 virtual bool isProfitableToHoist(Instruction *I) = 0;
1481 virtual bool useAA() = 0;
1482 virtual bool isTypeLegal(Type *Ty) = 0;
1483 virtual unsigned getRegUsageForType(Type *Ty) = 0;
1484 virtual bool shouldBuildLookupTables() = 0;
1485 virtual bool shouldBuildLookupTablesForConstant(Constant *C) = 0;
1486 virtual bool useColdCCForColdCall(Function &F) = 0;
1487 virtual unsigned getScalarizationOverhead(VectorType *Ty,
1488 const APInt &DemandedElts,
1489 bool Insert, bool Extract) = 0;
1490 virtual unsigned
1491 getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
1492 unsigned VF) = 0;
1493 virtual bool supportsEfficientVectorElementLoadStore() = 0;
1494 virtual bool enableAggressiveInterleaving(bool LoopHasReductions) = 0;
1495 virtual MemCmpExpansionOptions
1496 enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const = 0;
1497 virtual bool enableInterleavedAccessVectorization() = 0;
1498 virtual bool enableMaskedInterleavedAccessVectorization() = 0;
1499 virtual bool isFPVectorizationPotentiallyUnsafe() = 0;
1500 virtual bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
1501 unsigned BitWidth,
1502 unsigned AddressSpace,
1503 unsigned Alignment,
1504 bool *Fast) = 0;
1505 virtual PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) = 0;
1506 virtual bool haveFastSqrt(Type *Ty) = 0;
1507 virtual bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) = 0;
1508 virtual int getFPOpCost(Type *Ty) = 0;
1509 virtual int getIntImmCodeSizeCost(unsigned Opc, unsigned Idx,
1510 const APInt &Imm, Type *Ty) = 0;
1511 virtual int getIntImmCost(const APInt &Imm, Type *Ty,
1512 TargetCostKind CostKind) = 0;
1513 virtual int getIntImmCostInst(unsigned Opc, unsigned Idx, const APInt &Imm,
1514 Type *Ty, TargetCostKind CostKind,
1515 Instruction *Inst = nullptr) = 0;
1516 virtual int getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
1517 const APInt &Imm, Type *Ty,
1518 TargetCostKind CostKind) = 0;
1519 virtual unsigned getNumberOfRegisters(unsigned ClassID) const = 0;
1520 virtual unsigned getRegisterClassForType(bool Vector,
1521 Type *Ty = nullptr) const = 0;
1522 virtual const char *getRegisterClassName(unsigned ClassID) const = 0;
1523 virtual unsigned getRegisterBitWidth(bool Vector) const = 0;
1524 virtual unsigned getMinVectorRegisterBitWidth() = 0;
1525 virtual Optional<unsigned> getMaxVScale() const = 0;
1526 virtual bool shouldMaximizeVectorBandwidth(bool OptSize) const = 0;
1527 virtual unsigned getMinimumVF(unsigned ElemWidth) const = 0;
1528 virtual unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const = 0;
1529 virtual bool shouldConsiderAddressTypePromotion(
1530 const Instruction &I, bool &AllowPromotionWithoutCommonHeader) = 0;
1531 virtual unsigned getCacheLineSize() const = 0;
1532 virtual Optional<unsigned> getCacheSize(CacheLevel Level) const = 0;
1533 virtual Optional<unsigned> getCacheAssociativity(CacheLevel Level) const = 0;
1534
1535 /// \return How much before a load we should place the prefetch
1536 /// instruction. This is currently measured in number of
1537 /// instructions.
1538 virtual unsigned getPrefetchDistance() const = 0;
1539
1540 /// \return Some HW prefetchers can handle accesses up to a certain
1541 /// constant stride. This is the minimum stride in bytes where it
1542 /// makes sense to start adding SW prefetches. The default is 1,
1543 /// i.e. prefetch with any stride. Sometimes prefetching is beneficial
1544 /// even below the HW prefetcher limit, and the arguments provided are
1545 /// meant to serve as a basis for deciding this for a particular loop.
1546 virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses,
1547 unsigned NumStridedMemAccesses,
1548 unsigned NumPrefetches,
1549 bool HasCall) const = 0;
1550
1551 /// \return The maximum number of iterations to prefetch ahead. If
1552 /// the required number of iterations is more than this number, no
1553 /// prefetching is performed.
1554 virtual unsigned getMaxPrefetchIterationsAhead() const = 0;
1555
1556 /// \return True if prefetching should also be done for writes.
1557 virtual bool enableWritePrefetching() const = 0;
1558
1559 virtual unsigned getMaxInterleaveFactor(unsigned VF) = 0;
1560 virtual unsigned getArithmeticInstrCost(
1561 unsigned Opcode, Type *Ty,
1562 TTI::TargetCostKind CostKind,
1563 OperandValueKind Opd1Info,
1564 OperandValueKind Opd2Info, OperandValueProperties Opd1PropInfo,
1565 OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
1566 const Instruction *CxtI = nullptr) = 0;
1567 virtual int getShuffleCost(ShuffleKind Kind, VectorType *Tp, int Index,
1568 VectorType *SubTp) = 0;
1569 virtual int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
1570 CastContextHint CCH,
1571 TTI::TargetCostKind CostKind,
1572 const Instruction *I) = 0;
1573 virtual int getExtractWithExtendCost(unsigned Opcode, Type *Dst,
1574 VectorType *VecTy, unsigned Index) = 0;
1575 virtual int getCFInstrCost(unsigned Opcode,
1576 TTI::TargetCostKind CostKind) = 0;
1577 virtual int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
1578 CmpInst::Predicate VecPred,
1579 TTI::TargetCostKind CostKind,
1580 const Instruction *I) = 0;
1581 virtual int getVectorInstrCost(unsigned Opcode, Type *Val,
1582 unsigned Index) = 0;
1583 virtual int getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
1584 unsigned AddressSpace,
1585 TTI::TargetCostKind CostKind,
1586 const Instruction *I) = 0;
1587 virtual int getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
1588 unsigned AddressSpace,
1589 TTI::TargetCostKind CostKind) = 0;
1590 virtual int getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
1591 const Value *Ptr, bool VariableMask,
1592 Align Alignment,
1593 TTI::TargetCostKind CostKind,
1594 const Instruction *I = nullptr) = 0;
1595
1596 virtual int getInterleavedMemoryOpCost(
1597 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1598 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1599 bool UseMaskForCond = false, bool UseMaskForGaps = false) = 0;
1600 virtual int getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
1601 bool IsPairwiseForm,
1602 TTI::TargetCostKind CostKind) = 0;
1603 virtual int getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
1604 bool IsPairwiseForm, bool IsUnsigned,
1605 TTI::TargetCostKind CostKind) = 0;
1606 virtual InstructionCost getExtendedAddReductionCost(
1607 bool IsMLA, bool IsUnsigned, Type *ResTy, VectorType *Ty,
1608 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) = 0;
1609 virtual int getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
1610 TTI::TargetCostKind CostKind) = 0;
1611 virtual int getCallInstrCost(Function *F, Type *RetTy,
1612 ArrayRef<Type *> Tys,
1613 TTI::TargetCostKind CostKind) = 0;
1614 virtual unsigned getNumberOfParts(Type *Tp) = 0;
1615 virtual int getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
1616 const SCEV *Ptr) = 0;
1617 virtual unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) = 0;
1618 virtual bool getTgtMemIntrinsic(IntrinsicInst *Inst,
1619 MemIntrinsicInfo &Info) = 0;
1620 virtual unsigned getAtomicMemIntrinsicMaxElementSize() const = 0;
1621 virtual Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
1622 Type *ExpectedType) = 0;
1623 virtual Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
1624 unsigned SrcAddrSpace,
1625 unsigned DestAddrSpace,
1626 unsigned SrcAlign,
1627 unsigned DestAlign) const = 0;
1628 virtual void getMemcpyLoopResidualLoweringType(
1629 SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
1630 unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
1631 unsigned SrcAlign, unsigned DestAlign) const = 0;
1632 virtual bool areInlineCompatible(const Function *Caller,
1633 const Function *Callee) const = 0;
1634 virtual bool
1635 areFunctionArgsABICompatible(const Function *Caller, const Function *Callee,
1636 SmallPtrSetImpl<Argument *> &Args) const = 0;
1637 virtual bool isIndexedLoadLegal(MemIndexedMode Mode, Type *Ty) const = 0;
1638 virtual bool isIndexedStoreLegal(MemIndexedMode Mode, Type *Ty) const = 0;
1639 virtual unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const = 0;
1640 virtual bool isLegalToVectorizeLoad(LoadInst *LI) const = 0;
1641 virtual bool isLegalToVectorizeStore(StoreInst *SI) const = 0;
1642 virtual bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
1643 Align Alignment,
1644 unsigned AddrSpace) const = 0;
1645 virtual bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
1646 Align Alignment,
1647 unsigned AddrSpace) const = 0;
1648 virtual unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
1649 unsigned ChainSizeInBytes,
1650 VectorType *VecTy) const = 0;
1651 virtual unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
1652 unsigned ChainSizeInBytes,
1653 VectorType *VecTy) const = 0;
1654 virtual bool useReductionIntrinsic(unsigned Opcode, Type *Ty,
1655 ReductionFlags) const = 0;
1656 virtual bool preferInLoopReduction(unsigned Opcode, Type *Ty,
1657 ReductionFlags) const = 0;
1658 virtual bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty,
1659 ReductionFlags) const = 0;
1660 virtual bool shouldExpandReduction(const IntrinsicInst *II) const = 0;
1661 virtual unsigned getGISelRematGlobalCost() const = 0;
1662 virtual bool supportsScalableVectors() const = 0;
1663 virtual bool hasActiveVectorLength() const = 0;
1664 virtual int getInstructionLatency(const Instruction *I) = 0;
1665 };
1666
1667 template <typename T>
1668 class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
1669 T Impl;
1670
1671 public:
1672 Model(T Impl) : Impl(std::move(Impl)) {}
1673 ~Model() override {}
1674
1675 const DataLayout &getDataLayout() const override {
1676 return Impl.getDataLayout();
1677 }
1678
1679 int getGEPCost(Type *PointeeType, const Value *Ptr,
1680 ArrayRef<const Value *> Operands,
1681 enum TargetTransformInfo::TargetCostKind CostKind) override {
1682 return Impl.getGEPCost(PointeeType, Ptr, Operands);
1683 }
1684 unsigned getInliningThresholdMultiplier() override {
1685 return Impl.getInliningThresholdMultiplier();
1686 }
1687 unsigned adjustInliningThreshold(const CallBase *CB) override {
1688 return Impl.adjustInliningThreshold(CB);
1689 }
1690 int getInlinerVectorBonusPercent() override {
1691 return Impl.getInlinerVectorBonusPercent();
1692 }
1693 int getMemcpyCost(const Instruction *I) override {
1694 return Impl.getMemcpyCost(I);
1695 }
1696 int getUserCost(const User *U, ArrayRef<const Value *> Operands,
1697 TargetCostKind CostKind) override {
1698 return Impl.getUserCost(U, Operands, CostKind);
1699 }
1700 bool hasBranchDivergence() override { return Impl.hasBranchDivergence(); }
1701 bool useGPUDivergenceAnalysis() override {
1702 return Impl.useGPUDivergenceAnalysis();
1703 }
1704 bool isSourceOfDivergence(const Value *V) override {
1705 return Impl.isSourceOfDivergence(V);
1706 }
1707
1708 bool isAlwaysUniform(const Value *V) override {
1709 return Impl.isAlwaysUniform(V);
1710 }
1711
1712 unsigned getFlatAddressSpace() override { return Impl.getFlatAddressSpace(); }
1713
1714 bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
1715 Intrinsic::ID IID) const override {
1716 return Impl.collectFlatAddressOperands(OpIndexes, IID);
1717 }
1718
1719 bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override {
1720 return Impl.isNoopAddrSpaceCast(FromAS, ToAS);
1721 }
1722
1723 unsigned getAssumedAddrSpace(const Value *V) const override {
1724 return Impl.getAssumedAddrSpace(V);
1725 }
1726
1727 Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
1728 Value *NewV) const override {
1729 return Impl.rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
1730 }
1731
1732 bool isLoweredToCall(const Function *F) override {
1733 return Impl.isLoweredToCall(F);
1734 }
1735 void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
1736 UnrollingPreferences &UP) override {
1737 return Impl.getUnrollingPreferences(L, SE, UP);
1738 }
1739 void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
1740 PeelingPreferences &PP) override {
1741 return Impl.getPeelingPreferences(L, SE, PP);
1742 }
1743 bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
1744 AssumptionCache &AC, TargetLibraryInfo *LibInfo,
1745 HardwareLoopInfo &HWLoopInfo) override {
1746 return Impl.isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
1747 }
1748 bool preferPredicateOverEpilogue(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
1749 AssumptionCache &AC, TargetLibraryInfo *TLI,
1750 DominatorTree *DT,
1751 const LoopAccessInfo *LAI) override {
1752 return Impl.preferPredicateOverEpilogue(L, LI, SE, AC, TLI, DT, LAI);
1753 }
1754 bool emitGetActiveLaneMask() override {
1755 return Impl.emitGetActiveLaneMask();
1756 }
1757 Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
1758 IntrinsicInst &II) override {
1759 return Impl.instCombineIntrinsic(IC, II);
1760 }
1761 Optional<Value *>
1762 simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
1763 APInt DemandedMask, KnownBits &Known,
1764 bool &KnownBitsComputed) override {
1765 return Impl.simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
1766 KnownBitsComputed);
1767 }
1768 Optional<Value *> simplifyDemandedVectorEltsIntrinsic(
1769 InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
1770 APInt &UndefElts2, APInt &UndefElts3,
1771 std::function<void(Instruction *, unsigned, APInt, APInt &)>
1772 SimplifyAndSetOp) override {
1773 return Impl.simplifyDemandedVectorEltsIntrinsic(
1774 IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
1775 SimplifyAndSetOp);
1776 }
1777 bool isLegalAddImmediate(int64_t Imm) override {
1778 return Impl.isLegalAddImmediate(Imm);
1779 }
1780 bool isLegalICmpImmediate(int64_t Imm) override {
1781 return Impl.isLegalICmpImmediate(Imm);
1782 }
1783 bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
1784 bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
1785 Instruction *I) override {
1786 return Impl.isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg, Scale,
1787 AddrSpace, I);
1788 }
1789 bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
1790 TargetTransformInfo::LSRCost &C2) override {
1791 return Impl.isLSRCostLess(C1, C2);
1792 }
1793 bool isNumRegsMajorCostOfLSR() override {
1794 return Impl.isNumRegsMajorCostOfLSR();
1795 }
1796 bool isProfitableLSRChainElement(Instruction *I) override {
1797 return Impl.isProfitableLSRChainElement(I);
1798 }
1799 bool canMacroFuseCmp() override { return Impl.canMacroFuseCmp(); }
1800 bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI,
1801 DominatorTree *DT, AssumptionCache *AC,
1802 TargetLibraryInfo *LibInfo) override {
1803 return Impl.canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);
1804 }
1805 bool shouldFavorPostInc() const override { return Impl.shouldFavorPostInc(); }
1806 bool shouldFavorBackedgeIndex(const Loop *L) const override {
1807 return Impl.shouldFavorBackedgeIndex(L);
1808 }
1809 bool isLegalMaskedStore(Type *DataType, Align Alignment) override {
1810 return Impl.isLegalMaskedStore(DataType, Alignment);
1811 }
1812 bool isLegalMaskedLoad(Type *DataType, Align Alignment) override {
1813 return Impl.isLegalMaskedLoad(DataType, Alignment);
1814 }
1815 bool isLegalNTStore(Type *DataType, Align Alignment) override {
1816 return Impl.isLegalNTStore(DataType, Alignment);
1817 }
1818 bool isLegalNTLoad(Type *DataType, Align Alignment) override {
1819 return Impl.isLegalNTLoad(DataType, Alignment);
1820 }
1821 bool isLegalMaskedScatter(Type *DataType, Align Alignment) override {
1822 return Impl.isLegalMaskedScatter(DataType, Alignment);
1823 }
1824 bool isLegalMaskedGather(Type *DataType, Align Alignment) override {
1825 return Impl.isLegalMaskedGather(DataType, Alignment);
1826 }
1827 bool isLegalMaskedCompressStore(Type *DataType) override {
1828 return Impl.isLegalMaskedCompressStore(DataType);
1829 }
1830 bool isLegalMaskedExpandLoad(Type *DataType) override {
1831 return Impl.isLegalMaskedExpandLoad(DataType);
1832 }
1833 bool hasDivRemOp(Type *DataType, bool IsSigned) override {
1834 return Impl.hasDivRemOp(DataType, IsSigned);
1835 }
1836 bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) override {
1837 return Impl.hasVolatileVariant(I, AddrSpace);
1838 }
1839 bool prefersVectorizedAddressing() override {
1840 return Impl.prefersVectorizedAddressing();
1841 }
1842 int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
1843 bool HasBaseReg, int64_t Scale,
1844 unsigned AddrSpace) override {
1845 return Impl.getScalingFactorCost(Ty, BaseGV, BaseOffset, HasBaseReg, Scale,
1846 AddrSpace);
1847 }
1848 bool LSRWithInstrQueries() override { return Impl.LSRWithInstrQueries(); }
1849 bool isTruncateFree(Type *Ty1, Type *Ty2) override {
1850 return Impl.isTruncateFree(Ty1, Ty2);
1851 }
1852 bool isProfitableToHoist(Instruction *I) override {
1853 return Impl.isProfitableToHoist(I);
1854 }
1855 bool useAA() override { return Impl.useAA(); }
1856 bool isTypeLegal(Type *Ty) override { return Impl.isTypeLegal(Ty); }
1857 unsigned getRegUsageForType(Type *Ty) override {
1858 return Impl.getRegUsageForType(Ty);
1859 }
1860 bool shouldBuildLookupTables() override {
1861 return Impl.shouldBuildLookupTables();
1862 }
1863 bool shouldBuildLookupTablesForConstant(Constant *C) override {
1864 return Impl.shouldBuildLookupTablesForConstant(C);
1865 }
1866 bool useColdCCForColdCall(Function &F) override {
1867 return Impl.useColdCCForColdCall(F);
1868 }
1869
1870 unsigned getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts,
1871 bool Insert, bool Extract) override {
1872 return Impl.getScalarizationOverhead(Ty, DemandedElts, Insert, Extract);
1873 }
1874 unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
1875 unsigned VF) override {
1876 return Impl.getOperandsScalarizationOverhead(Args, VF);
1877 }
1878
1879 bool supportsEfficientVectorElementLoadStore() override {
1880 return Impl.supportsEfficientVectorElementLoadStore();
1881 }
1882
1883 bool enableAggressiveInterleaving(bool LoopHasReductions) override {
1884 return Impl.enableAggressiveInterleaving(LoopHasReductions);
1885 }
1886 MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
1887 bool IsZeroCmp) const override {
1888 return Impl.enableMemCmpExpansion(OptSize, IsZeroCmp);
1889 }
1890 bool enableInterleavedAccessVectorization() override {
1891 return Impl.enableInterleavedAccessVectorization();
1892 }
1893 bool enableMaskedInterleavedAccessVectorization() override {
1894 return Impl.enableMaskedInterleavedAccessVectorization();
1895 }
1896 bool isFPVectorizationPotentiallyUnsafe() override {
1897 return Impl.isFPVectorizationPotentiallyUnsafe();
1898 }
1899 bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
1900 unsigned AddressSpace, unsigned Alignment,
1901 bool *Fast) override {
1902 return Impl.allowsMisalignedMemoryAccesses(Context, BitWidth, AddressSpace,
1903 Alignment, Fast);
1904 }
1905 PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) override {
1906 return Impl.getPopcntSupport(IntTyWidthInBit);
1907 }
1908 bool haveFastSqrt(Type *Ty) override { return Impl.haveFastSqrt(Ty); }
1909
1910 bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) override {
1911 return Impl.isFCmpOrdCheaperThanFCmpZero(Ty);
1912 }
1913
1914 int getFPOpCost(Type *Ty) override { return Impl.getFPOpCost(Ty); }
1915
1916 int getIntImmCodeSizeCost(unsigned Opc, unsigned Idx, const APInt &Imm,
1917 Type *Ty) override {
1918 return Impl.getIntImmCodeSizeCost(Opc, Idx, Imm, Ty);
1919 }
1920 int getIntImmCost(const APInt &Imm, Type *Ty,
1921 TargetCostKind CostKind) override {
1922 return Impl.getIntImmCost(Imm, Ty, CostKind);
1923 }
1924 int getIntImmCostInst(unsigned Opc, unsigned Idx, const APInt &Imm, Type *Ty,
1925 TargetCostKind CostKind,
1926 Instruction *Inst = nullptr) override {
1927 return Impl.getIntImmCostInst(Opc, Idx, Imm, Ty, CostKind, Inst);
1928 }
1929 int getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
1930 Type *Ty, TargetCostKind CostKind) override {
1931 return Impl.getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);
1932 }
1933 unsigned getNumberOfRegisters(unsigned ClassID) const override {
1934 return Impl.getNumberOfRegisters(ClassID);
1935 }
1936 unsigned getRegisterClassForType(bool Vector,
1937 Type *Ty = nullptr) const override {
1938 return Impl.getRegisterClassForType(Vector, Ty);
1939 }
1940 const char *getRegisterClassName(unsigned ClassID) const override {
1941 return Impl.getRegisterClassName(ClassID);
1942 }
1943 unsigned getRegisterBitWidth(bool Vector) const override {
1944 return Impl.getRegisterBitWidth(Vector);
1945 }
1946 unsigned getMinVectorRegisterBitWidth() override {
1947 return Impl.getMinVectorRegisterBitWidth();
1948 }
1949 Optional<unsigned> getMaxVScale() const override {
1950 return Impl.getMaxVScale();
1951 }
1952 bool shouldMaximizeVectorBandwidth(bool OptSize) const override {
1953 return Impl.shouldMaximizeVectorBandwidth(OptSize);
1954 }
1955 unsigned getMinimumVF(unsigned ElemWidth) const override {
1956 return Impl.getMinimumVF(ElemWidth);
1957 }
1958 unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const override {
1959 return Impl.getMaximumVF(ElemWidth, Opcode);
1960 }
1961 bool shouldConsiderAddressTypePromotion(
1962 const Instruction &I, bool &AllowPromotionWithoutCommonHeader) override {
1963 return Impl.shouldConsiderAddressTypePromotion(
1964 I, AllowPromotionWithoutCommonHeader);
1965 }
1966 unsigned getCacheLineSize() const override { return Impl.getCacheLineSize(); }
1967 Optional<unsigned> getCacheSize(CacheLevel Level) const override {
1968 return Impl.getCacheSize(Level);
1969 }
1970 Optional<unsigned> getCacheAssociativity(CacheLevel Level) const override {
1971 return Impl.getCacheAssociativity(Level);
1972 }
1973
1974 /// Return the preferred prefetch distance in terms of instructions.
1975 ///
1976 unsigned getPrefetchDistance() const override {
1977 return Impl.getPrefetchDistance();
1978 }
1979
1980 /// Return the minimum stride necessary to trigger software
1981 /// prefetching.
1982 ///
1983 unsigned getMinPrefetchStride(unsigned NumMemAccesses,
1984 unsigned NumStridedMemAccesses,
1985 unsigned NumPrefetches,
1986 bool HasCall) const override {
1987 return Impl.getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
1988 NumPrefetches, HasCall);
1989 }
1990
1991 /// Return the maximum prefetch distance in terms of loop
1992 /// iterations.
1993 ///
1994 unsigned getMaxPrefetchIterationsAhead() const override {
1995 return Impl.getMaxPrefetchIterationsAhead();
1996 }
1997
1998 /// \return True if prefetching should also be done for writes.
1999 bool enableWritePrefetching() const override {
2000 return Impl.enableWritePrefetching();
2001 }
2002
2003 unsigned getMaxInterleaveFactor(unsigned VF) override {
2004 return Impl.getMaxInterleaveFactor(VF);
2005 }
2006 unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
2007 unsigned &JTSize,
2008 ProfileSummaryInfo *PSI,
2009 BlockFrequencyInfo *BFI) override {
2010 return Impl.getEstimatedNumberOfCaseClusters(SI, JTSize, PSI, BFI);
2011 }
2012 unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
2013 TTI::TargetCostKind CostKind,
2014 OperandValueKind Opd1Info,
2015 OperandValueKind Opd2Info,
2016 OperandValueProperties Opd1PropInfo,
2017 OperandValueProperties Opd2PropInfo,
2018 ArrayRef<const Value *> Args,
2019 const Instruction *CxtI = nullptr) override {
2020 return Impl.getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info, Opd2Info,
2021 Opd1PropInfo, Opd2PropInfo, Args, CxtI);
2022 }
2023 int getShuffleCost(ShuffleKind Kind, VectorType *Tp, int Index,
2024 VectorType *SubTp) override {
2025 return Impl.getShuffleCost(Kind, Tp, Index, SubTp);
2026 }
2027 int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
2028 CastContextHint CCH, TTI::TargetCostKind CostKind,
2029 const Instruction *I) override {
2030 return Impl.getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
2031 }
2032 int getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy,
2033 unsigned Index) override {
2034 return Impl.getExtractWithExtendCost(Opcode, Dst, VecTy, Index);
2035 }
2036 int getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) override {
2037 return Impl.getCFInstrCost(Opcode, CostKind);
2038 }
2039 int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
2040 CmpInst::Predicate VecPred,
2041 TTI::TargetCostKind CostKind,
2042 const Instruction *I) override {
2043 return Impl.getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
2044 }
2045 int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) override {
2046 return Impl.getVectorInstrCost(Opcode, Val, Index);
2047 }
2048 int getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
2049 unsigned AddressSpace, TTI::TargetCostKind CostKind,
2050 const Instruction *I) override {
2051 return Impl.getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
2052 CostKind, I);
2053 }
2054 int getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
2055 unsigned AddressSpace,
2056 TTI::TargetCostKind CostKind) override {
2057 return Impl.getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
2058 CostKind);
2059 }
2060 int getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr,
2061 bool VariableMask, Align Alignment,
2062 TTI::TargetCostKind CostKind,
2063 const Instruction *I = nullptr) override {
2064 return Impl.getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
2065 Alignment, CostKind, I);
2066 }
2067 int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
2068 ArrayRef<unsigned> Indices, Align Alignment,
2069 unsigned AddressSpace,
2070 TTI::TargetCostKind CostKind,
2071 bool UseMaskForCond,
2072 bool UseMaskForGaps) override {
2073 return Impl.getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
2074 Alignment, AddressSpace, CostKind,
2075 UseMaskForCond, UseMaskForGaps);
2076 }
2077 int getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
2078 bool IsPairwiseForm,
2079 TTI::TargetCostKind CostKind) override {
2080 return Impl.getArithmeticReductionCost(Opcode, Ty, IsPairwiseForm,
2081 CostKind);
2082 }
2083 int getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
2084 bool IsPairwiseForm, bool IsUnsigned,
2085 TTI::TargetCostKind CostKind) override {
2086 return Impl.getMinMaxReductionCost(Ty, CondTy, IsPairwiseForm, IsUnsigned,
2087 CostKind);
2088 }
2089 InstructionCost getExtendedAddReductionCost(
2090 bool IsMLA, bool IsUnsigned, Type *ResTy, VectorType *Ty,
2091 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) override {
2092 return Impl.getExtendedAddReductionCost(IsMLA, IsUnsigned, ResTy, Ty,
2093 CostKind);
2094 }
2095 int getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
2096 TTI::TargetCostKind CostKind) override {
2097 return Impl.getIntrinsicInstrCost(ICA, CostKind);
2098 }
2099 int getCallInstrCost(Function *F, Type *RetTy,
2100 ArrayRef<Type *> Tys,
2101 TTI::TargetCostKind CostKind) override {
2102 return Impl.getCallInstrCost(F, RetTy, Tys, CostKind);
2103 }
2104 unsigned getNumberOfParts(Type *Tp) override {
2105 return Impl.getNumberOfParts(Tp);
2106 }
2107 int getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
2108 const SCEV *Ptr) override {
2109 return Impl.getAddressComputationCost(Ty, SE, Ptr);
2110 }
2111 unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) override {
2112 return Impl.getCostOfKeepingLiveOverCall(Tys);
2113 }
2114 bool getTgtMemIntrinsic(IntrinsicInst *Inst,
2115 MemIntrinsicInfo &Info) override {
2116 return Impl.getTgtMemIntrinsic(Inst, Info);
2117 }
2118 unsigned getAtomicMemIntrinsicMaxElementSize() const override {
2119 return Impl.getAtomicMemIntrinsicMaxElementSize();
2120 }
2121 Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
2122 Type *ExpectedType) override {
2123 return Impl.getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
2124 }
2125 Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
2126 unsigned SrcAddrSpace, unsigned DestAddrSpace,
2127 unsigned SrcAlign,
2128 unsigned DestAlign) const override {
2129 return Impl.getMemcpyLoopLoweringType(Context, Length, SrcAddrSpace,
2130 DestAddrSpace, SrcAlign, DestAlign);
2131 }
2132 void getMemcpyLoopResidualLoweringType(
2133 SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
2134 unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
2135 unsigned SrcAlign, unsigned DestAlign) const override {
2136 Impl.getMemcpyLoopResidualLoweringType(OpsOut, Context, RemainingBytes,
2137 SrcAddrSpace, DestAddrSpace,
2138 SrcAlign, DestAlign);
2139 }
2140 bool areInlineCompatible(const Function *Caller,
2141 const Function *Callee) const override {
2142 return Impl.areInlineCompatible(Caller, Callee);
2143 }
2144 bool areFunctionArgsABICompatible(
2145 const Function *Caller, const Function *Callee,
2146 SmallPtrSetImpl<Argument *> &Args) const override {
2147 return Impl.areFunctionArgsABICompatible(Caller, Callee, Args);
2148 }
2149 bool isIndexedLoadLegal(MemIndexedMode Mode, Type *Ty) const override {
2150 return Impl.isIndexedLoadLegal(Mode, Ty, getDataLayout());
2151 }
2152 bool isIndexedStoreLegal(MemIndexedMode Mode, Type *Ty) const override {
2153 return Impl.isIndexedStoreLegal(Mode, Ty, getDataLayout());
2154 }
2155 unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const override {
2156 return Impl.getLoadStoreVecRegBitWidth(AddrSpace);
2157 }
2158 bool isLegalToVectorizeLoad(LoadInst *LI) const override {
2159 return Impl.isLegalToVectorizeLoad(LI);
2160 }
2161 bool isLegalToVectorizeStore(StoreInst *SI) const override {
2162 return Impl.isLegalToVectorizeStore(SI);
2163 }
2164 bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment,
2165 unsigned AddrSpace) const override {
2166 return Impl.isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
2167 AddrSpace);
2168 }
2169 bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment,
2170 unsigned AddrSpace) const override {
2171 return Impl.isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
2172 AddrSpace);
2173 }
2174 unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
2175 unsigned ChainSizeInBytes,
2176 VectorType *VecTy) const override {
2177 return Impl.getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);
2178 }
2179 unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
2180 unsigned ChainSizeInBytes,
2181 VectorType *VecTy) const override {
2182 return Impl.getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
2183 }
2184 bool useReductionIntrinsic(unsigned Opcode, Type *Ty,
2185 ReductionFlags Flags) const override {
2186 return Impl.useReductionIntrinsic(Opcode, Ty, Flags);
2187 }
2188 bool preferInLoopReduction(unsigned Opcode, Type *Ty,
2189 ReductionFlags Flags) const override {
2190 return Impl.preferInLoopReduction(Opcode, Ty, Flags);
2191 }
2192 bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty,
2193 ReductionFlags Flags) const override {
2194 return Impl.preferPredicatedReductionSelect(Opcode, Ty, Flags);
2195 }
2196 bool shouldExpandReduction(const IntrinsicInst *II) const override {
2197 return Impl.shouldExpandReduction(II);
2198 }
2199
2200 unsigned getGISelRematGlobalCost() const override {
2201 return Impl.getGISelRematGlobalCost();
2202 }
2203
2204 bool supportsScalableVectors() const override {
2205 return Impl.supportsScalableVectors();
2206 }
2207
2208 bool hasActiveVectorLength() const override {
2209 return Impl.hasActiveVectorLength();
2210 }
2211
2212 int getInstructionLatency(const Instruction *I) override {
2213 return Impl.getInstructionLatency(I);
2214 }
2215 };
2216
2217 template <typename T>
2218 TargetTransformInfo::TargetTransformInfo(T Impl)
2219 : TTIImpl(new Model<T>(Impl)) {}
2220
2221 /// Analysis pass providing the \c TargetTransformInfo.
2222 ///
2223 /// The core idea of the TargetIRAnalysis is to expose an interface through
2224 /// which LLVM targets can analyze and provide information about the middle
2225 /// end's target-independent IR. This supports use cases such as target-aware
2226 /// cost modeling of IR constructs.
2227 ///
2228 /// This is a function analysis because much of the cost modeling for targets
2229 /// is done in a subtarget specific way and LLVM supports compiling different
2230 /// functions targeting different subtargets in order to support runtime
2231 /// dispatch according to the observed subtarget.
2232 class TargetIRAnalysis : public AnalysisInfoMixin<TargetIRAnalysis> {
2233 public:
2234 typedef TargetTransformInfo Result;
2235
2236 /// Default construct a target IR analysis.
2237 ///
2238 /// This will use the module's datalayout to construct a baseline
2239 /// conservative TTI result.
2240 TargetIRAnalysis();
2241
2242 /// Construct an IR analysis pass around a target-provided callback.
2243 ///
2244 /// The callback will be called with a particular function for which the TTI
2245 /// is needed and must return a TTI object for that function.
2246 TargetIRAnalysis(std::function<Result(const Function &)> TTICallback);
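  // A hedged usage sketch with the new pass manager (FAM and F are assumed to
  // be a FunctionAnalysisManager and a Function; a target would normally
  // register TM->getTargetIRAnalysis() instead of the default):
  //
  //   FAM.registerPass([] { return TargetIRAnalysis(); });
  //   TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);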
2247
2248 // Value semantics. We spell out the constructors for MSVC.
2249 TargetIRAnalysis(const TargetIRAnalysis &Arg)
2250 : TTICallback(Arg.TTICallback) {}
2251 TargetIRAnalysis(TargetIRAnalysis &&Arg)
2252 : TTICallback(std::move(Arg.TTICallback)) {}
2253 TargetIRAnalysis &operator=(const TargetIRAnalysis &RHS) {
2254 TTICallback = RHS.TTICallback;
2255 return *this;
2256 }
2257 TargetIRAnalysis &operator=(TargetIRAnalysis &&RHS) {
2258 TTICallback = std::move(RHS.TTICallback);
2259 return *this;
2260 }
2261
2262 Result run(const Function &F, FunctionAnalysisManager &);
2263
2264 private:
2265 friend AnalysisInfoMixin<TargetIRAnalysis>;
2266 static AnalysisKey Key;
2267
2268 /// The callback used to produce a result.
2269 ///
2270 /// We use a completely opaque callback so that targets can provide whatever
2271 /// mechanism they desire for constructing the TTI for a given function.
2272 ///
2273 /// FIXME: Should we really use std::function? It's relatively inefficient.
2274 /// It might be possible to arrange for even stateful callbacks to outlive
2275 /// the analysis and thus use a function_ref which would be lighter weight.
2276 /// This may also be less error prone as the callback is likely to reference
2277 /// the external TargetMachine, and that reference needs to never dangle.
2278 std::function<Result(const Function &)> TTICallback;
2279
2280 /// Helper function used as the callback in the default constructor.
2281 static Result getDefaultTTI(const Function &F);
2282 };
2283
2284 /// Wrapper pass for TargetTransformInfo.
2285 ///
2286 /// This pass can be constructed from a TTI object which it stores internally
2287 /// and is queried by passes.
2288 class TargetTransformInfoWrapperPass : public ImmutablePass {
2289 TargetIRAnalysis TIRA;
2290 Optional<TargetTransformInfo> TTI;
2291
2292 virtual void anchor();
2293
2294 public:
2295 static char ID;
2296
2297 /// We must provide a default constructor for the pass but it should
2298 /// never be used.
2299 ///
2300 /// Use the constructor below or call one of the creation routines.
2301 TargetTransformInfoWrapperPass();
2302
2303 explicit TargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);
2304
2305 TargetTransformInfo &getTTI(const Function &F);
2306 };
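// A hedged sketch of the legacy pass manager idiom for reaching the TTI of a
// function F from within a pass that declared the dependency in its
// getAnalysisUsage (via AU.addRequired<TargetTransformInfoWrapperPass>()):
//
//   TargetTransformInfo &TTI =
//       getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);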
2307
2308 /// Create an analysis pass wrapper around a TTI object.
2309 ///
2310 /// This analysis pass just holds the TTI instance and makes it available to
2311 /// clients.
2312 ImmutablePass *createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);
2313
2314 } // namespace llvm
2315
2316 #endif
2317