//===- llvm/CodeGen/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file describes how to lower LLVM code to machine code. This has three
/// main components:
///
///  1. Which ValueTypes are natively supported by the target.
///  2. Which operations are supported for supported ValueTypes.
///  3. Cost thresholds for alternative implementations of certain operations.
///
/// In addition it has a few other components, like information about FP
/// immediates.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETLOWERING_H
#define LLVM_CODEGEN_TARGETLOWERING_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/ComplexDeinterleavingPass.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/LowLevelTypeUtils.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdint>
#include <iterator>
#include <map>
#include <string>
#include <utility>
#include <vector>

namespace llvm {

class AssumptionCache;
class CCState;
class CCValAssign;
class Constant;
class FastISel;
class FunctionLoweringInfo;
class GlobalValue;
class Loop;
class GISelKnownBits;
class IntrinsicInst;
class IRBuilderBase;
struct KnownBits;
class LLVMContext;
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MachineJumpTableInfo;
class MachineLoop;
class MachineRegisterInfo;
class MCContext;
class MCExpr;
class Module;
class ProfileSummaryInfo;
class TargetLibraryInfo;
class TargetMachine;
class TargetRegisterClass;
class TargetRegisterInfo;
class TargetTransformInfo;
class Value;

namespace Sched {

enum Preference {
  None,        // No preference.
  Source,      // Follow source order.
  RegPressure, // Scheduling for lowest register pressure.
  Hybrid,      // Scheduling for both latency and register pressure.
  ILP,         // Scheduling for ILP in low register pressure mode.
  VLIW,        // Scheduling for VLIW targets.
  Fast,        // Fast suboptimal list scheduling.
  Linearize    // Linearize DAG, no scheduling.
};

} // end namespace Sched

// MemOp models a memory operation, either memset or memcpy/memmove.
struct MemOp {
private:
  // Shared members.
  uint64_t Size;
  bool DstAlignCanChange; // True if the destination alignment can satisfy any
                          // constraint.
  Align DstAlign;         // Specified alignment of the memory operation.

  bool AllowOverlap;
  // memset only
  bool IsMemset;   // If set, this memory operation is a memset.
  bool ZeroMemset; // If set, the memset clears out memory with zeros.
  // memcpy only
  bool MemcpyStrSrc; // Indicates whether the memcpy source is an in-register
                     // constant so it does not need to be loaded.
  Align SrcAlign;    // Inferred alignment of the source or default value if
                     // the memory operation does not need to load the value.

public:
  static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
                    Align SrcAlign, bool IsVolatile,
                    bool MemcpyStrSrc = false) {
    MemOp Op;
    Op.Size = Size;
    Op.DstAlignCanChange = DstAlignCanChange;
    Op.DstAlign = DstAlign;
    Op.AllowOverlap = !IsVolatile;
    Op.IsMemset = false;
    Op.ZeroMemset = false;
    Op.MemcpyStrSrc = MemcpyStrSrc;
    Op.SrcAlign = SrcAlign;
    return Op;
  }

  static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
                   bool IsZeroMemset, bool IsVolatile) {
    MemOp Op;
    Op.Size = Size;
    Op.DstAlignCanChange = DstAlignCanChange;
    Op.DstAlign = DstAlign;
    Op.AllowOverlap = !IsVolatile;
    Op.IsMemset = true;
    Op.ZeroMemset = IsZeroMemset;
    Op.MemcpyStrSrc = false;
    return Op;
  }

  uint64_t size() const { return Size; }
  Align getDstAlign() const {
    assert(!DstAlignCanChange);
    return DstAlign;
  }
  bool isFixedDstAlign() const { return !DstAlignCanChange; }
  bool allowOverlap() const { return AllowOverlap; }
  bool isMemset() const { return IsMemset; }
  bool isMemcpy() const { return !IsMemset; }
  bool isMemcpyWithFixedDstAlign() const {
    return isMemcpy() && !DstAlignCanChange;
  }
  bool isZeroMemset() const { return isMemset() && ZeroMemset; }
  bool isMemcpyStrSrc() const {
    assert(isMemcpy() && "Must be a memcpy");
    return MemcpyStrSrc;
  }
  Align getSrcAlign() const {
    assert(isMemcpy() && "Must be a memcpy");
    return SrcAlign;
  }
  bool isSrcAligned(Align AlignCheck) const {
    return isMemset() || llvm::isAligned(AlignCheck, SrcAlign.value());
  }
  bool isDstAligned(Align AlignCheck) const {
    return DstAlignCanChange || llvm::isAligned(AlignCheck, DstAlign.value());
  }
  bool isAligned(Align AlignCheck) const {
    return isSrcAligned(AlignCheck) && isDstAligned(AlignCheck);
  }
};
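
// For example (illustrative), a non-volatile 16-byte memcpy with a fixed
// 8-byte-aligned destination and a 4-byte-aligned source:
//
//   MemOp Op = MemOp::Copy(/*Size=*/16, /*DstAlignCanChange=*/false,
//                          /*DstAlign=*/Align(8), /*SrcAlign=*/Align(4),
//                          /*IsVolatile=*/false);
//   Op.isMemcpy();          // true
//   Op.allowOverlap();      // true, because the operation is not volatile.
//   Op.isAligned(Align(4)); // true: source and destination are both at
//                           // least 4-byte aligned.
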
/// This base class for TargetLowering contains the SelectionDAG-independent
/// parts that can be used from the rest of CodeGen.
class TargetLoweringBase {
public:
  /// This enum indicates whether operations are valid for a target, and if
  /// not, what action should be used to make them valid.
  enum LegalizeAction : uint8_t {
    Legal,   // The target natively supports this operation.
    Promote, // This operation should be executed in a larger type.
    Expand,  // Try to expand this to other ops, otherwise use a libcall.
    LibCall, // Don't try to expand this to other ops, always use a libcall.
    Custom   // Use the LowerOperation hook to implement custom lowering.
  };

  /// This enum indicates whether a type is legal for a target, and if not,
  /// what action should be used to make it valid.
  enum LegalizeTypeAction : uint8_t {
    TypeLegal,           // The target natively supports this type.
    TypePromoteInteger,  // Replace this integer with a larger one.
    TypeExpandInteger,   // Split this integer into two of half the size.
    TypeSoftenFloat,     // Convert this float to a same-size integer type.
    TypeExpandFloat,     // Split this float into two of half the size.
    TypeScalarizeVector, // Replace this one-element vector with its element.
    TypeSplitVector,     // Split this vector into two of half the size.
    TypeWidenVector,     // This vector should be widened into a larger vector.
    TypePromoteFloat,    // Replace this float with a larger one.
    TypeSoftPromoteHalf, // Soften half to i16 and use float to do arithmetic.
    TypeScalarizeScalableVector, // This action is explicitly left
                                 // unimplemented. While it is theoretically
                                 // possible to legalize operations on scalable
                                 // types with a loop that handles the
                                 // vscale * #lanes of the vector, this is
                                 // non-trivial at SelectionDAG level and these
                                 // types are better to be widened or promoted.
  };

  /// LegalizeKind holds the legalization kind that needs to happen to EVT
  /// in order to type-legalize it.
  using LegalizeKind = std::pair<LegalizeTypeAction, EVT>;

  /// Enum that describes how the target represents true/false values.
  enum BooleanContent {
    UndefinedBooleanContent,        // Only bit 0 counts, the rest can hold
                                    // garbage.
    ZeroOrOneBooleanContent,        // All bits zero except for bit 0.
    ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
  };

  /// Enum that describes what type of support for selects the target has.
  enum SelectSupportKind {
    ScalarValSelect,     // The target supports scalar selects (ex: cmov).
    ScalarCondVectorVal, // The target supports selects with a scalar condition
                         // and vector values (ex: cmov).
    VectorMaskSelect     // The target supports vector selects with a vector
                         // mask (ex: x86 blends).
  };

  /// Enum that specifies what an atomic load/AtomicRMWInst is expanded
  /// to, if at all. Exists because different targets have different levels of
  /// support for these atomic instructions, and also have different options
  /// w.r.t. what they should expand to.
  enum class AtomicExpansionKind {
    None,          // Don't expand the instruction.
    CastToInteger, // Cast the atomic instruction to another type, e.g. from
                   // floating-point to integer type.
    LLSC,          // Expand the instruction into load-linked/store-conditional;
                   // used by ARM/AArch64.
    LLOnly,        // Expand the (load) instruction into just a load-linked,
                   // which has greater atomic guarantees than a normal load.
    CmpXChg,       // Expand the instruction into cmpxchg; used by at least X86.
    MaskedIntrinsic,   // Use a target-specific intrinsic for the LL/SC loop.
    BitTestIntrinsic,  // Use a target-specific intrinsic for special bit
                       // operations; used by X86.
    CmpArithIntrinsic, // Use a target-specific intrinsic for special compare
                       // operations; used by X86.
    Expand,        // Generic expansion in terms of other atomic operations.

    // Rewrite to a non-atomic form for use in a known non-preemptible
    // environment.
    NotAtomic
  };
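
  // For example, a target that wants every floating-point atomicrmw rewritten
  // as an integer cmpxchg loop might override the shouldExpandAtomicRMWInIR
  // hook like this (an illustrative sketch, not any in-tree target's policy):
  //
  //   AtomicExpansionKind
  //   shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const override {
  //     if (RMW->getType()->isFloatingPointTy())
  //       return AtomicExpansionKind::CmpXChg;
  //     return AtomicExpansionKind::None;
  //   }
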
  /// Enum that specifies when a multiplication should be expanded.
  enum class MulExpansionKind {
    Always,            // Always expand the instruction.
    OnlyLegalOrCustom, // Only expand when the resulting instructions are legal
                       // or custom.
  };

  /// Enum that specifies when a float negation is beneficial.
  enum class NegatibleCost {
    Cheaper = 0,  // Negated expression is cheaper.
    Neutral = 1,  // Negated expression has the same cost.
    Expensive = 2 // Negated expression is more expensive.
  };

  /// Enum of different potentially desirable ways to fold (and/or (setcc ...),
  /// (setcc ...)).
  enum AndOrSETCCFoldKind : uint8_t {
    None = 0,   // No fold is preferable.
    AddAnd = 1, // Fold with `Add` op and `And` op is preferable.
    NotAnd = 2, // Fold with `Not` op and `And` op is preferable.
    ABS = 4,    // Fold with `llvm.abs` op is preferable.
  };

  class ArgListEntry {
  public:
    Value *Val = nullptr;
    SDValue Node = SDValue();
    Type *Ty = nullptr;
    bool IsSExt : 1;
    bool IsZExt : 1;
    bool IsInReg : 1;
    bool IsSRet : 1;
    bool IsNest : 1;
    bool IsByVal : 1;
    bool IsByRef : 1;
    bool IsInAlloca : 1;
    bool IsPreallocated : 1;
    bool IsReturned : 1;
    bool IsSwiftSelf : 1;
    bool IsSwiftAsync : 1;
    bool IsSwiftError : 1;
    bool IsCFGuardTarget : 1;
    MaybeAlign Alignment = std::nullopt;
    Type *IndirectType = nullptr;

    ArgListEntry()
        : IsSExt(false), IsZExt(false), IsInReg(false), IsSRet(false),
          IsNest(false), IsByVal(false), IsByRef(false), IsInAlloca(false),
          IsPreallocated(false), IsReturned(false), IsSwiftSelf(false),
          IsSwiftAsync(false), IsSwiftError(false), IsCFGuardTarget(false) {}

    void setAttributes(const CallBase *Call, unsigned ArgIdx);
  };
  using ArgListTy = std::vector<ArgListEntry>;

  virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC,
                                     ArgListTy &Args) const {}

  static ISD::NodeType getExtendForContent(BooleanContent Content) {
    switch (Content) {
    case UndefinedBooleanContent:
      // Extend by adding rubbish bits.
      return ISD::ANY_EXTEND;
    case ZeroOrOneBooleanContent:
      // Extend by adding zero bits.
      return ISD::ZERO_EXTEND;
    case ZeroOrNegativeOneBooleanContent:
      // Extend by copying the sign bit.
      return ISD::SIGN_EXTEND;
    }
    llvm_unreachable("Invalid content kind");
  }
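
  // For example, on a target whose booleans are
  // ZeroOrNegativeOneBooleanContent (all bits equal to bit 0), widening a
  // SETCC result to i32 uses ISD::SIGN_EXTEND, so true becomes 0xFFFFFFFF and
  // false becomes 0 (illustrative):
  //
  //   ISD::NodeType Ext =
  //       getExtendForContent(ZeroOrNegativeOneBooleanContent);
  //   // Ext == ISD::SIGN_EXTEND
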
  explicit TargetLoweringBase(const TargetMachine &TM);
  TargetLoweringBase(const TargetLoweringBase &) = delete;
  TargetLoweringBase &operator=(const TargetLoweringBase &) = delete;
  virtual ~TargetLoweringBase() = default;

  /// Return true if the target supports strict float operations.
  bool isStrictFPEnabled() const {
    return IsStrictFPEnabled;
  }

protected:
  /// Initialize all of the actions to default values.
  void initActions();

public:
  const TargetMachine &getTargetMachine() const { return TM; }

  virtual bool useSoftFloat() const { return false; }

  /// Return the pointer type for the given address space, defaults to
  /// the pointer type from the data layout.
  /// FIXME: The default needs to be removed once all the code is updated.
  virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const {
    return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
  }

  /// Return the in-memory pointer type for the given address space, defaults
  /// to the pointer type from the data layout.
  /// FIXME: The default needs to be removed once all the code is updated.
  virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS = 0) const {
    return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
  }

  /// Return the type for frame index, which is determined by
  /// the alloca address space specified through the data layout.
  MVT getFrameIndexTy(const DataLayout &DL) const {
    return getPointerTy(DL, DL.getAllocaAddrSpace());
  }

  /// Return the type for code pointers, which is determined by the program
  /// address space specified through the data layout.
  MVT getProgramPointerTy(const DataLayout &DL) const {
    return getPointerTy(DL, DL.getProgramAddressSpace());
  }

  /// Return the type for operands of fence.
  /// TODO: Let fence operands be of i32 type and remove this.
  virtual MVT getFenceOperandTy(const DataLayout &DL) const {
    return getPointerTy(DL);
  }

  /// Return the type to use for a scalar shift opcode, given the shifted
  /// amount type. Targets should return a legal type if the input type is
  /// legal. Targets can return a type that is too small if the input type is
  /// illegal.
  virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;

  /// Returns the type for the shift amount of a shift opcode. For vectors,
  /// returns the input type. For scalars, behavior depends on \p LegalTypes.
  /// If \p LegalTypes is true, calls getScalarShiftAmountTy, otherwise uses
  /// pointer type. If getScalarShiftAmountTy or pointer type cannot represent
  /// all possible shift amounts, returns MVT::i32. In general, \p LegalTypes
  /// should be set to true for calls during type legalization and after type
  /// legalization has been completed.
  EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
                       bool LegalTypes = true) const;

  /// Return the preferred type to use for a shift opcode, given that the
  /// shifted-amount type is \p ShiftValueTy.
  LLVM_READONLY
  virtual LLT getPreferredShiftAmountTy(LLT ShiftValueTy) const {
    return ShiftValueTy;
  }

  /// Returns the type to be used for the index operand of:
  /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
  /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
  virtual MVT getVectorIdxTy(const DataLayout &DL) const {
    return getPointerTy(DL);
  }

  /// Returns the type to be used for the EVL/AVL operand of VP nodes:
  /// ISD::VP_ADD, ISD::VP_SUB, etc. It must be a legal scalar integer type,
  /// and must be at least as large as i32. The EVL is implicitly zero-extended
  /// to any larger type.
  virtual MVT getVPExplicitVectorLengthTy() const { return MVT::i32; }

  /// This callback is used to inspect load/store instructions and add
  /// target-specific MachineMemOperand flags to them. The default
  /// implementation does nothing.
  virtual MachineMemOperand::Flags
  getTargetMMOFlags(const Instruction &I) const {
    return MachineMemOperand::MONone;
  }
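
  // For example, a target with a custom MachineMemOperand flag for
  // non-temporal accesses might override it like this (an illustrative
  // sketch; MOMyTargetNontemporal is a hypothetical target-defined flag):
  //
  //   MachineMemOperand::Flags
  //   getTargetMMOFlags(const Instruction &I) const override {
  //     if (I.hasMetadata(LLVMContext::MD_nontemporal))
  //       return MOMyTargetNontemporal;
  //     return MachineMemOperand::MONone;
  //   }
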
  /// This callback is used to inspect load/store SDNodes.
  /// The default implementation does nothing.
  virtual MachineMemOperand::Flags
  getTargetMMOFlags(const MemSDNode &Node) const {
    return MachineMemOperand::MONone;
  }

  MachineMemOperand::Flags
  getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL,
                         AssumptionCache *AC = nullptr,
                         const TargetLibraryInfo *LibInfo = nullptr) const;
  MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI,
                                                   const DataLayout &DL) const;
  MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI,
                                                    const DataLayout &DL) const;

  virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
    return true;
  }

  /// Return true if the @llvm.get.active.lane.mask intrinsic should be
  /// expanded using generic code in SelectionDAGBuilder.
  virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const {
    return true;
  }

  virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF,
                                           bool IsScalable) const {
    return true;
  }

  // Return true if op(vecreduce(x), vecreduce(y)) should be reassociated to
  // vecreduce(op(x, y)) for the reduction opcode RedOpc.
  virtual bool shouldReassociateReduction(unsigned RedOpc, EVT VT) const {
    return true;
  }

  /// Return true if it is profitable to convert a select of FP constants into
  /// a constant pool load whose address depends on the select condition. The
  /// parameter may be used to differentiate a select with FP compare from
  /// integer compare.
  virtual bool reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
    return true;
  }

  /// Return true if multiple condition registers are available.
  bool hasMultipleConditionRegisters() const {
    return HasMultipleConditionRegisters;
  }

  /// Return true if the target has BitExtract instructions.
  bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }

  /// Return the preferred vector type legalization action.
  virtual TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const {
    // The default action for one-element vectors is to scalarize.
    if (VT.getVectorElementCount().isScalar())
      return TypeScalarizeVector;
    // The default action for an odd-width vector is to widen.
    if (!VT.isPow2VectorType())
      return TypeWidenVector;
    // The default action for other vectors is to promote.
    return TypePromoteInteger;
  }
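
  // For example, a target that prefers to widen every illegal fixed-width
  // vector instead of promoting its elements might override it like this
  // (an illustrative sketch):
  //
  //   TargetLoweringBase::LegalizeTypeAction
  //   getPreferredVectorAction(MVT VT) const override {
  //     if (VT.getVectorElementCount().isScalar())
  //       return TypeScalarizeVector;
  //     return TypeWidenVector;
  //   }
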
  // Return true if the half type should be passed around as i16, but promoted
  // to float around arithmetic. The default behavior is to pass around as
  // float and convert around loads/stores/bitcasts and other places where
  // the size matters.
  virtual bool softPromoteHalfType() const { return false; }

  // There are two general methods for expanding a BUILD_VECTOR node:
  //  1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
  //     them together.
  //  2. Build the vector on the stack and then load it.
  // If this function returns true, then method (1) will be used, subject to
  // the constraint that all of the necessary shuffles are legal (as determined
  // by isShuffleMaskLegal). If this function returns false, then method (2) is
  // always used. The vector type, and the number of defined values, are
  // provided.
  virtual bool
  shouldExpandBuildVectorWithShuffles(EVT /* VT */,
                                      unsigned DefinedValues) const {
    return DefinedValues < 3;
  }

  /// Return true if integer divide is usually cheaper than a sequence of
  /// several shifts, adds, and multiplies for this target.
  /// The definition of "cheaper" may depend on whether we're optimizing
  /// for speed or for size.
  virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const { return false; }

  /// Return true if the target can handle a standalone remainder operation.
  virtual bool hasStandaloneRem(EVT VT) const {
    return true;
  }

  /// Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
  virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const {
    // Default behavior is to replace SQRT(X) with X*RSQRT(X).
    return false;
  }

  /// Reciprocal estimate status values used by the functions below.
  enum ReciprocalEstimate : int {
    Unspecified = -1,
    Disabled = 0,
    Enabled = 1
  };

  /// Return a ReciprocalEstimate enum value for a square root of the given
  /// type based on the function's attributes. If the operation is not
  /// overridden by the function's attributes, "Unspecified" is returned and
  /// target defaults are expected to be used for instruction selection.
  int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const;

  /// Return a ReciprocalEstimate enum value for a division of the given type
  /// based on the function's attributes. If the operation is not overridden
  /// by the function's attributes, "Unspecified" is returned and target
  /// defaults are expected to be used for instruction selection.
  int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const;

  /// Return the refinement step count for a square root of the given type
  /// based on the function's attributes. If the operation is not overridden
  /// by the function's attributes, "Unspecified" is returned and target
  /// defaults are expected to be used for instruction selection.
  int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const;

  /// Return the refinement step count for a division of the given type based
  /// on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getDivRefinementSteps(EVT VT, MachineFunction &MF) const;

  /// Returns true if the target has indicated at least one type should be
  /// bypassed.
  bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }

  /// Returns the map of slow types for division or remainder with the
  /// corresponding fast types.
  const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
    return BypassSlowDivWidths;
  }
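
  // For example, a target whose 64-bit divide is microcoded but whose 32-bit
  // divide is fast can register a bypass in its TargetLowering constructor,
  // so codegen emits a runtime check and a 32-bit divide when both operands
  // happen to fit in 32 bits (an illustrative sketch, modeled on what X86
  // does for some subtargets):
  //
  //   addBypassSlowDiv(64, 32); // i64 div/rem -> try i32 div/rem first.
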
  /// Return true only if vscale must be a power of two.
  virtual bool isVScaleKnownToBeAPowerOfTwo() const { return false; }

  /// Return true if flow control is an expensive operation that should be
  /// avoided.
  bool isJumpExpensive() const { return JumpIsExpensive; }

  /// Return true if selects are only cheaper than branches if the branch is
  /// unlikely to be predicted right.
  bool isPredictableSelectExpensive() const {
    return PredictableSelectIsExpensive;
  }

  virtual bool fallBackToDAGISel(const Instruction &Inst) const {
    return false;
  }

  /// Return true if the following transform is beneficial:
  ///   fold (conv (load x)) -> (load (conv*)x)
  /// On architectures that don't natively support some vector loads
  /// efficiently, casting the load to a smaller vector of larger types and
  /// loading is more efficient. However, this can be undone by optimizations
  /// in dag combiner.
  virtual bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
                                       const SelectionDAG &DAG,
                                       const MachineMemOperand &MMO) const;

  /// Return true if the following transform is beneficial:
  ///   (store (y (conv x)), y*)) -> (store x, (x*))
  virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT,
                                        const SelectionDAG &DAG,
                                        const MachineMemOperand &MMO) const {
    // Default to the same logic as loads.
    return isLoadBitCastBeneficial(StoreVT, BitcastVT, DAG, MMO);
  }

  /// Return true if it is expected to be cheaper to do a store of a vector
  /// constant with the given size and type for the address space than to
  /// store the individual scalar element constants.
  virtual bool storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT,
                                            unsigned NumElem,
                                            unsigned AddrSpace) const {
    return IsZero;
  }

  /// Allow store merging for the specified type after legalization in
  /// addition to before legalization. This may transform stores that do not
  /// exist earlier (for example, stores created from intrinsics).
  virtual bool mergeStoresAfterLegalization(EVT MemVT) const {
    return true;
  }

  /// Returns true if it's reasonable to merge stores to MemVT size.
  virtual bool canMergeStoresTo(unsigned AS, EVT MemVT,
                                const MachineFunction &MF) const {
    return true;
  }

  /// Return true if it is cheap to speculate a call to intrinsic cttz.
  virtual bool isCheapToSpeculateCttz(Type *Ty) const {
    return false;
  }

  /// Return true if it is cheap to speculate a call to intrinsic ctlz.
  virtual bool isCheapToSpeculateCtlz(Type *Ty) const {
    return false;
  }

  /// Return true if ctlz instruction is fast.
  virtual bool isCtlzFast() const {
    return false;
  }

  /// Return the maximum number of "x & (x - 1)" operations that can be done
  /// instead of deferring to a custom CTPOP.
  virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const {
    return 1;
  }

  /// Return true if the instruction generated for an equality comparison is
  /// folded with the instruction generated for a signed comparison.
  virtual bool isEqualityCmpFoldedWithSignedCmp() const { return true; }

  /// Return true if the heuristic to prefer icmp eq zero should be used in
  /// code gen prepare.
  virtual bool preferZeroCompareBranch() const { return false; }

  /// Return true if it is cheaper to split the store of a merged int val
  /// from a pair of smaller values into multiple stores.
  virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const {
    return false;
  }

  /// Return true if the target supports combining a
  /// chain like:
  /// \code
  ///   %andResult = and %val1, #mask
  ///   %icmpResult = icmp %andResult, 0
  /// \endcode
  /// into a single machine instruction of a form like:
  /// \code
  ///   cc = test %register, #mask
  /// \endcode
  virtual bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
    return false;
  }

  /// Return true if it is valid to merge the TargetMMOFlags in two SDNodes.
  virtual bool
  areTwoSDNodeTargetMMOFlagsMergeable(const MemSDNode &NodeX,
                                      const MemSDNode &NodeY) const {
    return true;
  }

  /// Use bitwise logic to make pairs of compares more efficient. For example:
  ///   and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
  /// This should be true when it takes more than one instruction to lower
  /// setcc (cmp+set on x86 scalar), when bitwise ops are faster than logic on
  /// condition bits (crand on PowerPC), and/or when reducing cmp+br is a win.
  virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const {
    return false;
  }

  /// Return the preferred operand type if the target has a quick way to
  /// compare integer values of the given size. Assume that any legal integer
  /// type can be compared efficiently. Targets may override this to allow
  /// illegal wide types to return a vector type if there is support to
  /// compare that type.
  virtual MVT hasFastEqualityCompare(unsigned NumBits) const {
    MVT VT = MVT::getIntegerVT(NumBits);
    return isTypeLegal(VT) ? VT : MVT::INVALID_SIMPLE_VALUE_TYPE;
  }

  /// Return true if the target should transform:
  ///   (X & Y) == Y ---> (~X & Y) == 0
  ///   (X & Y) != Y ---> (~X & Y) != 0
  ///
  /// This may be profitable if the target has a bitwise and-not operation
  /// that sets comparison flags. A target may want to limit the
  /// transformation based on the type of Y or if Y is a constant.
  ///
  /// Note that the transform will not occur if Y is known to be a power-of-2
  /// because a mask and compare of a single bit can be handled by inverting
  /// the predicate, for example:
  ///   (X & 8) == 8 ---> (X & 8) != 0
  virtual bool hasAndNotCompare(SDValue Y) const {
    return false;
  }

  /// Return true if the target has a bitwise and-not operation:
  ///   X = ~A & B
  /// This can be used to simplify select or other instructions.
  virtual bool hasAndNot(SDValue X) const {
    // If the target has the more complex version of this operation, assume
    // that it has this operation too.
    return hasAndNotCompare(X);
  }
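
  // For example, a target with a flag-setting AND-NOT instruction (such as
  // AArch64's BICS) could simply return true from the compare variant, and
  // hasAndNot() then defaults to true as well (an illustrative sketch; real
  // targets usually also restrict this based on the type of Y):
  //
  //   bool hasAndNotCompare(SDValue Y) const override { return true; }
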
  /// Return true if the target has a bit-test instruction:
  ///   (X & (1 << Y)) ==/!= 0
  /// This knowledge can be used to prevent breaking the pattern,
  /// or creating it if it could be recognized.
  virtual bool hasBitTest(SDValue X, SDValue Y) const { return false; }

  /// There are two ways to clear extreme bits (either low or high):
  ///   Mask:   x & (-1 << y)  (the instcombine canonical form)
  ///   Shifts: x >> y << y
  /// Return true if the variant with 2 variable shifts is preferred.
  /// Return false if there is no preference.
  virtual bool shouldFoldMaskToVariableShiftPair(SDValue X) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }

  /// Return true if it is profitable to fold a pair of shifts into a mask.
  /// This is usually true on most targets. But some targets, like Thumb1,
  /// have immediate shift instructions, but no immediate "and" instruction;
  /// this makes the fold unprofitable.
  virtual bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                                 CombineLevel Level) const {
    return true;
  }

  /// Should we transform the IR-optimal check for whether the given
  /// truncation down into KeptBits would be truncating or not:
  ///   (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
  /// into its more traditional form:
  ///   ((%x << C) a>> C) dstcond %x
  /// Return true if we should transform.
  /// Return false if there is no preference.
  virtual bool shouldTransformSignedTruncationCheck(EVT XVT,
                                                    unsigned KeptBits) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }

  /// Given the pattern
  ///   (X & (C l>>/<< Y)) ==/!= 0
  /// return true if it should be transformed into:
  ///   ((X <</l>> Y) & C) ==/!= 0
  /// WARNING: if 'X' is a constant, the fold may deadlock!
  /// FIXME: we could avoid passing XC, but we can't use isConstOrConstSplat()
  ///        here because it can end up being not linked in.
  virtual bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
      SelectionDAG &DAG) const {
    if (hasBitTest(X, Y)) {
      // One interesting pattern that we'd want to form is 'bit test':
      //   ((1 << Y) & C) ==/!= 0
      // But we also need to be careful not to try to reverse that fold.

      // Is this '1 << Y' ?
      if (OldShiftOpcode == ISD::SHL && CC->isOne())
        return false; // Keep the 'bit test' pattern.

      // Will it be '1 << Y' after the transform ?
      if (XC && NewShiftOpcode == ISD::SHL && XC->isOne())
        return true; // Do form the 'bit test' pattern.
    }

    // If 'X' is a constant, and we transform, then we will immediately
    // try to undo the fold, thus causing an endless combine loop.
    // So by default, let's assume everyone prefers the fold
    // iff 'X' is not a constant.
    return !XC;
  }

  /// These two forms are equivalent:
  ///   sub %y, (xor %x, -1)
  ///   add (add %x, 1), %y
  /// The variant with two adds is IR-canonical.
  /// Some targets may prefer one to the other.
  virtual bool preferIncOfAddToSubOfNot(EVT VT) const {
    // By default, let's assume that everyone prefers the form with two adds.
    return true;
  }

  // By default prefer folding (abs (sub nsw x, y)) -> abds(x, y). Some targets
  // may want to avoid this to prevent loss of the sub_nsw pattern.
  virtual bool preferABDSToABSWithNSW(EVT VT) const {
    return true;
  }

  // Return true if the target wants to transform Op(Splat(X)) -> Splat(Op(X)).
  virtual bool preferScalarizeSplat(SDNode *N) const { return true; }

  /// Return true if the target wants to use the optimization that
  /// turns ext(promotableInst1(...(promotableInstN(load)))) into
  /// promotedInst1(...(promotedInstN(ext(load)))).
  bool enableExtLdPromotion() const { return EnableExtLdPromotion; }

  /// Return true if the target can combine store(extractelement VectorTy,
  /// Idx).
  /// \p Cost[out] gives the cost of that transformation when this is true.
  virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                         unsigned &Cost) const {
    return false;
  }

  /// Return true if inserting a scalar into a variable element of an undef
  /// vector is more efficiently handled by splatting the scalar instead.
  virtual bool shouldSplatInsEltVarIndex(EVT) const {
    return false;
  }

  /// Return true if the target always benefits from combining into FMA for a
  /// given value type. This must typically return false on targets where FMA
  /// takes more cycles to execute than FADD.
  virtual bool enableAggressiveFMAFusion(EVT VT) const { return false; }

  /// Return true if the target always benefits from combining into FMA for a
  /// given value type. This must typically return false on targets where FMA
  /// takes more cycles to execute than FADD.
  virtual bool enableAggressiveFMAFusion(LLT Ty) const { return false; }

  /// Return the ValueType of the result of SETCC operations.
  virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                                 EVT VT) const;

  /// Return the ValueType for comparison libcalls. Comparison libcalls
  /// include floating point comparison calls, and Ordered/Unordered check
  /// calls on floating point numbers.
  virtual MVT::SimpleValueType getCmpLibcallReturnType() const;

  /// For targets without i1 registers, this gives the nature of the high-bits
  /// of boolean values held in types wider than i1.
  ///
  /// "Boolean values" are special true/false values produced by nodes like
  /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
  /// Not to be confused with general values promoted from i1. Some CPUs
  /// distinguish between vectors of boolean and scalars; the isVec parameter
  /// selects between the two kinds. For example on X86 a scalar boolean
  /// should be zero extended from i1, while the elements of a vector of
  /// booleans should be sign extended from i1.
  ///
  /// Some CPUs also treat floating point types the same way as they treat
  /// vectors instead of the way they treat scalars.
  BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
    if (isVec)
      return BooleanVectorContents;
    return isFloat ? BooleanFloatContents : BooleanContents;
  }

  BooleanContent getBooleanContents(EVT Type) const {
    return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
  }
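
  // For example, on X86 (illustrative):
  //   getBooleanContents(MVT::i32)   == ZeroOrOneBooleanContent
  //   getBooleanContents(MVT::v4i32) == ZeroOrNegativeOneBooleanContent
  // so a widened scalar SETCC result is zero-extended (0 or 1), while a
  // vector SETCC result is sign-extended (all-zeros or all-ones lanes).
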
  /// Promote the given target boolean to a target boolean of the given type.
  /// A target boolean is an integer value, not necessarily of type i1, the
  /// bits of which conform to getBooleanContents.
  ///
  /// ValVT is the type of values that produced the boolean.
  SDValue promoteTargetBoolean(SelectionDAG &DAG, SDValue Bool,
                               EVT ValVT) const {
    SDLoc dl(Bool);
    EVT BoolVT =
        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), ValVT);
    ISD::NodeType ExtendCode = getExtendForContent(getBooleanContents(ValVT));
    return DAG.getNode(ExtendCode, dl, BoolVT, Bool);
  }

  /// Return target scheduling preference.
  Sched::Preference getSchedulingPreference() const {
    return SchedPreferenceInfo;
  }

  /// Some schedulers, e.g. hybrid, can switch to different scheduling
  /// heuristics for different nodes. This function returns the preference
  /// (or none) for the given node.
  virtual Sched::Preference getSchedulingPreference(SDNode *) const {
    return Sched::None;
  }

  /// Return the register class that should be used for the specified value
  /// type.
  virtual const TargetRegisterClass *
  getRegClassFor(MVT VT, bool isDivergent = false) const {
    (void)isDivergent;
    const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
    assert(RC && "This value type is not natively supported!");
    return RC;
  }
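
  // For example, a GPU-like target can hand out different register classes
  // for divergent and uniform values (an illustrative sketch modeled on
  // AMDGPU's use of the isDivergent bit; the register class names are
  // hypothetical):
  //
  //   const TargetRegisterClass *
  //   getRegClassFor(MVT VT, bool isDivergent) const override {
  //     if (isDivergent)
  //       return &MyTarget::VectorRegsRegClass; // per-lane registers
  //     return &MyTarget::ScalarRegsRegClass;   // uniform registers
  //   }
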
  /// Allows target to decide about the register class of the
  /// specific value that is live outside the defining block.
  /// Returns true if the value needs uniform register class.
  virtual bool requiresUniformRegister(MachineFunction &MF,
                                       const Value *) const {
    return false;
  }

  /// Return the 'representative' register class for the specified value
  /// type.
  ///
  /// The 'representative' register class is the largest legal super-reg
  /// register class for the register class of the value type. For example, on
  /// i386 the rep register class for i8, i16, and i32 are GR32; while the rep
  /// register class is GR64 on x86_64.
  virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
    return RC;
  }

  /// Return the cost of the 'representative' register class for the
  /// specified value type.
  virtual uint8_t getRepRegClassCostFor(MVT VT) const {
    return RepRegClassCostForVT[VT.SimpleTy];
  }

  /// Return the preferred strategy to legalize this SHIFT instruction, with
  /// \p ExpansionFactor being the recursion depth - how many expansions are
  /// needed.
  enum class ShiftLegalizationStrategy {
    ExpandToParts,
    ExpandThroughStack,
    LowerToLibcall
  };
  virtual ShiftLegalizationStrategy
  preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N,
                                     unsigned ExpansionFactor) const {
    if (ExpansionFactor == 1)
      return ShiftLegalizationStrategy::ExpandToParts;
    return ShiftLegalizationStrategy::ExpandThroughStack;
  }

  /// Return true if the target has native support for the specified value
  /// type. This means that it has a register that directly holds it without
  /// promotions or expansions.
  bool isTypeLegal(EVT VT) const {
    assert(!VT.isSimple() ||
           (unsigned)VT.getSimpleVT().SimpleTy < std::size(RegClassForVT));
    return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
  }

  class ValueTypeActionImpl {
    /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
    /// that indicates how instruction selection should deal with the type.
    LegalizeTypeAction ValueTypeActions[MVT::VALUETYPE_SIZE];

  public:
    ValueTypeActionImpl() {
      std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions),
                TypeLegal);
    }

    LegalizeTypeAction getTypeAction(MVT VT) const {
      return ValueTypeActions[VT.SimpleTy];
    }

    void setTypeAction(MVT VT, LegalizeTypeAction Action) {
      ValueTypeActions[VT.SimpleTy] = Action;
    }
  };

  const ValueTypeActionImpl &getValueTypeActions() const {
    return ValueTypeActions;
  }

  /// Return pair that represents the legalization kind (first) that needs to
  /// happen to EVT (second) in order to type-legalize it.
  ///
  /// First: how we should legalize values of this type, either it is already
  /// legal (return 'Legal') or we need to promote it to a larger type (return
  /// 'Promote'), or we need to expand it into multiple registers of smaller
  /// integer type (return 'Expand'). 'Custom' is not an option.
  ///
  /// Second: for types supported by the target, this is an identity function.
  /// For types that must be promoted to larger types, this returns the larger
  /// type to promote to. For integer types that are larger than the largest
  /// integer register, this contains one step in the expansion to get to the
  /// smaller register. For illegal floating point types, this returns the
  /// integer type to transform to.
  LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;

  /// Return how we should legalize values of this type, either it is already
  /// legal (return 'Legal') or we need to promote it to a larger type (return
  /// 'Promote'), or we need to expand it into multiple registers of smaller
  /// integer type (return 'Expand'). 'Custom' is not an option.
  LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).first;
  }
  LegalizeTypeAction getTypeAction(MVT VT) const {
    return ValueTypeActions.getTypeAction(VT);
  }
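
  // For example, on a target whose widest legal integer is i64 and whose
  // legal vectors include v4i32 (illustrative):
  //   getTypeAction(Ctx, i128)  -> TypeExpandInteger;  i128 transforms to i64.
  //   getTypeAction(Ctx, v8i32) -> TypeSplitVector;    v8i32 transforms to
  //                                v4i32.
  //   getTypeAction(Ctx, i8)    -> TypePromoteInteger; i8 transforms to a
  //                                larger legal integer type such as i32.
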
  /// For types supported by the target, this is an identity function. For
  /// types that must be promoted to larger types, this returns the larger
  /// type to promote to. For integer types that are larger than the largest
  /// integer register, this contains one step in the expansion to get to the
  /// smaller register. For illegal floating point types, this returns the
  /// integer type to transform to.
  virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).second;
  }

  /// For types supported by the target, this is an identity function. For
  /// types that must be expanded (i.e. integer types that are larger than the
  /// largest integer register or illegal floating point types), this returns
  /// the largest legal type it will be expanded to.
  EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
    assert(!VT.isVector());
    while (true) {
      switch (getTypeAction(Context, VT)) {
      case TypeLegal:
        return VT;
      case TypeExpandInteger:
        VT = getTypeToTransformTo(Context, VT);
        break;
      default:
        llvm_unreachable("Type is not legal nor is it to be expanded!");
      }
    }
  }

  /// Vector types are broken down into some number of legal first class
  /// types. For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or
  /// SSE1, or 8 promoted EVT::f64 values with the X86 FP stack. Similarly,
  /// EVT::v2i64 turns into 4 EVT::i32 values with both PPC and X86.
  ///
  /// This method returns the number of registers needed, and the VT for each
  /// register. It also returns the VT and quantity of the intermediate values
  /// before they are promoted/expanded.
  unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                  EVT &IntermediateVT,
                                  unsigned &NumIntermediates,
                                  MVT &RegisterVT) const;
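
  // For example, with SSE-style v4f32 registers (illustrative):
  //
  //   EVT IntermediateVT;
  //   MVT RegisterVT;
  //   unsigned NumIntermediates;
  //   unsigned NumRegs = getVectorTypeBreakdown(
  //       Ctx, MVT::v8f32, IntermediateVT, NumIntermediates, RegisterVT);
  //   // NumRegs == 2, NumIntermediates == 2,
  //   // IntermediateVT == v4f32, RegisterVT == v4f32.
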
  /// Certain targets such as MIPS require that some types such as vectors
  /// are always broken down into scalars in some contexts. This occurs even
  /// if the vector type is legal.
  virtual unsigned getVectorTypeBreakdownForCallingConv(
      LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
      unsigned &NumIntermediates, MVT &RegisterVT) const {
    return getVectorTypeBreakdown(Context, VT, IntermediateVT,
                                  NumIntermediates, RegisterVT);
  }

  struct IntrinsicInfo {
    unsigned opc = 0; // target opcode
    EVT memVT;        // memory VT

    // value representing memory location
    PointerUnion<const Value *, const PseudoSourceValue *> ptrVal;

    // Fallback address space for use if ptrVal is nullptr. std::nullopt means
    // unknown address space.
    std::optional<unsigned> fallbackAddressSpace;

    int offset = 0;    // offset off of ptrVal
    uint64_t size = 0; // the size of the memory location
                       // (taken from memVT if zero)
    MaybeAlign align = Align(1); // alignment

    MachineMemOperand::Flags flags = MachineMemOperand::MONone;
    IntrinsicInfo() = default;
  };

  /// Given an intrinsic, checks if on the target the intrinsic will need to
  /// map to a MemIntrinsicNode (touches memory). If this is the case, it
  /// returns true and stores the intrinsic information into the IntrinsicInfo
  /// that was passed to the function.
  virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
                                  MachineFunction &,
                                  unsigned /*Intrinsic*/) const {
    return false;
  }

  /// Returns true if the target can instruction select the specified FP
  /// immediate natively. If false, the legalizer will materialize the FP
  /// immediate as a load from a constant pool.
  virtual bool isFPImmLegal(const APFloat & /*Imm*/, EVT /*VT*/,
                            bool ForCodeSize = false) const {
    return false;
  }

  /// Targets can use this to indicate that they only support *some*
  /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
  /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to
  /// be legal.
  virtual bool isShuffleMaskLegal(ArrayRef<int> /*Mask*/, EVT /*VT*/) const {
    return true;
  }

  /// Returns true if the operation can trap for the value type.
  ///
  /// VT must be a legal type. By default, we optimistically assume most
  /// operations don't trap except for integer divide and remainder.
  virtual bool canOpTrap(unsigned Op, EVT VT) const;

  /// Similar to isShuffleMaskLegal. Targets can use this to indicate if
  /// there is a suitable VECTOR_SHUFFLE that can be used to replace a VAND
  /// with a constant pool entry.
  virtual bool isVectorClearMaskLegal(ArrayRef<int> /*Mask*/,
                                      EVT /*VT*/) const {
    return false;
  }

  /// How to legalize this custom operation?
  virtual LegalizeAction getCustomOperationAction(SDNode &Op) const {
    return Legal;
  }

  /// Return how this operation should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
    if (VT.isExtended()) return Expand;
    // If a target-specific SDNode requires legalization, require the target
    // to provide custom legalization for it.
    if (Op >= std::size(OpActions[0]))
      return Custom;
    return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
  }

  /// Custom method defined by each target to indicate if an operation which
  /// may require a scale is supported natively by the target.
  /// If not, the operation is illegal.
  virtual bool isSupportedFixedPointOperation(unsigned Op, EVT VT,
                                              unsigned Scale) const {
    return false;
  }

  /// Some fixed point operations may be natively supported by the target but
  /// only for specific scales. This method allows for checking
  /// if the width is supported by the target for a given operation that may
  /// depend on scale.
  LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT,
                                              unsigned Scale) const {
    auto Action = getOperationAction(Op, VT);
    if (Action != Legal)
      return Action;

    // This operation is supported in this type but may only work on specific
    // scales.
    bool Supported;
    switch (Op) {
    default:
      llvm_unreachable("Unexpected fixed point operation.");
    case ISD::SMULFIX:
    case ISD::SMULFIXSAT:
    case ISD::UMULFIX:
    case ISD::UMULFIXSAT:
    case ISD::SDIVFIX:
    case ISD::SDIVFIXSAT:
    case ISD::UDIVFIX:
    case ISD::UDIVFIXSAT:
      Supported = isSupportedFixedPointOperation(Op, VT, Scale);
      break;
    }

    return Supported ? Action : Expand;
  }

  // If Op is a strict floating-point operation, return the result
  // of getOperationAction for the equivalent non-strict operation.
  LegalizeAction getStrictFPOperationAction(unsigned Op, EVT VT) const {
    unsigned EqOpc;
    switch (Op) {
    default: llvm_unreachable("Unexpected FP pseudo-opcode");
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
    case ISD::STRICT_##DAGN: EqOpc = ISD::DAGN; break;
#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
    case ISD::STRICT_##DAGN: EqOpc = ISD::SETCC; break;
#include "llvm/IR/ConstrainedOps.def"
    }

    return getOperationAction(EqOpc, VT);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering. This is used to help guide high-level
  /// lowering decisions. LegalOnly is an optional convenience for code paths
  /// traversed pre and post legalisation.
  bool isOperationLegalOrCustom(unsigned Op, EVT VT,
                                bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
           (getOperationAction(Op, VT) == Legal ||
            getOperationAction(Op, VT) == Custom);
  }
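
  // Typical use in a DAG combine (illustrative):
  //
  //   // Only form FMINNUM if the target can select or custom-lower it for
  //   // this type.
  //   if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT))
  //     return DAG.getNode(ISD::FMINNUM, DL, VT, A, B);
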
  /// Return true if the specified operation is legal on this target or can be
  /// made legal using promotion. This is used to help guide high-level
  /// lowering decisions. LegalOnly is an optional convenience for code paths
  /// traversed pre and post legalisation.
  bool isOperationLegalOrPromote(unsigned Op, EVT VT,
                                 bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
           (getOperationAction(Op, VT) == Legal ||
            getOperationAction(Op, VT) == Promote);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering or using promotion. This is used to help
  /// guide high-level lowering decisions. LegalOnly is an optional
  /// convenience for code paths traversed pre and post legalisation.
  bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT,
                                         bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
           (getOperationAction(Op, VT) == Legal ||
            getOperationAction(Op, VT) == Custom ||
            getOperationAction(Op, VT) == Promote);
  }

  /// Return true if the operation uses custom lowering, regardless of whether
  /// the type is legal or not.
  bool isOperationCustom(unsigned Op, EVT VT) const {
    return getOperationAction(Op, VT) == Custom;
  }

  /// Return true if lowering to a jump table is allowed.
  virtual bool areJTsAllowed(const Function *Fn) const {
    if (Fn->getFnAttribute("no-jump-tables").getValueAsBool())
      return false;

    return isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
           isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
  }

  /// Check whether the range [Low,High] fits in a machine word.
  bool rangeFitsInWord(const APInt &Low, const APInt &High,
                       const DataLayout &DL) const {
    // FIXME: Using the pointer type doesn't seem ideal.
    uint64_t BW = DL.getIndexSizeInBits(0u);
    uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
    return Range <= BW;
  }

  /// Return true if lowering to a jump table is suitable for a set of case
  /// clusters which may contain \p NumCases cases spanning \p Range values.
  virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases,
                                      uint64_t Range, ProfileSummaryInfo *PSI,
                                      BlockFrequencyInfo *BFI) const;

  /// Returns the preferred type for the switch condition.
  virtual MVT getPreferredSwitchConditionType(LLVMContext &Context,
                                              EVT ConditionVT) const;

  /// Return true if lowering to a bit test is suitable for a set of case
  /// clusters which contains \p NumDests unique destinations, \p Low and
  /// \p High as its lowest and highest case values, and expects \p NumCmps
  /// case value comparisons. Check if the number of destinations, comparison
  /// metric, and range are all suitable.
  bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps,
                             const APInt &Low, const APInt &High,
                             const DataLayout &DL) const {
    // FIXME: I don't think NumCmps is the correct metric: a single case and
    // a range of cases both require only one branch to lower. Just looking
    // at the number of clusters and destinations should be enough to decide
    // whether to build bit tests.

    // To lower a range with bit tests, the range must fit the bitwidth of a
    // machine word.
    if (!rangeFitsInWord(Low, High, DL))
      return false;

    // Decide whether it's profitable to lower this range with bit tests.
    // Each destination requires a bit test and branch, and there is an
    // overall range check branch. For a small number of clusters, separate
    // comparisons might be cheaper, and for many destinations, splitting the
    // range might be better.
    return (NumDests == 1 && NumCmps >= 3) ||
           (NumDests == 2 && NumCmps >= 5) ||
           (NumDests == 3 && NumCmps >= 6);
  }
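
  // For example (illustrative), a switch with 3 destinations and 6 case
  // values spanning [0, 40] on a target with a 64-bit index width:
  //
  //   isSuitableForBitTests(3, 6, APInt(64, 0), APInt(64, 40), DL); // true
  //
  // The 41-value range fits in a 64-bit word, and 3 destinations with at
  // least 6 comparisons meets the profitability threshold above.
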
  /// Return true if the specified operation is illegal on this target or
  /// unlikely to be made legal with custom lowering. This is used to help
  /// guide high-level lowering decisions.
  bool isOperationExpand(unsigned Op, EVT VT) const {
    return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
  }

  /// Return true if the specified operation is legal on this target.
  bool isOperationLegal(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
           getOperationAction(Op, VT) == Legal;
  }

  /// Return how this load with extension should be treated: either it is
  /// legal, needs to be promoted to a larger size, needs to be expanded to
  /// some other code sequence, or the target has a custom expander for it.
  LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
                                  EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
    unsigned ValI = (unsigned)ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned)MemVT.getSimpleVT().SimpleTy;
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::VALUETYPE_SIZE &&
           MemI < MVT::VALUETYPE_SIZE && "Table isn't big enough!");
    unsigned Shift = 4 * ExtType;
    return (LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf);
  }

  /// Return true if the specified load with extension is legal on this
  /// target.
  bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
  }

  /// Return true if the specified load with extension is legal or custom
  /// on this target.
  bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
           getLoadExtAction(ExtType, ValVT, MemVT) == Custom;
  }
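
  // For example (illustrative), on a target where sign-extending i8 loads to
  // i32 are selectable but zero-extending ones are not:
  //
  //   isLoadExtLegal(ISD::SEXTLOAD, MVT::i32, MVT::i8); // true
  //   isLoadExtLegal(ISD::ZEXTLOAD, MVT::i32, MVT::i8); // false, so the
  //   // legalizer emits a plain load followed by an explicit AND mask.
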
1389 LegalizeAction getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
1390   return getIndexedModeAction(IdxMode, VT, IMAB_Load);
1391 }
1392
1393 /// Return true if the specified indexed load is legal on this target.
1394 bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
1395   return VT.isSimple() &&
1396          (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
1397           getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
1398 }
1399
1400 /// Return how the indexed store should be treated: either it is legal, needs
1401 /// to be promoted to a larger size, needs to be expanded to some other code
1402 /// sequence, or the target has a custom expander for it.
1403 LegalizeAction getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
1404   return getIndexedModeAction(IdxMode, VT, IMAB_Store);
1405 }
1406
1407 /// Return true if the specified indexed store is legal on this target.
1408 bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
1409   return VT.isSimple() &&
1410          (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
1411           getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
1412 }
1413
1414 /// Return how the indexed masked load should be treated: either it is legal,
1415 /// needs to be promoted to a larger size, needs to be expanded to some other
1416 /// code sequence, or the target has a custom expander for it.
1417 LegalizeAction getIndexedMaskedLoadAction(unsigned IdxMode, MVT VT) const {
1418   return getIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad);
1419 }
1420
1421 /// Return true if the specified indexed masked load is legal on this target.
1422 bool isIndexedMaskedLoadLegal(unsigned IdxMode, EVT VT) const {
1423   return VT.isSimple() &&
1424          (getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
1425           getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
1426 }
1427
1428 /// Return how the indexed masked store should be treated: either it is legal,
1429 /// needs to be promoted to a larger size, needs to be expanded to some other
1430 /// code sequence, or the target has a custom expander for it.
1431 LegalizeAction getIndexedMaskedStoreAction(unsigned IdxMode, MVT VT) const {
1432   return getIndexedModeAction(IdxMode, VT, IMAB_MaskedStore);
1433 }
1434
1435 /// Return true if the specified indexed masked store is legal on this target.
1436 bool isIndexedMaskedStoreLegal(unsigned IdxMode, EVT VT) const {
1437   return VT.isSimple() &&
1438          (getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
1439           getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
1440 }
1441
1442 /// Returns true if the index type for a masked gather/scatter requires
1443 /// extending.
1444 virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const { return false; }
1445
1446 // Returns true if VT is a legal index type for masked gathers/scatters
1447 // on this target
1448 virtual bool shouldRemoveExtendFromGSIndex(EVT IndexVT, EVT DataVT) const {
1449   return false;
1450 }
1451
1452 // Return true if the target supports a scatter/gather instruction with
1453 // indices which are scaled by the particular value. Note that all targets
1454 // must by definition support scale of 1.
1455 virtual bool isLegalScaleForGatherScatter(uint64_t Scale,
1456                                           uint64_t ElemSize) const {
1457   // MGATHER/MSCATTER are only required to support scaling by one or by the
1458   // element size.
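  // Illustrative example (not from the original header): for a gather of
  // i32 elements, ElemSize is 4, so Scale values of 1 (byte indices) and
  // 4 (element indices) are accepted by default; any other scale must be
  // handled by a target override.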
1459   if (Scale != ElemSize && Scale != 1)
1460     return false;
1461   return true;
1462 }
1463
1464 /// Return how the condition code should be treated: either it is legal, needs
1465 /// to be expanded to some other code sequence, or the target has a custom
1466 /// expander for it.
1467 LegalizeAction
1468 getCondCodeAction(ISD::CondCode CC, MVT VT) const {
1469   assert((unsigned)CC < std::size(CondCodeActions) &&
1470          ((unsigned)VT.SimpleTy >> 3) < std::size(CondCodeActions[0]) &&
1471          "Table isn't big enough!");
1472   // See setCondCodeAction for how this is encoded.
1473   uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
1474   uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 3];
1475   LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0xF);
1476   assert(Action != Promote && "Can't promote condition code!");
1477   return Action;
1478 }
1479
1480 /// Return true if the specified condition code is legal on this target.
1481 bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
1482   return getCondCodeAction(CC, VT) == Legal;
1483 }
1484
1485 /// Return true if the specified condition code is legal or custom on this
1486 /// target.
1487 bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const {
1488   return getCondCodeAction(CC, VT) == Legal ||
1489          getCondCodeAction(CC, VT) == Custom;
1490 }
1491
1492 /// If the action for this operation is to promote, this method returns the
1493 /// ValueType to promote to.
1494 MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
1495   assert(getOperationAction(Op, VT) == Promote &&
1496          "This operation isn't promoted!");
1497
1498   // See if this has an explicit type specified.
1499   std::map<std::pair<unsigned, MVT::SimpleValueType>,
1500            MVT::SimpleValueType>::const_iterator PTTI =
1501       PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
1502   if (PTTI != PromoteToType.end()) return PTTI->second;
1503
1504   assert((VT.isInteger() || VT.isFloatingPoint()) &&
1505          "Cannot autopromote this type, add it with AddPromotedToType.");
1506
1507   MVT NVT = VT;
1508   do {
1509     NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
1510     assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
1511            "Didn't find type to promote to!");
1512   } while (!isTypeLegal(NVT) ||
1513            getOperationAction(Op, NVT) == Promote);
1514   return NVT;
1515 }
1516
1517 virtual EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty,
1518                                    bool AllowUnknown = false) const {
1519   return getValueType(DL, Ty, AllowUnknown);
1520 }
1521
1522 /// Return the EVT corresponding to this LLVM type. This is fixed by the LLVM
1523 /// operations except for the pointer size. If AllowUnknown is true, this
1524 /// will return MVT::Other for types with no EVT counterpart (e.g. structs),
1525 /// otherwise it will assert.
1526 EVT getValueType(const DataLayout &DL, Type *Ty,
1527                  bool AllowUnknown = false) const {
1528   // Lower scalar pointers to native pointer types.
1529   if (auto *PTy = dyn_cast<PointerType>(Ty))
1530     return getPointerTy(DL, PTy->getAddressSpace());
1531
1532   if (auto *VTy = dyn_cast<VectorType>(Ty)) {
1533     Type *EltTy = VTy->getElementType();
1534     // Lower vectors of pointers to native pointer types.
1535     if (auto *PTy = dyn_cast<PointerType>(EltTy)) {
1536       EVT PointerTy(getPointerTy(DL, PTy->getAddressSpace()));
1537       EltTy = PointerTy.getTypeForEVT(Ty->getContext());
1538     }
1539     return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(EltTy, false),
1540                             VTy->getElementCount());
1541   }
1542
1543   return EVT::getEVT(Ty, AllowUnknown);
1544 }
1545
1546 EVT getMemValueType(const DataLayout &DL, Type *Ty,
1547                     bool AllowUnknown = false) const {
1548   // Lower scalar pointers to native pointer types.
1549   if (auto *PTy = dyn_cast<PointerType>(Ty))
1550     return getPointerMemTy(DL, PTy->getAddressSpace());
1551
1552   if (auto *VTy = dyn_cast<VectorType>(Ty)) {
1553     Type *EltTy = VTy->getElementType();
1554     if (auto *PTy = dyn_cast<PointerType>(EltTy)) {
1555       EVT PointerTy(getPointerMemTy(DL, PTy->getAddressSpace()));
1556       EltTy = PointerTy.getTypeForEVT(Ty->getContext());
1557     }
1558     return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(EltTy, false),
1559                             VTy->getElementCount());
1560   }
1561
1562   return getValueType(DL, Ty, AllowUnknown);
1563 }
1564
1565
1566 /// Return the MVT corresponding to this LLVM type. See getValueType.
1567 MVT getSimpleValueType(const DataLayout &DL, Type *Ty,
1568                        bool AllowUnknown = false) const {
1569   return getValueType(DL, Ty, AllowUnknown).getSimpleVT();
1570 }
1571
1572 /// Return the desired alignment for ByVal or InAlloca aggregate function
1573 /// arguments in the caller parameter area. This is the actual alignment, not
1574 /// its logarithm.
1575 virtual uint64_t getByValTypeAlignment(Type *Ty, const DataLayout &DL) const;
1576
1577 /// Return the type of registers that this ValueType will eventually require.
1578 MVT getRegisterType(MVT VT) const {
1579   assert((unsigned)VT.SimpleTy < std::size(RegisterTypeForVT));
1580   return RegisterTypeForVT[VT.SimpleTy];
1581 }
1582
1583 /// Return the type of registers that this ValueType will eventually require.
1584 MVT getRegisterType(LLVMContext &Context, EVT VT) const {
1585   if (VT.isSimple())
1586     return getRegisterType(VT.getSimpleVT());
1587   if (VT.isVector()) {
1588     EVT VT1;
1589     MVT RegisterVT;
1590     unsigned NumIntermediates;
1591     (void)getVectorTypeBreakdown(Context, VT, VT1,
1592                                  NumIntermediates, RegisterVT);
1593     return RegisterVT;
1594   }
1595   if (VT.isInteger()) {
1596     return getRegisterType(Context, getTypeToTransformTo(Context, VT));
1597   }
1598   llvm_unreachable("Unsupported extended type!");
1599 }
1600
1601 /// Return the number of registers that this ValueType will eventually
1602 /// require.
1603 ///
1604 /// This is one for any types promoted to live in larger registers, but may be
1605 /// more than one for types (like i64) that are split into pieces. For types
1606 /// like i140, which are first promoted then expanded, it is the number of
1607 /// registers needed to hold all the bits of the original type. For an i140
1608 /// on a 32-bit machine this means 5 registers.
1609
1610 /// RegisterVT may be passed as a way to override the default settings, for
1611 /// instance with i128 inline assembly operands on SystemZ.
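/// A sketch of the i140 case described above (illustrative only; \c Ctx is
/// assumed to be an LLVMContext in scope):
/// \code
///   EVT I140 = EVT::getIntegerVT(Ctx, 140);
///   unsigned N = getNumRegisters(Ctx, I140);
///   // With 32-bit registers: N == (140 + 31) / 32 == 5.
/// \endcode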
1612 virtual unsigned
1613 getNumRegisters(LLVMContext &Context, EVT VT,
1614                 std::optional<MVT> RegisterVT = std::nullopt) const {
1615   if (VT.isSimple()) {
1616     assert((unsigned)VT.getSimpleVT().SimpleTy <
1617            std::size(NumRegistersForVT));
1618     return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
1619   }
1620   if (VT.isVector()) {
1621     EVT VT1;
1622     MVT VT2;
1623     unsigned NumIntermediates;
1624     return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
1625   }
1626   if (VT.isInteger()) {
1627     unsigned BitWidth = VT.getSizeInBits();
1628     unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
1629     return (BitWidth + RegWidth - 1) / RegWidth;
1630   }
1631   llvm_unreachable("Unsupported extended type!");
1632 }
1633
1634 /// Certain combinations of ABIs, targets and features require that types
1635 /// are legal for some operations and not for other operations.
1636 /// For MIPS all vector types must be passed through the integer register set.
1637 virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context,
1638                                           CallingConv::ID CC, EVT VT) const {
1639   return getRegisterType(Context, VT);
1640 }
1641
1642 /// Certain targets require unusual breakdowns of certain types. For MIPS,
1643 /// this occurs when a vector type is used, as vectors are passed through the
1644 /// integer register set.
1645 virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context,
1646                                                CallingConv::ID CC,
1647                                                EVT VT) const {
1648   return getNumRegisters(Context, VT);
1649 }
1650
1651 /// Certain targets have context sensitive alignment requirements, where one
1652 /// type has the alignment requirement of another type.
1653 virtual Align getABIAlignmentForCallingConv(Type *ArgTy,
1654                                             const DataLayout &DL) const {
1655   return DL.getABITypeAlign(ArgTy);
1656 }
1657
1658 /// If true, then instruction selection should seek to shrink the FP constant
1659 /// of the specified type to a smaller type in order to save space and / or
1660 /// reduce runtime.
1661 virtual bool ShouldShrinkFPConstant(EVT) const { return true; }
1662
1663 /// Return true if it is profitable to reduce a load to a smaller type.
1664 /// Example: (i16 (trunc (i32 (load x)))) -> i16 load x
1665 virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
1666                                    EVT NewVT) const {
1667   // By default, assume that it is cheaper to extract a subvector from a wide
1668   // vector load rather than creating multiple narrow vector loads.
1669   if (NewVT.isVector() && !Load->hasOneUse())
1670     return false;
1671
1672   return true;
1673 }
1674
1675 /// Return true (the default) if it is profitable to remove a sext_inreg(x)
1676 /// where the sext is redundant, and use x directly.
1677 virtual bool shouldRemoveRedundantExtend(SDValue Op) const { return true; }
1678
1679 /// When splitting a value of the specified type into parts, does the Lo
1680 /// or Hi part come first? This usually follows the endianness, except
1681 /// for ppcf128, where the Hi part always comes first.
1682 bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const {
1683   return DL.isBigEndian() || VT == MVT::ppcf128;
1684 }
1685
1686 /// If true, the target has custom DAG combine transformations that it can
1687 /// perform for the specified node.
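/// Targets normally enable this by registering nodes in their constructor
/// via setTargetDAGCombine (an illustrative sketch, hypothetical target):
/// \code
///   setTargetDAGCombine({ISD::ADD, ISD::MUL});
///   // The DAG combiner will then call PerformDAGCombine for ADD and MUL
///   // nodes, since hasTargetDAGCombine returns true for them.
/// \endcode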
1688 bool hasTargetDAGCombine(ISD::NodeType NT) const {
1689   assert(unsigned(NT >> 3) < std::size(TargetDAGCombineArray));
1690   return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
1691 }
1692
1693 unsigned getGatherAllAliasesMaxDepth() const {
1694   return GatherAllAliasesMaxDepth;
1695 }
1696
1697 /// Returns the size of the platform's va_list object.
1698 virtual unsigned getVaListSizeInBits(const DataLayout &DL) const {
1699   return getPointerTy(DL).getSizeInBits();
1700 }
1701
1702 /// Get maximum # of store operations permitted for llvm.memset
1703 ///
1704 /// This function returns the maximum number of store operations permitted
1705 /// to replace a call to llvm.memset. The value is set by the target at the
1706 /// performance threshold for such a replacement. If OptSize is true,
1707 /// return the limit for functions that have OptSize attribute.
1708 unsigned getMaxStoresPerMemset(bool OptSize) const {
1709   return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
1710 }
1711
1712 /// Get maximum # of store operations permitted for llvm.memcpy
1713 ///
1714 /// This function returns the maximum number of store operations permitted
1715 /// to replace a call to llvm.memcpy. The value is set by the target at the
1716 /// performance threshold for such a replacement. If OptSize is true,
1717 /// return the limit for functions that have OptSize attribute.
1718 unsigned getMaxStoresPerMemcpy(bool OptSize) const {
1719   return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
1720 }
1721
1722 /// \brief Get maximum # of store operations to be glued together
1723 ///
1724 /// This function returns the maximum number of store operations permitted
1725 /// to glue together during lowering of llvm.memcpy. The value is set by
1726 /// the target at the performance threshold for such a replacement.
1727 virtual unsigned getMaxGluedStoresPerMemcpy() const {
1728   return MaxGluedStoresPerMemcpy;
1729 }
1730
1731 /// Get maximum # of load operations permitted for memcmp
1732 ///
1733 /// This function returns the maximum number of load operations permitted
1734 /// to replace a call to memcmp. The value is set by the target at the
1735 /// performance threshold for such a replacement. If OptSize is true,
1736 /// return the limit for functions that have OptSize attribute.
1737 unsigned getMaxExpandSizeMemcmp(bool OptSize) const {
1738   return OptSize ? MaxLoadsPerMemcmpOptSize : MaxLoadsPerMemcmp;
1739 }
1740
1741 /// Get maximum # of store operations permitted for llvm.memmove
1742 ///
1743 /// This function returns the maximum number of store operations permitted
1744 /// to replace a call to llvm.memmove. The value is set by the target at the
1745 /// performance threshold for such a replacement. If OptSize is true,
1746 /// return the limit for functions that have OptSize attribute.
1747 unsigned getMaxStoresPerMemmove(bool OptSize) const {
1748   return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
1749 }
1750
1751 /// Determine if the target supports unaligned memory accesses.
1752 ///
1753 /// This function returns true if the target allows unaligned memory accesses
1754 /// of the specified type in the given address space. If true, it also returns
1755 /// a relative speed of the unaligned memory access in the last argument by
1756 /// reference. The higher the speed number, the faster the operation is
1757 /// compared to a number returned by another such call.
/// This is used, for example, in
1758 /// situations where an array copy/move/set is converted to a sequence of
1759 /// store operations. Its use helps to ensure that such replacements don't
1760 /// generate code that causes an alignment error (trap) on the target machine.
1761 virtual bool allowsMisalignedMemoryAccesses(
1762     EVT, unsigned AddrSpace = 0, Align Alignment = Align(1),
1763     MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1764     unsigned * /*Fast*/ = nullptr) const {
1765   return false;
1766 }
1767
1768 /// LLT handling variant.
1769 virtual bool allowsMisalignedMemoryAccesses(
1770     LLT, unsigned AddrSpace = 0, Align Alignment = Align(1),
1771     MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1772     unsigned * /*Fast*/ = nullptr) const {
1773   return false;
1774 }
1775
1776 /// This function returns true if the memory access is aligned or if the
1777 /// target allows this specific unaligned memory access. If the access is
1778 /// allowed, the optional final parameter returns a relative speed of the
1779 /// access (as defined by the target).
1780 bool allowsMemoryAccessForAlignment(
1781     LLVMContext &Context, const DataLayout &DL, EVT VT,
1782     unsigned AddrSpace = 0, Align Alignment = Align(1),
1783     MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1784     unsigned *Fast = nullptr) const;
1785
1786 /// Return true if the memory access of this type is aligned or if the target
1787 /// allows this specific unaligned access for the given MachineMemOperand.
1788 /// If the access is allowed, the optional final parameter returns a relative
1789 /// speed of the access (as defined by the target).
1790 bool allowsMemoryAccessForAlignment(LLVMContext &Context,
1791                                     const DataLayout &DL, EVT VT,
1792                                     const MachineMemOperand &MMO,
1793                                     unsigned *Fast = nullptr) const;
1794
1795 /// Return true if the target supports a memory access of this type for the
1796 /// given address space and alignment. If the access is allowed, the optional
1797 /// final parameter returns the relative speed of the access (as defined by
1798 /// the target).
1799 virtual bool
1800 allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
1801                    unsigned AddrSpace = 0, Align Alignment = Align(1),
1802                    MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1803                    unsigned *Fast = nullptr) const;
1804
1805 /// Return true if the target supports a memory access of this type for the
1806 /// given MachineMemOperand. If the access is allowed, the optional
1807 /// final parameter returns the relative access speed (as defined by the
1808 /// target).
1809 bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
1810                         const MachineMemOperand &MMO,
1811                         unsigned *Fast = nullptr) const;
1812
1813 /// LLT handling variant.
1814 bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, LLT Ty,
1815                         const MachineMemOperand &MMO,
1816                         unsigned *Fast = nullptr) const;
1817
1818 /// Returns the target-specific optimal type for load and store operations as
1819 /// a result of memset, memcpy, and memmove lowering.
1820 /// It returns EVT::Other if the type should be determined using generic
1821 /// target-independent logic.
1822 virtual EVT
1823 getOptimalMemOpType(const MemOp &Op,
1824                     const AttributeList & /*FuncAttributes*/) const {
1825   return MVT::Other;
1826 }
1827
1828 /// LLT returning variant.
1829 virtual LLT
1830 getOptimalMemOpLLT(const MemOp &Op,
1831                    const AttributeList & /*FuncAttributes*/) const {
1832   return LLT();
1833 }
1834
1835 /// Returns true if it's safe to use load / store of the specified type to
1836 /// expand memcpy / memset inline.
1837 ///
1838 /// This is mostly true for all types except for some special cases. For
1839 /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
1840 /// fstpl which also does type conversion. Note the specified type doesn't
1841 /// have to be legal as the hook is used before type legalization.
1842 virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }
1843
1844 /// Return lower limit for number of blocks in a jump table.
1845 virtual unsigned getMinimumJumpTableEntries() const;
1846
1847 /// Return lower limit of the density in a jump table.
1848 unsigned getMinimumJumpTableDensity(bool OptForSize) const;
1849
1850 /// Return upper limit for number of entries in a jump table.
1851 /// Zero if no limit.
1852 unsigned getMaximumJumpTableSize() const;
1853
1854 virtual bool isJumpTableRelative() const;
1855
1856 /// If a physical register, this specifies the register that
1857 /// llvm.stacksave/llvm.stackrestore should save and restore.
1858 Register getStackPointerRegisterToSaveRestore() const {
1859   return StackPointerRegisterToSaveRestore;
1860 }
1861
1862 /// If a physical register, this returns the register that receives the
1863 /// exception address on entry to an EH pad.
1864 virtual Register
1865 getExceptionPointerRegister(const Constant *PersonalityFn) const {
1866   return Register();
1867 }
1868
1869 /// If a physical register, this returns the register that receives the
1870 /// exception typeid on entry to a landing pad.
1871 virtual Register
1872 getExceptionSelectorRegister(const Constant *PersonalityFn) const {
1873   return Register();
1874 }
1875
1876 virtual bool needsFixedCatchObjects() const {
1877   report_fatal_error("Funclet EH is not implemented for this target");
1878 }
1879
1880 /// Return the minimum stack alignment of an argument.
1881 Align getMinStackArgumentAlignment() const {
1882   return MinStackArgumentAlignment;
1883 }
1884
1885 /// Return the minimum function alignment.
1886 Align getMinFunctionAlignment() const { return MinFunctionAlignment; }
1887
1888 /// Return the preferred function alignment.
1889 Align getPrefFunctionAlignment() const { return PrefFunctionAlignment; }
1890
1891 /// Return the preferred loop alignment.
1892 virtual Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const;
1893
1894 /// Return the maximum amount of bytes allowed to be emitted when padding for
1895 /// alignment.
1896 virtual unsigned
1897 getMaxPermittedBytesForAlignment(MachineBasicBlock *MBB) const;
1898
1899 /// Should loops be aligned even when the function is marked OptSize (but not
1900 /// MinSize).
1901 virtual bool alignLoopsWithOptSize() const { return false; }
1902
1903 /// If the target has a standard location for the stack protector guard,
1904 /// returns the address of that location. Otherwise, returns nullptr.
1905 /// DEPRECATED: please override useLoadStackGuardNode and customize
1906 /// LOAD_STACK_GUARD, or customize \@llvm.stackguard().
1907 virtual Value *getIRStackGuard(IRBuilderBase &IRB) const;
1908
1909 /// Inserts necessary declarations for SSP (stack protection) purpose.
1910 /// Should be used only when getIRStackGuard returns nullptr.
1911 virtual void insertSSPDeclarations(Module &M) const;
1912
1913 /// Return the variable that's previously inserted by insertSSPDeclarations,
1914 /// if any, otherwise return nullptr. Should be used only when
1915 /// getIRStackGuard returns nullptr.
1916 virtual Value *getSDagStackGuard(const Module &M) const;
1917
1918 /// If this function returns true, stack protection checks should XOR the
1919 /// frame pointer (or whichever pointer is used to address locals) into the
1920 /// stack guard value before checking it. getIRStackGuard must return nullptr
1921 /// if this returns true.
1922 virtual bool useStackGuardXorFP() const { return false; }
1923
1924 /// If the target has a standard stack protection check function that
1925 /// performs validation and error handling, returns the function. Otherwise,
1926 /// returns nullptr. Must be previously inserted by insertSSPDeclarations.
1927 /// Should be used only when getIRStackGuard returns nullptr.
1928 virtual Function *getSSPStackGuardCheck(const Module &M) const;
1929
1930 /// \returns true if a constant G_UBFX is legal on the target.
1931 virtual bool isConstantUnsignedBitfieldExtractLegal(unsigned Opc, LLT Ty1,
1932                                                     LLT Ty2) const {
1933   return false;
1934 }
1935
1936 protected:
1937 Value *getDefaultSafeStackPointerLocation(IRBuilderBase &IRB,
1938                                           bool UseTLS) const;
1939
1940 public:
1941 /// Returns the target-specific address of the unsafe stack pointer.
1942 virtual Value *getSafeStackPointerLocation(IRBuilderBase &IRB) const;
1943
1944 /// Returns true if the target uses a symbol to emit stack probes.
1945 virtual bool hasStackProbeSymbol(const MachineFunction &MF) const {
1946   return false;
1947 }
1948
1949 virtual bool hasInlineStackProbe(const MachineFunction &MF) const {
1950   return false;
1951 }
1952
/// Returns the name of the symbol used to emit stack probes, or the empty
/// string if not applicable.
virtual StringRef getStackProbeSymbolName(const MachineFunction &MF) const {
  return "";
}
1953
1954 /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
1955 /// are happy to sink it into basic blocks. A cast may be free, but not
1956 /// necessarily a no-op. e.g. a free truncate from a 64-bit to 32-bit pointer.
1957 virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const;
1958
1959 /// Return true if the pointer arguments to CI should be aligned by aligning
1960 /// the object whose address is being passed. If so then MinSize is set to the
1961 /// minimum size the object must be to be aligned and PrefAlign is set to the
1962 /// preferred alignment.
1963 virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
1964                                     Align & /*PrefAlign*/) const {
1965   return false;
1966 }
1967
1968 //===--------------------------------------------------------------------===//
1969 /// \name Helpers for TargetTransformInfo implementations
1970 /// @{
1971
1972 /// Get the ISD node that corresponds to the Instruction class opcode.
1973 int InstructionOpcodeToISD(unsigned Opcode) const;
1974
1975 /// @}
1976
1977 //===--------------------------------------------------------------------===//
1978 /// \name Helpers for atomic expansion.
1979 /// @{
1980
1981 /// Returns the maximum atomic operation size (in bits) supported by
1982 /// the backend. Atomic operations greater than this size (as well
1983 /// as ones that are not naturally aligned), will be expanded by
1984 /// AtomicExpandPass into an __atomic_* library call.
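/// For example (a sketch of typical backend-constructor code; the value is
/// hypothetical):
/// \code
///   // 64-bit atomics are native; anything wider becomes an __atomic_*
///   // library call via AtomicExpandPass.
///   setMaxAtomicSizeInBitsSupported(64);
/// \endcode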
1985 unsigned getMaxAtomicSizeInBitsSupported() const {
1986   return MaxAtomicSizeInBitsSupported;
1987 }
1988
1989 /// Returns the size in bits of the maximum div/rem the backend supports.
1990 /// Larger operations will be expanded by ExpandLargeDivRem.
1991 unsigned getMaxDivRemBitWidthSupported() const {
1992   return MaxDivRemBitWidthSupported;
1993 }
1994
1995 /// Returns the size in bits of the maximum fp convert the backend
1996 /// supports. Larger operations will be expanded by ExpandLargeFPConvert.
1997 unsigned getMaxLargeFPConvertBitWidthSupported() const {
1998   return MaxLargeFPConvertBitWidthSupported;
1999 }
2000
2001 /// Returns the size of the smallest cmpxchg or ll/sc instruction
2002 /// the backend supports. Any smaller operations are widened in
2003 /// AtomicExpandPass.
2004 ///
2005 /// Note that *unlike* operations above the maximum size, atomic ops
2006 /// are still natively supported below the minimum; they just
2007 /// require a more complex expansion.
2008 unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; }
2009
2010 /// Whether the target supports unaligned atomic operations.
2011 bool supportsUnalignedAtomics() const { return SupportsUnalignedAtomics; }
2012
2013 /// Whether AtomicExpandPass should automatically insert fences and reduce
2014 /// ordering for this atomic. This should be true for most architectures with
2015 /// weak memory ordering. Defaults to false.
2016 virtual bool shouldInsertFencesForAtomic(const Instruction *I) const {
2017   return false;
2018 }
2019
2020 /// Whether AtomicExpandPass should automatically insert a trailing fence
2021 /// without reducing the ordering for this atomic. Defaults to false.
2022 virtual bool
2023 shouldInsertTrailingFenceForAtomicStore(const Instruction *I) const {
2024   return false;
2025 }
2026
2027 /// Perform a load-linked operation on Addr, returning a "Value *" with the
2028 /// corresponding pointee type. This may entail some non-trivial operations to
2029 /// truncate or reconstruct types that will be illegal in the backend. See
2030 /// ARMISelLowering for an example implementation.
2031 virtual Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy,
2032                               Value *Addr, AtomicOrdering Ord) const {
2033   llvm_unreachable("Load linked unimplemented on this target");
2034 }
2035
2036 /// Perform a store-conditional operation to Addr. Return the status of the
2037 /// store. This should be 0 if the store succeeded, non-zero otherwise.
2038 virtual Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val,
2039                                     Value *Addr, AtomicOrdering Ord) const {
2040   llvm_unreachable("Store conditional unimplemented on this target");
2041 }
2042
2043 /// Perform a masked atomicrmw using a target-specific intrinsic. This
2044 /// represents the core LL/SC loop which will be lowered at a late stage by
2045 /// the backend. The target-specific intrinsic returns the loaded value and
2046 /// is not responsible for masking and shifting the result.
2047 virtual Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder,
2048                                             AtomicRMWInst *AI,
2049                                             Value *AlignedAddr, Value *Incr,
2050                                             Value *Mask, Value *ShiftAmt,
2051                                             AtomicOrdering Ord) const {
2052   llvm_unreachable("Masked atomicrmw expansion unimplemented on this target");
2053 }
2054
2055 /// Perform an atomicrmw expansion in a target-specific way. This is
2056 /// expected to be called when masked atomicrmw and bit test atomicrmw don't
2057 /// work, and the target supports another way to lower atomicrmw.
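/// A target opting in would override the hook declared below, e.g.
/// (hypothetical \c MyTargetLowering; shown only as a sketch):
/// \code
///   void MyTargetLowering::emitExpandAtomicRMW(AtomicRMWInst *AI) const {
///     // Build a target-specific RMW loop around AI, then replace all of
///     // AI's uses and erase it.
///   }
/// \endcode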
2058 virtual void emitExpandAtomicRMW(AtomicRMWInst *AI) const {
2059   llvm_unreachable(
2060       "Generic atomicrmw expansion unimplemented on this target");
2061 }
2062
2063 /// Perform a bit test atomicrmw using a target-specific intrinsic. This
2064 /// represents the combined bit test intrinsic which will be lowered at a late
2065 /// stage by the backend.
2066 virtual void emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
2067   llvm_unreachable(
2068       "Bit test atomicrmw expansion unimplemented on this target");
2069 }
2070
2071 /// Perform an atomicrmw whose result is only used by a comparison, using a
2072 /// target-specific intrinsic. This represents the combined atomic and compare
2073 /// intrinsic which will be lowered at a late stage by the backend.
2074 virtual void emitCmpArithAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
2075   llvm_unreachable(
2076       "Compare arith atomicrmw expansion unimplemented on this target");
2077 }
2078
2079 /// Perform a masked cmpxchg using a target-specific intrinsic. This
2080 /// represents the core LL/SC loop which will be lowered at a late stage by
2081 /// the backend. The target-specific intrinsic returns the loaded value and
2082 /// is not responsible for masking and shifting the result.
2083 virtual Value *emitMaskedAtomicCmpXchgIntrinsic(
2084     IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
2085     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
2086   llvm_unreachable("Masked cmpxchg expansion unimplemented on this target");
2087 }
2088
2089 //===--------------------------------------------------------------------===//
2090 /// \name KCFI check lowering.
2091 /// @{
2092
2093 virtual MachineInstr *EmitKCFICheck(MachineBasicBlock &MBB,
2094                                     MachineBasicBlock::instr_iterator &MBBI,
2095                                     const TargetInstrInfo *TII) const {
2096   llvm_unreachable("KCFI is not supported on this target");
2097 }
2098
2099 /// @}
2100
2101 /// Inserts in the IR a target-specific intrinsic specifying a fence.
2102 /// It is called by AtomicExpandPass before expanding an
2103 /// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
2104 /// if shouldInsertFencesForAtomic returns true.
2105 ///
2106 /// Inst is the original atomic instruction, prior to other expansions that
2107 /// may be performed.
2108 ///
2109 /// This function should either return nullptr, or a pointer to an IR-level
2110 /// Instruction*. Even complex fence sequences can be represented by a
2111 /// single Instruction* through an intrinsic to be lowered later.
2112 /// Backends should override this method to produce target-specific intrinsic
2113 /// for their fences.
2114 /// FIXME: Please note that the default implementation here in terms of
2115 /// IR-level fences exists for historical/compatibility reasons and is
2116 /// *unsound*! Fences cannot, in general, be used to restore sequential
2117 /// consistency. Consider, for example:
2118 ///   atomic<int> x = y = 0;
2119 ///   int r1, r2, r3, r4;
2120 ///   Thread 0:
2121 ///     x.store(1);
2122 ///   Thread 1:
2123 ///     y.store(1);
2124 ///   Thread 2:
2125 ///     r1 = x.load();
2126 ///     r2 = y.load();
2127 ///   Thread 3:
2128 ///     r3 = y.load();
2129 ///     r4 = x.load();
2130 /// r1 = r3 = 1 and r2 = r4 = 0 is impossible as long as the accesses are all
2131 /// seq_cst. But if they are lowered to monotonic accesses, no amount of
2132 /// IR-level fences can prevent it.
2133 /// @{
2134 virtual Instruction *emitLeadingFence(IRBuilderBase &Builder,
2135                                       Instruction *Inst,
2136                                       AtomicOrdering Ord) const;
2137
2138 virtual Instruction *emitTrailingFence(IRBuilderBase &Builder,
2139                                        Instruction *Inst,
2140                                        AtomicOrdering Ord) const;
2141 /// @}
2142
2143 // Emits code that executes when the comparison result in the ll/sc
2144 // expansion of a cmpxchg instruction is such that the store-conditional will
2145 // not execute. This makes it possible to balance out the load-linked with
2146 // a dedicated instruction, if desired.
2147 // E.g., on ARM, if ldrex isn't followed by strex, the exclusive monitor would
2148 // be unnecessarily held, except if clrex, inserted by this hook, is executed.
2149 virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const {}
2150
2151 /// Returns true if arguments should be sign-extended in lib calls.
2152 virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
2153   return IsSigned;
2154 }
2155
2156 /// Returns true if arguments should be extended in lib calls.
2157 virtual bool shouldExtendTypeInLibCall(EVT Type) const {
2158   return true;
2159 }
2160
2161 /// Returns how the given (atomic) load should be expanded by the
2162 /// IR-level AtomicExpand pass.
2163 virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const {
2164   return AtomicExpansionKind::None;
2165 }
2166
2167 /// Returns how the given (atomic) load should be cast by the IR-level
2168 /// AtomicExpand pass.
2169 virtual AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const {
2170   if (LI->getType()->isFloatingPointTy())
2171     return AtomicExpansionKind::CastToInteger;
2172   return AtomicExpansionKind::None;
2173 }
2174
2175 /// Returns how the given (atomic) store should be expanded by the IR-level
2176 /// AtomicExpand pass. For instance AtomicExpansionKind::Expand will try
2177 /// to use an atomicrmw xchg.
2178 virtual AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const {
2179   return AtomicExpansionKind::None;
2180 }
2181
2182 /// Returns how the given (atomic) store should be cast by the IR-level
2183 /// AtomicExpand pass. For instance AtomicExpansionKind::CastToInteger
2184 /// will try to cast the operands to integer values.
2185 virtual AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const {
2186   if (SI->getValueOperand()->getType()->isFloatingPointTy())
2187     return AtomicExpansionKind::CastToInteger;
2188   return AtomicExpansionKind::None;
2189 }
2190
2191 /// Returns how the given atomic cmpxchg should be expanded by the IR-level
2192 /// AtomicExpand pass.
2193 virtual AtomicExpansionKind
2194 shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
2195   return AtomicExpansionKind::None;
2196 }
2197
2198 /// Returns how the IR-level AtomicExpand pass should expand the given
2199 /// AtomicRMW, if at all. Default is to never expand.
2200 virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
2201   return RMW->isFloatingPointOperation() ?
2202       AtomicExpansionKind::CmpXChg : AtomicExpansionKind::None;
2203 }
2204
2205 /// Returns how the given atomicrmw should be cast by the IR-level
2206 /// AtomicExpand pass.
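/// For illustration (a sketch of how AtomicExpandPass consumes this hook;
/// \c TLI and \c RMWI are assumed to be in scope in the pass):
/// \code
///   if (TLI.shouldCastAtomicRMWIInIR(RMWI) ==
///       TargetLoweringBase::AtomicExpansionKind::CastToInteger) {
///     // A float or pointer xchg is rewritten as an integer xchg with
///     // bitcasts around it.
///   }
/// \endcode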
2207 virtual AtomicExpansionKind
2208 shouldCastAtomicRMWIInIR(AtomicRMWInst *RMWI) const {
2209   if (RMWI->getOperation() == AtomicRMWInst::Xchg &&
2210       (RMWI->getValOperand()->getType()->isFloatingPointTy() ||
2211        RMWI->getValOperand()->getType()->isPointerTy()))
2212     return AtomicExpansionKind::CastToInteger;
2213
2214   return AtomicExpansionKind::None;
2215 }
2216
2217 /// On some platforms, an AtomicRMW that never actually modifies the value
2218 /// (such as fetch_add of 0) can be turned into a fence followed by an
2219 /// atomic load. This may sound useless, but it makes it possible for the
2220 /// processor to keep the cacheline shared, dramatically improving
2221 /// performance. And such idempotent RMWs are useful for implementing some
2222 /// kinds of locks, see for example (justification + benchmarks):
2223 /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
2224 /// This method tries doing that transformation, returning the atomic load if
2225 /// it succeeds, and nullptr otherwise.
2226 /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
2227 /// another round of expansion.
2228 virtual LoadInst *
2229 lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
2230   return nullptr;
2231 }
2232
2233 /// Returns how the platform's atomic operations are extended (ZERO_EXTEND,
2234 /// SIGN_EXTEND, or ANY_EXTEND).
2235 virtual ISD::NodeType getExtendForAtomicOps() const {
2236   return ISD::ZERO_EXTEND;
2237 }
2238
2239 /// Returns how the platform's atomic compare and swap expects its comparison
2240 /// value to be extended (ZERO_EXTEND, SIGN_EXTEND, or ANY_EXTEND). This is
2241 /// separate from getExtendForAtomicOps, which is concerned with the
2242 /// sign-extension of the instruction's output, whereas here we are concerned
2243 /// with the sign-extension of the input. For targets with compare-and-swap
2244 /// instructions (or sub-word comparisons in their LL/SC loop expansions),
2245 /// the input can be ANY_EXTEND, but the output will still have a specific
2246 /// extension.
2247 virtual ISD::NodeType getExtendForAtomicCmpSwapArg() const {
2248   return ISD::ANY_EXTEND;
2249 }
2250
2251 /// @}
2252
2253 /// Returns true if we should normalize
2254 /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
2255 /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)) if it is likely
2256 /// that it saves us from materializing N0 and N1 in an integer register.
2257 /// Targets that are able to perform and/or on flags should return false here.
2258 virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context,
2259                                              EVT VT) const {
2260   // If a target has multiple condition registers, then it likely has logical
2261   // operations on those registers.
2262   if (hasMultipleConditionRegisters())
2263     return false;
2264   // Only do the transform if the value won't be split into multiple
2265   // registers.
2266   LegalizeTypeAction Action = getTypeAction(Context, VT);
2267   return Action != TypeExpandInteger && Action != TypeExpandFloat &&
2268          Action != TypeSplitVector;
2269 }
2270
2271 virtual bool isProfitableToCombineMinNumMaxNum(EVT VT) const { return true; }
2272
2273 /// Return true if a select of constants (select Cond, C1, C2) should be
2274 /// transformed into simple math ops with the condition value.
/// For example:
2275 ///   select Cond, C1, C1-1 --> add (zext Cond), C1-1
2276 virtual bool convertSelectOfConstantsToMath(EVT VT) const {
2277   return false;
2278 }
2279
2280 /// Return true if it is profitable to transform an integer
2281 /// multiplication-by-constant into simpler operations like shifts and adds.
2282 /// This may be true if the target does not directly support the
2283 /// multiplication operation for the specified type or the sequence of simpler
2284 /// ops is faster than the multiply.
2285 virtual bool decomposeMulByConstant(LLVMContext &Context,
2286                                     EVT VT, SDValue C) const {
2287   return false;
2288 }
2289
2290 /// Return true if it may be profitable to transform
2291 /// (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2).
2292 /// This may not be true if c1 and c2 can be represented as immediates but
2293 /// c1*c2 cannot, for example.
2294 /// The target should check if c1, c2 and c1*c2 can be represented as
2295 /// immediates, or have to be materialized into registers. If it is not sure
2296 /// about some cases, a default true can be returned to let the DAGCombiner
2297 /// decide.
2298 /// AddNode is (add x, c1), and ConstNode is c2.
2299 virtual bool isMulAddWithConstProfitable(SDValue AddNode,
2300                                          SDValue ConstNode) const {
2301   return true;
2302 }
2303
2304 /// Return true if it is more correct/profitable to use strict FP_TO_INT
2305 /// conversion operations - canonicalizing the FP source value instead of
2306 /// converting all cases and then selecting based on value.
2307 /// This may be true if the target throws exceptions for out of bounds
2308 /// conversions or has fast FP CMOV.
2309 virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT,
2310                                       bool IsSigned) const {
2311   return false;
2312 }
2313
2314 /// Return true if it is beneficial to expand an @llvm.powi.* intrinsic.
2315 /// If not optimizing for size, expanding @llvm.powi.* intrinsics is always
2316 /// considered beneficial.
2317 /// If optimizing for size, expansion is only considered beneficial for up to
2318 /// 5 multiplies and a divide (if the exponent is negative).
2319 bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const {
2320   if (Exponent < 0)
2321     Exponent = -Exponent;
2322   uint64_t E = static_cast<uint64_t>(Exponent);
2323   return !OptForSize || (llvm::popcount(E) + Log2_64(E) < 7);
2324 }
2325
2326 //===--------------------------------------------------------------------===//
2327 // TargetLowering Configuration Methods - These methods should be invoked by
2328 // the derived class constructor to configure this object for the target.
2329 //
2330 protected:
2331 /// Specify how the target extends the result of integer and floating point
2332 /// boolean values from i1 to a wider type. See getBooleanContents.
2333 void setBooleanContents(BooleanContent Ty) {
2334   BooleanContents = Ty;
2335   BooleanFloatContents = Ty;
2336 }
2337
2338 /// Specify how the target extends the result of integer and floating point
2339 /// boolean values from i1 to a wider type. See getBooleanContents.
2340 void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) {
2341   BooleanContents = IntTy;
2342   BooleanFloatContents = FloatTy;
2343 }
2344
2345 /// Specify how the target extends the result of a vector boolean value from a
2346 /// vector of i1 to a wider type. See getBooleanContents.
2347 void setBooleanVectorContents(BooleanContent Ty) {
2348   BooleanVectorContents = Ty;
2349 }
2350
2351 /// Specify the target scheduling preference.
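/// For example (an illustrative sketch of constructor code in a hypothetical
/// backend):
/// \code
///   setSchedulingPreference(Sched::RegPressure);
/// \endcode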
2352 void setSchedulingPreference(Sched::Preference Pref) {
2353   SchedPreferenceInfo = Pref;
2354 }
2355
2356 /// Indicate the minimum number of blocks to generate jump tables.
2357 void setMinimumJumpTableEntries(unsigned Val);
2358
2359 /// Indicate the maximum number of entries in jump tables.
2360 /// Set to zero to generate unlimited jump tables.
2361 void setMaximumJumpTableSize(unsigned);
2362
2363 /// If set to a physical register, this specifies the register that
2364 /// llvm.stacksave/llvm.stackrestore should save and restore.
2365 void setStackPointerRegisterToSaveRestore(Register R) {
2366   StackPointerRegisterToSaveRestore = R;
2367 }
2368
2369 /// Tells the code generator that the target has multiple (allocatable)
2370 /// condition registers that can be used to store the results of comparisons
2371 /// for use by selects and conditional branches. With multiple condition
2372 /// registers, the code generator will not aggressively sink comparisons into
2373 /// the blocks of their users.
2374 void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
2375   HasMultipleConditionRegisters = hasManyRegs;
2376 }
2377
2378 /// Tells the code generator that the target has BitExtract instructions.
2379 /// The code generator will aggressively sink "shift"s into the blocks of
2380 /// their users if the users will generate "and" instructions which can be
2381 /// combined with the "shift" into a BitExtract instruction.
2382 void setHasExtractBitsInsn(bool hasExtractInsn = true) {
2383   HasExtractBitsInsn = hasExtractInsn;
2384 }
2385
2386 /// Tells the code generator not to expand logic operations on comparison
2387 /// predicates into separate sequences that increase the amount of flow
2388 /// control.
2389 void setJumpIsExpensive(bool isExpensive = true);
2390
2391 /// Tells the code generator which bitwidths to bypass.
2392 void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
2393   BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
2394 }
2395
2396 /// Add the specified register class as an available regclass for the
2397 /// specified value type. This indicates the selector can handle values of
2398 /// that class natively.
2399 void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
2400   assert((unsigned)VT.SimpleTy < std::size(RegClassForVT));
2401   RegClassForVT[VT.SimpleTy] = RC;
2402 }
2403
2404 /// Return the largest legal super-reg register class of the register class
2405 /// for the specified type and its associated "cost".
2406 virtual std::pair<const TargetRegisterClass *, uint8_t>
2407 findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;
2408
2409 /// Once all of the register classes are added, this allows us to compute
2410 /// derived properties we expose.
2411 void computeRegisterProperties(const TargetRegisterInfo *TRI);
2412
2413 /// Indicate that the specified operation does not work with the specified
2414 /// type and indicate what to do about it. Note that VT may refer to either
2415 /// the type of a result or that of an operand of Op.
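/// Typical usage in a target's constructor (an illustrative sketch, not
/// drawn from any particular backend):
/// \code
///   // i64 multiplication is not natively supported; expand it.
///   setOperationAction(ISD::MUL, MVT::i64, Expand);
///   // f64 negation is handled by this target's LowerOperation hook.
///   setOperationAction(ISD::FNEG, MVT::f64, Custom);
/// \endcode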
2416 void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action) {
2417   assert(Op < std::size(OpActions[0]) && "Table isn't big enough!");
2418   OpActions[(unsigned)VT.SimpleTy][Op] = Action;
2419 }
2420 void setOperationAction(ArrayRef<unsigned> Ops, MVT VT,
2421                         LegalizeAction Action) {
2422   for (auto Op : Ops)
2423     setOperationAction(Op, VT, Action);
2424 }
2425 void setOperationAction(ArrayRef<unsigned> Ops, ArrayRef<MVT> VTs,
2426                         LegalizeAction Action) {
2427   for (auto VT : VTs)
2428     setOperationAction(Ops, VT, Action);
2429 }
2430
2431 /// Indicate that the specified load with extension does not work with the
2432 /// specified type and indicate what to do about it.
2433 void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
2434                       LegalizeAction Action) {
2435   assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
2436          MemVT.isValid() && "Table isn't big enough!");
2437   assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
2438   unsigned Shift = 4 * ExtType;
2439   LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &= ~((uint16_t)0xF << Shift);
2440   LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |= (uint16_t)Action << Shift;
2441 }
2442 void setLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT, MVT MemVT,
2443                       LegalizeAction Action) {
2444   for (auto ExtType : ExtTypes)
2445     setLoadExtAction(ExtType, ValVT, MemVT, Action);
2446 }
2447 void setLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT,
2448                       ArrayRef<MVT> MemVTs, LegalizeAction Action) {
2449   for (auto MemVT : MemVTs)
2450     setLoadExtAction(ExtTypes, ValVT, MemVT, Action);
2451 }
2452
2453 /// Indicate that the specified truncating store does not work with the
2454 /// specified type and indicate what to do about it.
2455 void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action) {
2456   assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!");
2457   TruncStoreActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy] = Action;
2458 }
2459
2460 /// Indicate that the specified indexed load does or does not work with the
2461 /// specified type and indicate what to do about it.
2462 ///
2463 /// NOTE: All indexed mode loads are initialized to Expand in
2464 /// TargetLowering.cpp
2465 void setIndexedLoadAction(ArrayRef<unsigned> IdxModes, MVT VT,
2466                           LegalizeAction Action) {
2467   for (auto IdxMode : IdxModes)
2468     setIndexedModeAction(IdxMode, VT, IMAB_Load, Action);
2469 }
2470
2471 void setIndexedLoadAction(ArrayRef<unsigned> IdxModes, ArrayRef<MVT> VTs,
2472                           LegalizeAction Action) {
2473   for (auto VT : VTs)
2474     setIndexedLoadAction(IdxModes, VT, Action);
2475 }
2476
2477 /// Indicate that the specified indexed store does or does not work with the
2478 /// specified type and indicate what to do about it.
2479 ///
2480 /// NOTE: All indexed mode stores are initialized to Expand in
2481 /// TargetLowering.cpp
2482 void setIndexedStoreAction(ArrayRef<unsigned> IdxModes, MVT VT,
2483                            LegalizeAction Action) {
2484   for (auto IdxMode : IdxModes)
2485     setIndexedModeAction(IdxMode, VT, IMAB_Store, Action);
2486 }
2487
2488 void setIndexedStoreAction(ArrayRef<unsigned> IdxModes, ArrayRef<MVT> VTs,
2489                            LegalizeAction Action) {
2490   for (auto VT : VTs)
2491     setIndexedStoreAction(IdxModes, VT, Action);
2492 }
2493
2494 /// Indicate that the specified indexed masked load does or does not work with
2495 /// the specified type and indicate what to do about it.
2496 ///
2497 /// NOTE: All indexed mode masked loads are initialized to Expand in
2498 /// TargetLowering.cpp
2499 void setIndexedMaskedLoadAction(unsigned IdxMode, MVT VT,
2500                                 LegalizeAction Action) {
2501   setIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad, Action);
2502 }
2503
2504 /// Indicate that the specified indexed masked store does or does not work
2505 /// with the specified type and indicate what to do about it.
2506 ///
2507 /// NOTE: All indexed mode masked stores are initialized to Expand in
2508 /// TargetLowering.cpp
2509 void setIndexedMaskedStoreAction(unsigned IdxMode, MVT VT,
2510                                  LegalizeAction Action) {
2511   setIndexedModeAction(IdxMode, VT, IMAB_MaskedStore, Action);
2512 }
2513
2514 /// Indicate that the specified condition code is or isn't supported on the
2515 /// target and indicate what to do about it.
2516 void setCondCodeAction(ArrayRef<ISD::CondCode> CCs, MVT VT,
2517                        LegalizeAction Action) {
2518   for (auto CC : CCs) {
2519     assert(VT.isValid() && (unsigned)CC < std::size(CondCodeActions) &&
2520            "Table isn't big enough!");
2521     assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
2522     /// The lower 3 bits of SimpleTy select the Nth 4-bit group within the
2523     /// 32-bit value; the remaining upper bits index into the second dimension
2524     /// of the array to select which 32-bit value to use.
2525     uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
2526     CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift);
2527     CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift;
2528   }
2529 }
2530 void setCondCodeAction(ArrayRef<ISD::CondCode> CCs, ArrayRef<MVT> VTs,
2531                        LegalizeAction Action) {
2532   for (auto VT : VTs)
2533     setCondCodeAction(CCs, VT, Action);
2534 }
2535
2536 /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
2537 /// to trying a larger integer/fp until it can find one that works. If that
2538 /// default is insufficient, this method can be used by the target to override
2539 /// the default.
2540 void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
2541   PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
2542 }
2543
2544 /// Convenience method to set an operation to Promote and specify the type
2545 /// in a single call.
2546 void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
2547   setOperationAction(Opc, OrigVT, Promote);
2548   AddPromotedToType(Opc, OrigVT, DestVT);
2549 }
2550
2551 /// Targets should invoke this method for each target independent node that
2552 /// they want to provide a custom DAG combiner for by implementing the
2553 /// PerformDAGCombine virtual method.
2554 void setTargetDAGCombine(ArrayRef<ISD::NodeType> NTs) {
2555   for (auto NT : NTs) {
2556     assert(unsigned(NT >> 3) < std::size(TargetDAGCombineArray));
2557     TargetDAGCombineArray[NT >> 3] |= 1 << (NT & 7);
2558   }
2559 }
2560
2561 /// Set the target's minimum function alignment.
2562 void setMinFunctionAlignment(Align Alignment) {
2563   MinFunctionAlignment = Alignment;
2564 }
2565
2566 /// Set the target's preferred function alignment. This should be set if
2567 /// there is a performance benefit to higher-than-minimum alignment.
2568 void setPrefFunctionAlignment(Align Alignment) {
2569   PrefFunctionAlignment = Alignment;
2570 }
2571
2572 /// Set the target's preferred loop alignment. The default alignment of one
2573 /// means the target does not care about loop alignment. The target may also
2574 /// override getPrefLoopAlignment to provide per-loop values.
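/// For example (sketch):
/// \code
///   setPrefLoopAlignment(Align(16)); // prefer 16-byte-aligned loop headers
/// \endcode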
2575 void setPrefLoopAlignment(Align Alignment) { PrefLoopAlignment = Alignment; }
2576 void setMaxBytesForAlignment(unsigned MaxBytes) {
2577   MaxBytesForAlignment = MaxBytes;
2578 }
2579
2580 /// Set the minimum stack alignment of an argument.
2581 void setMinStackArgumentAlignment(Align Alignment) {
2582   MinStackArgumentAlignment = Alignment;
2583 }
2584
2585 /// Set the maximum atomic operation size supported by the
2586 /// backend. Atomic operations greater than this size (as well as
2587 /// ones that are not naturally aligned), will be expanded by
2588 /// AtomicExpandPass into an __atomic_* library call.
2589 void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits) {
2590   MaxAtomicSizeInBitsSupported = SizeInBits;
2591 }
2592
2593 /// Set the size in bits of the maximum div/rem the backend supports.
2594 /// Larger operations will be expanded by ExpandLargeDivRem.
2595 void setMaxDivRemBitWidthSupported(unsigned SizeInBits) {
2596   MaxDivRemBitWidthSupported = SizeInBits;
2597 }
2598
2599 /// Set the size in bits of the maximum fp convert the backend supports.
2600 /// Larger operations will be expanded by ExpandLargeFPConvert.
2601 void setMaxLargeFPConvertBitWidthSupported(unsigned SizeInBits) {
2602   MaxLargeFPConvertBitWidthSupported = SizeInBits;
2603 }
2604
2605 /// Sets the minimum cmpxchg or ll/sc size supported by the backend.
2606 void setMinCmpXchgSizeInBits(unsigned SizeInBits) {
2607   MinCmpXchgSizeInBits = SizeInBits;
2608 }
2609
2610 /// Sets whether unaligned atomic operations are supported.
2611 void setSupportsUnalignedAtomics(bool UnalignedSupported) {
2612   SupportsUnalignedAtomics = UnalignedSupported;
2613 }
2614
2615 public:
2616 //===--------------------------------------------------------------------===//
2617 // Addressing mode description hooks (used by LSR etc).
2618 //
2619
2620 /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
2621 /// instructions reading the address. This allows as much computation as
2622 /// possible to be done in the address mode for that operand. This hook lets
2623 /// targets also pass back when this should be done on intrinsics which
2624 /// load/store.
2625 virtual bool getAddrModeArguments(IntrinsicInst * /*I*/,
2626                                   SmallVectorImpl<Value*> &/*Ops*/,
2627                                   Type *&/*AccessTy*/) const {
2628   return false;
2629 }
2630
2631 /// This represents an addressing mode of:
2632 ///   BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
2633 /// If BaseGV is null, there is no BaseGV.
2634 /// If BaseOffs is zero, there is no base offset.
2635 /// If HasBaseReg is false, there is no base register.
2636 /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with
2637 /// no scale.
2638 struct AddrMode {
2639   GlobalValue *BaseGV = nullptr;
2640   int64_t BaseOffs = 0;
2641   bool HasBaseReg = false;
2642   int64_t Scale = 0;
2643   AddrMode() = default;
2644 };
2645
2646 /// Return true if the addressing mode represented by AM is legal for this
2647 /// target, for a load/store of the specified type.
2648 ///
2649 /// The type may be VoidTy, in which case only return true if the addressing
2650 /// mode is legal for a load/store of any legal type. TODO: Handle
2651 /// pre/postinc as well.
2652 ///
2653 /// If the address space cannot be determined, it will be -1.
///
/// TODO: Remove default argument
virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                                   Type *Ty, unsigned AddrSpace,
                                   Instruction *I = nullptr) const;

/// Return true if the specified immediate is a legal icmp immediate, that
/// is, the target has icmp instructions which can compare a register against
/// the immediate without having to materialize the immediate into a
/// register.
virtual bool isLegalICmpImmediate(int64_t) const {
  return true;
}

/// Return true if the specified immediate is a legal add immediate, that is,
/// the target has add instructions which can add a register with the
/// immediate without having to materialize the immediate into a register.
virtual bool isLegalAddImmediate(int64_t) const {
  return true;
}

/// Return true if the specified immediate is legal for the value input of a
/// store instruction.
virtual bool isLegalStoreImmediate(int64_t Value) const {
  // Default implementation assumes that at least 0 works since it is likely
  // that a zero register exists or a zero immediate is allowed.
  return Value == 0;
}

/// Return true if it's significantly cheaper to shift a vector by a uniform
/// scalar than by an amount which will vary across each lane. On x86 before
/// AVX2 for example, there is a "psllw" instruction for the former case, but
/// no simple instruction for a general "a << b" operation on vectors.
/// This should also apply to lowering for vector funnel shifts (rotates).
virtual bool isVectorShiftByScalarCheap(Type *Ty) const {
  return false;
}

/// Given a shuffle vector SVI representing a vector splat, return a new
/// scalar type of size equal to SVI's scalar type if the new type is more
/// profitable. Returns nullptr otherwise. For example under MVE float splats
/// are converted to integer to prevent the need to move from SPR to GPR
/// registers.
virtual Type *shouldConvertSplatType(ShuffleVectorInst *SVI) const {
  return nullptr;
}

/// Given a set of interconnected phis of type 'From' that are loaded/stored
/// or bitcast to type 'To', return true if the set should be converted to
/// 'To'.
virtual bool shouldConvertPhiType(Type *From, Type *To) const {
  return (From->isIntegerTy() || From->isFloatingPointTy()) &&
         (To->isIntegerTy() || To->isFloatingPointTy());
}

/// Returns true if the opcode is a commutative binary operation.
virtual bool isCommutativeBinOp(unsigned Opcode) const {
  // FIXME: This should get its info from the td file.
  switch (Opcode) {
  case ISD::ADD:
  case ISD::SMIN:
  case ISD::SMAX:
  case ISD::UMIN:
  case ISD::UMAX:
  case ISD::MUL:
  case ISD::MULHU:
  case ISD::MULHS:
  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
  case ISD::FADD:
  case ISD::FMUL:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SADDSAT:
  case ISD::UADDSAT:
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case ISD::FMINNUM_IEEE:
  case ISD::FMAXNUM_IEEE:
  case ISD::FMINIMUM:
  case ISD::FMAXIMUM:
  case ISD::AVGFLOORS:
  case ISD::AVGFLOORU:
  case ISD::AVGCEILS:
  case ISD::AVGCEILU:
  case ISD::ABDS:
  case ISD::ABDU:
    return true;
  default: return false;
  }
}

/// Return true if the node is a math/logic binary operator.
virtual bool isBinOp(unsigned Opcode) const {
  // A commutative binop must be a binop.
  if (isCommutativeBinOp(Opcode))
    return true;
  // These are non-commutative binops.
  switch (Opcode) {
  case ISD::SUB:
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA:
  case ISD::ROTL:
  case ISD::ROTR:
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
  case ISD::SSUBSAT:
  case ISD::USUBSAT:
  case ISD::FSUB:
  case ISD::FDIV:
  case ISD::FREM:
    return true;
  default:
    return false;
  }
}

/// Return true if it's free to truncate a value of type FromTy to type
/// ToTy. e.g. On x86 it's free to truncate an i32 value in register EAX to
/// i16 by referencing its sub-register AX.
/// Targets must return false when FromTy <= ToTy.
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const {
  return false;
}

/// Return true if a truncation from FromTy to ToTy is permitted when
/// deciding whether a call is in tail position. Typically this means that
/// both results would be assigned to the same register or stack slot, but it
/// could mean the target performs adequate checks of its own before
/// proceeding with the tail call. Targets must return false when
/// FromTy <= ToTy.
virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const {
  return false;
}

virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const { return false; }
virtual bool isTruncateFree(LLT FromTy, LLT ToTy, const DataLayout &DL,
                            LLVMContext &Ctx) const {
  return isTruncateFree(getApproximateEVTForLLT(FromTy, DL, Ctx),
                        getApproximateEVTForLLT(ToTy, DL, Ctx));
}

virtual bool isProfitableToHoist(Instruction *I) const { return true; }

/// Return true if the extension represented by \p I is free.
/// Unlike the is[Z|FP]ExtFree family, which is based on types, this method
/// can use the context provided by \p I to decide whether or not \p I is
/// free. This method extends the behavior of the is[Z|FP]ExtFree family:
/// if is[Z|FP]ExtFree returns true, then this method returns true as well;
/// the converse is not true.
/// The target can perform the adequate checks by overriding isExtFreeImpl.
/// \pre \p I must be a sign, zero, or fp extension.
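///
/// Targets normally customize this through isExtFreeImpl rather than by
/// overriding isExtFree itself. A minimal sketch, assuming a hypothetical
/// target whose loads implicitly zero-extend into the full register:
/// \code
///   bool MyTargetLowering::isExtFreeImpl(const Instruction *I) const {
///     // A zext whose operand is a load is folded into the load itself.
///     return isa<ZExtInst>(I) && isa<LoadInst>(I->getOperand(0));
///   }
/// \endcode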
2813 bool isExtFree(const Instruction *I) const { 2814 switch (I->getOpcode()) { 2815 case Instruction::FPExt: 2816 if (isFPExtFree(EVT::getEVT(I->getType()), 2817 EVT::getEVT(I->getOperand(0)->getType()))) 2818 return true; 2819 break; 2820 case Instruction::ZExt: 2821 if (isZExtFree(I->getOperand(0)->getType(), I->getType())) 2822 return true; 2823 break; 2824 case Instruction::SExt: 2825 break; 2826 default: 2827 llvm_unreachable("Instruction is not an extension"); 2828 } 2829 return isExtFreeImpl(I); 2830 } 2831 2832 /// Return true if \p Load and \p Ext can form an ExtLoad. 2833 /// For example, in AArch64 2834 /// %L = load i8, i8* %ptr 2835 /// %E = zext i8 %L to i32 2836 /// can be lowered into one load instruction 2837 /// ldrb w0, [x0] 2838 bool isExtLoad(const LoadInst *Load, const Instruction *Ext, 2839 const DataLayout &DL) const { 2840 EVT VT = getValueType(DL, Ext->getType()); 2841 EVT LoadVT = getValueType(DL, Load->getType()); 2842 2843 // If the load has other users and the truncate is not free, the ext 2844 // probably isn't free. 2845 if (!Load->hasOneUse() && (isTypeLegal(LoadVT) || !isTypeLegal(VT)) && 2846 !isTruncateFree(Ext->getType(), Load->getType())) 2847 return false; 2848 2849 // Check whether the target supports casts folded into loads. 2850 unsigned LType; 2851 if (isa<ZExtInst>(Ext)) 2852 LType = ISD::ZEXTLOAD; 2853 else { 2854 assert(isa<SExtInst>(Ext) && "Unexpected ext type!"); 2855 LType = ISD::SEXTLOAD; 2856 } 2857 2858 return isLoadExtLegal(LType, VT, LoadVT); 2859 } 2860 2861 /// Return true if any actual instruction that defines a value of type FromTy 2862 /// implicitly zero-extends the value to ToTy in the result register. 2863 /// 2864 /// The function should return true when it is likely that the truncate can 2865 /// be freely folded with an instruction defining a value of FromTy. If 2866 /// the defining instruction is unknown (because you're looking at a 2867 /// function argument, PHI, etc.) then the target may require an 2868 /// explicit truncate, which is not necessarily free, but this function 2869 /// does not deal with those cases. 2870 /// Targets must return false when FromTy >= ToTy. 2871 virtual bool isZExtFree(Type *FromTy, Type *ToTy) const { 2872 return false; 2873 } 2874 2875 virtual bool isZExtFree(EVT FromTy, EVT ToTy) const { return false; } 2876 virtual bool isZExtFree(LLT FromTy, LLT ToTy, const DataLayout &DL, 2877 LLVMContext &Ctx) const { 2878 return isZExtFree(getApproximateEVTForLLT(FromTy, DL, Ctx), 2879 getApproximateEVTForLLT(ToTy, DL, Ctx)); 2880 } 2881 2882 /// Return true if zero-extending the specific node Val to type VT2 is free 2883 /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or 2884 /// because it's folded such as X86 zero-extending loads). 2885 virtual bool isZExtFree(SDValue Val, EVT VT2) const { 2886 return isZExtFree(Val.getValueType(), VT2); 2887 } 2888 2889 /// Return true if sign-extension from FromTy to ToTy is cheaper than 2890 /// zero-extension. 2891 virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const { 2892 return false; 2893 } 2894 2895 /// Return true if this constant should be sign extended when promoting to 2896 /// a larger type. 2897 virtual bool signExtendConstant(const ConstantInt *C) const { return false; } 2898 2899 /// Return true if sinking I's operands to the same basic block as I is 2900 /// profitable, e.g. because the operands can be folded into a target 2901 /// instruction during instruction selection. 
After calling the function 2902 /// \p Ops contains the Uses to sink ordered by dominance (dominating users 2903 /// come first). 2904 virtual bool shouldSinkOperands(Instruction *I, 2905 SmallVectorImpl<Use *> &Ops) const { 2906 return false; 2907 } 2908 2909 /// Try to optimize extending or truncating conversion instructions (like 2910 /// zext, trunc, fptoui, uitofp) for the target. 2911 virtual bool 2912 optimizeExtendOrTruncateConversion(Instruction *I, Loop *L, 2913 const TargetTransformInfo &TTI) const { 2914 return false; 2915 } 2916 2917 /// Return true if the target supplies and combines to a paired load 2918 /// two loaded values of type LoadedType next to each other in memory. 2919 /// RequiredAlignment gives the minimal alignment constraints that must be met 2920 /// to be able to select this paired load. 2921 /// 2922 /// This information is *not* used to generate actual paired loads, but it is 2923 /// used to generate a sequence of loads that is easier to combine into a 2924 /// paired load. 2925 /// For instance, something like this: 2926 /// a = load i64* addr 2927 /// b = trunc i64 a to i32 2928 /// c = lshr i64 a, 32 2929 /// d = trunc i64 c to i32 2930 /// will be optimized into: 2931 /// b = load i32* addr1 2932 /// d = load i32* addr2 2933 /// Where addr1 = addr2 +/- sizeof(i32). 2934 /// 2935 /// In other words, unless the target performs a post-isel load combining, 2936 /// this information should not be provided because it will generate more 2937 /// loads. 2938 virtual bool hasPairedLoad(EVT /*LoadedType*/, 2939 Align & /*RequiredAlignment*/) const { 2940 return false; 2941 } 2942 2943 /// Return true if the target has a vector blend instruction. 2944 virtual bool hasVectorBlend() const { return false; } 2945 2946 /// Get the maximum supported factor for interleaved memory accesses. 2947 /// Default to be the minimum interleave factor: 2. 2948 virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; } 2949 2950 /// Lower an interleaved load to target specific intrinsics. Return 2951 /// true on success. 2952 /// 2953 /// \p LI is the vector load instruction. 2954 /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector. 2955 /// \p Indices is the corresponding indices for each shufflevector. 2956 /// \p Factor is the interleave factor. 2957 virtual bool lowerInterleavedLoad(LoadInst *LI, 2958 ArrayRef<ShuffleVectorInst *> Shuffles, 2959 ArrayRef<unsigned> Indices, 2960 unsigned Factor) const { 2961 return false; 2962 } 2963 2964 /// Lower an interleaved store to target specific intrinsics. Return 2965 /// true on success. 2966 /// 2967 /// \p SI is the vector store instruction. 2968 /// \p SVI is the shufflevector to RE-interleave the stored vector. 2969 /// \p Factor is the interleave factor. 2970 virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI, 2971 unsigned Factor) const { 2972 return false; 2973 } 2974 2975 /// Lower a deinterleave intrinsic to a target specific load intrinsic. 2976 /// Return true on success. Currently only supports 2977 /// llvm.experimental.vector.deinterleave2 2978 /// 2979 /// \p DI is the deinterleave intrinsic. 2980 /// \p LI is the accompanying load instruction 2981 virtual bool lowerDeinterleaveIntrinsicToLoad(IntrinsicInst *DI, 2982 LoadInst *LI) const { 2983 return false; 2984 } 2985 2986 /// Lower an interleave intrinsic to a target specific store intrinsic. 2987 /// Return true on success. 
Currently only supports 2988 /// llvm.experimental.vector.interleave2 2989 /// 2990 /// \p II is the interleave intrinsic. 2991 /// \p SI is the accompanying store instruction 2992 virtual bool lowerInterleaveIntrinsicToStore(IntrinsicInst *II, 2993 StoreInst *SI) const { 2994 return false; 2995 } 2996 2997 /// Return true if an fpext operation is free (for instance, because 2998 /// single-precision floating-point numbers are implicitly extended to 2999 /// double-precision). 3000 virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const { 3001 assert(SrcVT.isFloatingPoint() && DestVT.isFloatingPoint() && 3002 "invalid fpext types"); 3003 return false; 3004 } 3005 3006 /// Return true if an fpext operation input to an \p Opcode operation is free 3007 /// (for instance, because half-precision floating-point numbers are 3008 /// implicitly extended to float-precision) for an FMA instruction. 3009 virtual bool isFPExtFoldable(const MachineInstr &MI, unsigned Opcode, 3010 LLT DestTy, LLT SrcTy) const { 3011 return false; 3012 } 3013 3014 /// Return true if an fpext operation input to an \p Opcode operation is free 3015 /// (for instance, because half-precision floating-point numbers are 3016 /// implicitly extended to float-precision) for an FMA instruction. 3017 virtual bool isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode, 3018 EVT DestVT, EVT SrcVT) const { 3019 assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() && 3020 "invalid fpext types"); 3021 return isFPExtFree(DestVT, SrcVT); 3022 } 3023 3024 /// Return true if folding a vector load into ExtVal (a sign, zero, or any 3025 /// extend node) is profitable. 3026 virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; } 3027 3028 /// Return true if an fneg operation is free to the point where it is never 3029 /// worthwhile to replace it with a bitwise operation. 3030 virtual bool isFNegFree(EVT VT) const { 3031 assert(VT.isFloatingPoint()); 3032 return false; 3033 } 3034 3035 /// Return true if an fabs operation is free to the point where it is never 3036 /// worthwhile to replace it with a bitwise operation. 3037 virtual bool isFAbsFree(EVT VT) const { 3038 assert(VT.isFloatingPoint()); 3039 return false; 3040 } 3041 3042 /// Return true if an FMA operation is faster than a pair of fmul and fadd 3043 /// instructions. fmuladd intrinsics will be expanded to FMAs when this method 3044 /// returns true, otherwise fmuladd is expanded to fmul + fadd. 3045 /// 3046 /// NOTE: This may be called before legalization on types for which FMAs are 3047 /// not legal, but should return true if those types will eventually legalize 3048 /// to types that support FMAs. After legalization, it will only be called on 3049 /// types that support FMAs (via Legal or Custom actions) 3050 virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, 3051 EVT) const { 3052 return false; 3053 } 3054 3055 /// Return true if an FMA operation is faster than a pair of fmul and fadd 3056 /// instructions. fmuladd intrinsics will be expanded to FMAs when this method 3057 /// returns true, otherwise fmuladd is expanded to fmul + fadd. 3058 /// 3059 /// NOTE: This may be called before legalization on types for which FMAs are 3060 /// not legal, but should return true if those types will eventually legalize 3061 /// to types that support FMAs. 
/// After legalization, it will only be called on types that support FMAs
/// (via Legal or Custom actions).
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                        LLT) const {
  return false;
}

/// IR version
virtual bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *) const {
  return false;
}

/// Returns true if \p MI can be combined with another instruction to
/// form TargetOpcode::G_FMAD. \p MI may be a TargetOpcode::G_FADD,
/// TargetOpcode::G_FSUB, or a TargetOpcode::G_FMUL which will be
/// distributed into an fadd/fsub.
virtual bool isFMADLegal(const MachineInstr &MI, LLT Ty) const {
  assert((MI.getOpcode() == TargetOpcode::G_FADD ||
          MI.getOpcode() == TargetOpcode::G_FSUB ||
          MI.getOpcode() == TargetOpcode::G_FMUL) &&
         "unexpected node in FMAD forming combine");
  switch (Ty.getScalarSizeInBits()) {
  case 16:
    return isOperationLegal(TargetOpcode::G_FMAD, MVT::f16);
  case 32:
    return isOperationLegal(TargetOpcode::G_FMAD, MVT::f32);
  case 64:
    return isOperationLegal(TargetOpcode::G_FMAD, MVT::f64);
  default:
    break;
  }

  return false;
}

/// Returns true if \p N can be combined with another node to form an
/// ISD::FMAD. \p N may be an ISD::FADD, ISD::FSUB, or an ISD::FMUL which
/// will be distributed into an fadd/fsub.
virtual bool isFMADLegal(const SelectionDAG &DAG, const SDNode *N) const {
  assert((N->getOpcode() == ISD::FADD || N->getOpcode() == ISD::FSUB ||
          N->getOpcode() == ISD::FMUL) &&
         "unexpected node in FMAD forming combine");
  return isOperationLegal(ISD::FMAD, N->getValueType(0));
}

// Return true when the decision to generate FMAs (or FMS, FMLA etc) rather
// than FMUL and ADD is delegated to the machine combiner.
virtual bool generateFMAsInMachineCombiner(EVT VT,
                                           CodeGenOpt::Level OptLevel) const {
  return false;
}

/// Return true if it's profitable to narrow operations of type SrcVT to
/// DestVT. e.g. on x86, it's profitable to narrow from i32 to i8 but not
/// from i32 to i16.
virtual bool isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
  return false;
}

/// Return true if pulling a binary operation into a select with an identity
/// constant is profitable. This is the inverse of an IR transform.
/// Example: X + (Cond ? Y : 0) --> Cond ? (X + Y) : X
virtual bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode,
                                                  EVT VT) const {
  return false;
}

/// Return true if it is beneficial to convert a load of a constant to
/// just the constant itself.
/// On some targets it might be more efficient to use a combination of
/// arithmetic instructions to materialize the constant instead of loading it
/// from a constant pool.
virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                               Type *Ty) const {
  return false;
}

/// Return true if EXTRACT_SUBVECTOR is cheap for extracting this result
/// type from this source type with this index. This is needed because
/// EXTRACT_SUBVECTOR usually has custom lowering that depends on the index
/// of the first element, and only the target knows which lowering is cheap.
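///
/// A common heuristic, shown here only as a sketch for a hypothetical
/// target: treat extraction of the subvector at index 0 as a subregister
/// copy and everything else as a shuffle:
/// \code
///   bool MyTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
///                                                  unsigned Index) const {
///     // Only the low subvector (Index == 0) is a plain subregister
///     // extract; other positions need a real shuffle instruction.
///     return ResVT.isFixedLengthVector() && Index == 0;
///   }
/// \endcode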
virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                                     unsigned Index) const {
  return false;
}

/// Try to convert an extract element of a vector binary operation into an
/// extract element followed by a scalar operation.
virtual bool shouldScalarizeBinop(SDValue VecOp) const {
  return false;
}

/// Return true if extraction of a scalar element from the given vector type
/// at the given index is cheap. For example, if scalar operations occur on
/// the same register file as vector operations, then an extract element may
/// be a sub-register rename rather than an actual instruction.
virtual bool isExtractVecEltCheap(EVT VT, unsigned Index) const {
  return false;
}

/// Try to convert math with an overflow comparison into the corresponding
/// DAG node operation. Targets may want to override this independently of
/// whether the operation is legal/custom for the given type because it may
/// obscure matching of other patterns.
virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
                                  bool MathUsed) const {
  // TODO: The default logic is inherited from code in CodeGenPrepare.
  // The opcode should not make a difference by default?
  if (Opcode != ISD::UADDO)
    return false;

  // Allow the transform as long as we have an integer type that is not
  // obviously illegal and unsupported and if the math result is used
  // besides the overflow check. On some targets (e.g. SPARC), it is
  // not profitable to form an overflow op if the math result has no
  // concrete users.
  if (VT.isVector())
    return false;
  return MathUsed && (VT.isSimple() || !isOperationExpand(Opcode, VT));
}

// Return true if it is profitable to use a scalar input to a BUILD_VECTOR
// even if the vector itself has multiple uses.
virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
  return false;
}

// Return true if CodeGenPrepare should consider splitting a large offset of
// a GEP so that the GEP fits into the addressing mode and can be sunk into
// the same blocks as its users.
virtual bool shouldConsiderGEPOffsetSplit() const { return false; }

/// Return true if creating a shift of the type by the given
/// amount is not profitable.
virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const {
  return false;
}

/// Does this target require the clearing of high-order bits in a register
/// passed to the fp16 to fp conversion library function?
virtual bool shouldKeepZExtForFP16Conv() const { return false; }

/// Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type
/// VT from min(max(fptoi)) saturation patterns?
virtual bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const {
  return isOperationLegalOrCustom(Op, VT);
}

/// Does this target support complex deinterleaving?
virtual bool isComplexDeinterleavingSupported() const { return false; }

/// Does this target support complex deinterleaving with the given operation
/// and type?
virtual bool isComplexDeinterleavingOperationSupported(
    ComplexDeinterleavingOperation Operation, Type *Ty) const {
  return false;
}

/// Create the IR node for the given complex deinterleaving operation.
3220 /// If one cannot be created using all the given inputs, nullptr should be 3221 /// returned. 3222 virtual Value *createComplexDeinterleavingIR( 3223 IRBuilderBase &B, ComplexDeinterleavingOperation OperationType, 3224 ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB, 3225 Value *Accumulator = nullptr) const { 3226 return nullptr; 3227 } 3228 3229 //===--------------------------------------------------------------------===// 3230 // Runtime Library hooks 3231 // 3232 3233 /// Rename the default libcall routine name for the specified libcall. 3234 void setLibcallName(RTLIB::Libcall Call, const char *Name) { 3235 LibcallRoutineNames[Call] = Name; 3236 } 3237 void setLibcallName(ArrayRef<RTLIB::Libcall> Calls, const char *Name) { 3238 for (auto Call : Calls) 3239 setLibcallName(Call, Name); 3240 } 3241 3242 /// Get the libcall routine name for the specified libcall. 3243 const char *getLibcallName(RTLIB::Libcall Call) const { 3244 return LibcallRoutineNames[Call]; 3245 } 3246 3247 /// Override the default CondCode to be used to test the result of the 3248 /// comparison libcall against zero. 3249 void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) { 3250 CmpLibcallCCs[Call] = CC; 3251 } 3252 3253 /// Get the CondCode that's to be used to test the result of the comparison 3254 /// libcall against zero. 3255 ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const { 3256 return CmpLibcallCCs[Call]; 3257 } 3258 3259 /// Set the CallingConv that should be used for the specified libcall. 3260 void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) { 3261 LibcallCallingConvs[Call] = CC; 3262 } 3263 3264 /// Get the CallingConv that should be used for the specified libcall. 3265 CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const { 3266 return LibcallCallingConvs[Call]; 3267 } 3268 3269 /// Execute target specific actions to finalize target lowering. 3270 /// This is used to set extra flags in MachineFrameInformation and freezing 3271 /// the set of reserved registers. 3272 /// The default implementation just freezes the set of reserved registers. 3273 virtual void finalizeLowering(MachineFunction &MF) const; 3274 3275 //===----------------------------------------------------------------------===// 3276 // GlobalISel Hooks 3277 //===----------------------------------------------------------------------===// 3278 /// Check whether or not \p MI needs to be moved close to its uses. 3279 virtual bool shouldLocalize(const MachineInstr &MI, const TargetTransformInfo *TTI) const; 3280 3281 3282 private: 3283 const TargetMachine &TM; 3284 3285 /// Tells the code generator that the target has multiple (allocatable) 3286 /// condition registers that can be used to store the results of comparisons 3287 /// for use by selects and conditional branches. With multiple condition 3288 /// registers, the code generator will not aggressively sink comparisons into 3289 /// the blocks of their users. 3290 bool HasMultipleConditionRegisters; 3291 3292 /// Tells the code generator that the target has BitExtract instructions. 3293 /// The code generator will aggressively sink "shift"s into the blocks of 3294 /// their users if the users will generate "and" instructions which can be 3295 /// combined with "shift" to BitExtract instructions. 3296 bool HasExtractBitsInsn; 3297 3298 /// Tells the code generator to bypass slow divide or remainder 3299 /// instructions. 
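/// Targets populate this map through addBypassSlowDiv(), declared elsewhere
/// in this class; a sketch of typical constructor code:
/// \code
///   // In the target's TargetLowering constructor:
///   addBypassSlowDiv(32, 8); // Try an 8-bit div before a slow 32-bit div.
/// \endcode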
/// For example, a BypassSlowDivWidths entry of {32, 8} tells the code
/// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
/// div/rem when the operands are positive and less than 256.
DenseMap<unsigned int, unsigned int> BypassSlowDivWidths;

/// Tells the code generator that it shouldn't generate extra flow control
/// instructions and should attempt to combine flow control instructions via
/// predication.
bool JumpIsExpensive;

/// Information about the contents of the high-bits in boolean values held in
/// a type wider than i1. See getBooleanContents.
BooleanContent BooleanContents;

/// Information about the contents of the high-bits in boolean values held in
/// a type wider than i1. See getBooleanContents.
BooleanContent BooleanFloatContents;

/// Information about the contents of the high-bits in boolean vector values
/// when the element type is wider than i1. See getBooleanContents.
BooleanContent BooleanVectorContents;

/// The target scheduling preference: shortest possible total cycles or
/// lowest register usage.
Sched::Preference SchedPreferenceInfo;

/// The minimum alignment that any argument on the stack needs to have.
Align MinStackArgumentAlignment;

/// The minimum function alignment (used when optimizing for size, and to
/// prevent explicitly provided alignment from leading to incorrect code).
Align MinFunctionAlignment;

/// The preferred function alignment (used when alignment unspecified and
/// optimizing for speed).
Align PrefFunctionAlignment;

/// The preferred loop alignment (in log2, not in bytes).
Align PrefLoopAlignment;
/// The maximum amount of bytes permitted to be emitted for alignment.
unsigned MaxBytesForAlignment;

/// Size in bits of the maximum atomics size the backend supports.
/// Accesses larger than this will be expanded by AtomicExpandPass.
unsigned MaxAtomicSizeInBitsSupported;

/// Size in bits of the maximum div/rem size the backend supports.
/// Larger operations will be expanded by ExpandLargeDivRem.
unsigned MaxDivRemBitWidthSupported;

/// Size in bits of the largest fp convert the backend supports.
/// Larger operations will be expanded by ExpandLargeFPConvert.
unsigned MaxLargeFPConvertBitWidthSupported;

/// Size in bits of the minimum cmpxchg or ll/sc operation the
/// backend supports.
unsigned MinCmpXchgSizeInBits;

/// This indicates if the target supports unaligned atomic operations.
bool SupportsUnalignedAtomics;

/// If set to a physical register, this specifies the register that
/// llvm.stacksave/llvm.stackrestore should save and restore.
Register StackPointerRegisterToSaveRestore;

/// This indicates the default register class to use for each ValueType the
/// target supports natively.
const TargetRegisterClass *RegClassForVT[MVT::VALUETYPE_SIZE];
uint16_t NumRegistersForVT[MVT::VALUETYPE_SIZE];
MVT RegisterTypeForVT[MVT::VALUETYPE_SIZE];

/// This indicates the "representative" register class to use for each
/// ValueType the target supports natively. This information is used by the
/// scheduler to track register pressure.
By default, the representative 3373 /// register class is the largest legal super-reg register class of the 3374 /// register class of the specified type. e.g. On x86, i8, i16, and i32's 3375 /// representative class would be GR32. 3376 const TargetRegisterClass *RepRegClassForVT[MVT::VALUETYPE_SIZE] = {0}; 3377 3378 /// This indicates the "cost" of the "representative" register class for each 3379 /// ValueType. The cost is used by the scheduler to approximate register 3380 /// pressure. 3381 uint8_t RepRegClassCostForVT[MVT::VALUETYPE_SIZE]; 3382 3383 /// For any value types we are promoting or expanding, this contains the value 3384 /// type that we are changing to. For Expanded types, this contains one step 3385 /// of the expand (e.g. i64 -> i32), even if there are multiple steps required 3386 /// (e.g. i64 -> i16). For types natively supported by the system, this holds 3387 /// the same type (e.g. i32 -> i32). 3388 MVT TransformToType[MVT::VALUETYPE_SIZE]; 3389 3390 /// For each operation and each value type, keep a LegalizeAction that 3391 /// indicates how instruction selection should deal with the operation. Most 3392 /// operations are Legal (aka, supported natively by the target), but 3393 /// operations that are not should be described. Note that operations on 3394 /// non-legal value types are not described here. 3395 LegalizeAction OpActions[MVT::VALUETYPE_SIZE][ISD::BUILTIN_OP_END]; 3396 3397 /// For each load extension type and each value type, keep a LegalizeAction 3398 /// that indicates how instruction selection should deal with a load of a 3399 /// specific value type and extension type. Uses 4-bits to store the action 3400 /// for each of the 4 load ext types. 3401 uint16_t LoadExtActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE]; 3402 3403 /// For each value type pair keep a LegalizeAction that indicates whether a 3404 /// truncating store of a specific value type and truncating type is legal. 3405 LegalizeAction TruncStoreActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE]; 3406 3407 /// For each indexed mode and each value type, keep a quad of LegalizeAction 3408 /// that indicates how instruction selection should deal with the load / 3409 /// store / maskedload / maskedstore. 3410 /// 3411 /// The first dimension is the value_type for the reference. The second 3412 /// dimension represents the various modes for load store. 3413 uint16_t IndexedModeActions[MVT::VALUETYPE_SIZE][ISD::LAST_INDEXED_MODE]; 3414 3415 /// For each condition code (ISD::CondCode) keep a LegalizeAction that 3416 /// indicates how instruction selection should deal with the condition code. 3417 /// 3418 /// Because each CC action takes up 4 bits, we need to have the array size be 3419 /// large enough to fit all of the value types. This can be done by rounding 3420 /// up the MVT::VALUETYPE_SIZE value to the next multiple of 8. 3421 uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::VALUETYPE_SIZE + 7) / 8]; 3422 3423 ValueTypeActionImpl ValueTypeActions; 3424 3425 private: 3426 /// Targets can specify ISD nodes that they would like PerformDAGCombine 3427 /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this 3428 /// array. 3429 unsigned char 3430 TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT]; 3431 3432 /// For operations that must be promoted to a specific type, this holds the 3433 /// destination type. This map should be sparse, so don't hold it as an 3434 /// array. 
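/// For instance (an illustrative, hypothetical configuration), a target
/// that performs all i8 comparisons in i32 would populate it via:
/// \code
///   setOperationPromotedToType(ISD::SETCC, MVT::i8, MVT::i32);
/// \endcode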
///
/// Targets add entries to this map with AddPromotedToType(..), clients
/// access this with getTypeToPromoteTo(..).
std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
    PromoteToType;

/// Stores the name of each libcall.
const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL + 1];

/// The ISD::CondCode that should be used to test the result of each of the
/// comparison libcalls against zero.
ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];

/// Stores the CallingConv that should be used for each libcall.
CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];

/// Set default libcall names and calling conventions.
void InitLibcalls(const Triple &TT);

/// The bits of IndexedModeActions used to store the legalisation actions.
/// We store the data as | ML | MS | L | S | each taking 4 bits.
enum IndexedModeActionsBits {
  IMAB_Store = 0,
  IMAB_Load = 4,
  IMAB_MaskedStore = 8,
  IMAB_MaskedLoad = 12
};

void setIndexedModeAction(unsigned IdxMode, MVT VT, unsigned Shift,
                          LegalizeAction Action) {
  assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
         (unsigned)Action < 0xf && "Table isn't big enough!");
  unsigned Ty = (unsigned)VT.SimpleTy;
  IndexedModeActions[Ty][IdxMode] &= ~(0xf << Shift);
  IndexedModeActions[Ty][IdxMode] |= ((uint16_t)Action) << Shift;
}

LegalizeAction getIndexedModeAction(unsigned IdxMode, MVT VT,
                                    unsigned Shift) const {
  assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
         "Table isn't big enough!");
  unsigned Ty = (unsigned)VT.SimpleTy;
  return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] >> Shift) & 0xf);
}

protected:
/// Return true if the extension represented by \p I is free.
/// \pre \p I is a sign, zero, or fp extension and
///      is[Z|FP]ExtFree of the related types is not true.
virtual bool isExtFreeImpl(const Instruction *I) const { return false; }

/// Depth that GatherAllAliases should continue looking for chain
/// dependencies when trying to find a more preferable chain. As an
/// approximation, this should be more than the number of consecutive stores
/// expected to be merged.
unsigned GatherAllAliasesMaxDepth;

/// \brief Specify maximum number of store instructions per memset call.
///
/// When lowering \@llvm.memset this field specifies the maximum number of
/// store operations that may be substituted for the call to memset. Targets
/// must set this value based on the cost threshold for that target. Targets
/// should assume that the memset will be done using as many of the largest
/// store operations first, followed by smaller ones, if necessary, per
/// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
/// with 16-bit alignment would result in four 2-byte stores and one 1-byte
/// store. This only applies to setting a constant array of a constant size.
unsigned MaxStoresPerMemset;
/// Likewise for functions with the OptSize attribute.
unsigned MaxStoresPerMemsetOptSize;

/// \brief Specify maximum number of store instructions per memcpy call.
///
/// When lowering \@llvm.memcpy this field specifies the maximum number of
/// store operations that may be substituted for a call to memcpy.
/// Targets must set this value based on the cost threshold for that target.
/// Targets should assume that the memcpy will be done using as many of the
/// largest store operations first, followed by smaller ones, if necessary,
/// per alignment restrictions. For example, storing 7 bytes on a 32-bit
/// machine with 32-bit alignment would result in one 4-byte store, one
/// 2-byte store and one 1-byte store. This only applies to copying a
/// constant array of constant size.
unsigned MaxStoresPerMemcpy;
/// Likewise for functions with the OptSize attribute.
unsigned MaxStoresPerMemcpyOptSize;
/// \brief Specify max number of store instructions to glue in inlined
/// memcpy.
///
/// When memcpy is inlined based on MaxStoresPerMemcpy, specify maximum
/// number of store instructions to keep together. This helps in pairing and
/// vectorization later on.
unsigned MaxGluedStoresPerMemcpy = 0;

/// \brief Specify maximum number of load instructions per memcmp call.
///
/// When lowering \@llvm.memcmp this field specifies the maximum number of
/// pairs of load operations that may be substituted for a call to memcmp.
/// Targets must set this value based on the cost threshold for that target.
/// Targets should assume that the memcmp will be done using as many of the
/// largest load operations first, followed by smaller ones, if necessary,
/// per alignment restrictions. For example, loading 7 bytes on a 32-bit
/// machine with 32-bit alignment would result in one 4-byte load, one
/// 2-byte load and one 1-byte load. This only applies to copying a constant
/// array of constant size.
unsigned MaxLoadsPerMemcmp;
/// Likewise for functions with the OptSize attribute.
unsigned MaxLoadsPerMemcmpOptSize;

/// \brief Specify maximum number of store instructions per memmove call.
///
/// When lowering \@llvm.memmove this field specifies the maximum number of
/// store instructions that may be substituted for a call to memmove. Targets
/// must set this value based on the cost threshold for that target. Targets
/// should assume that the memmove will be done using as many of the largest
/// store operations first, followed by smaller ones, if necessary, per
/// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
/// with 8-bit alignment would result in nine 1-byte stores. This only
/// applies to copying a constant array of constant size.
unsigned MaxStoresPerMemmove;
/// Likewise for functions with the OptSize attribute.
unsigned MaxStoresPerMemmoveOptSize;

/// Tells the code generator that select is more expensive than a branch if
/// the branch is usually predicted right.
bool PredictableSelectIsExpensive;

/// \see enableExtLdPromotion.
bool EnableExtLdPromotion;

/// Return true if the value types that can be represented by the specified
/// register class are all legal.
bool isLegalRC(const TargetRegisterInfo &TRI,
               const TargetRegisterClass &RC) const;

/// Replace/modify any TargetFrameIndex operands with a target-dependent
/// sequence of memory operands that is recognized by PrologEpilogInserter.
MachineBasicBlock *emitPatchPoint(MachineInstr &MI,
                                  MachineBasicBlock *MBB) const;

bool IsStrictFPEnabled;
};

/// This class defines information used to lower LLVM code to legal
/// SelectionDAG operators that the target instruction selector can accept
/// natively.
///
/// This class also defines callbacks that targets must implement to lower
/// target-specific constructs to SelectionDAG operators.
class TargetLowering : public TargetLoweringBase {
public:
struct DAGCombinerInfo;
struct MakeLibCallOptions;

TargetLowering(const TargetLowering &) = delete;
TargetLowering &operator=(const TargetLowering &) = delete;

explicit TargetLowering(const TargetMachine &TM);

bool isPositionIndependent() const;

virtual bool isSDNodeSourceOfDivergence(const SDNode *N,
                                        FunctionLoweringInfo *FLI,
                                        UniformityInfo *UA) const {
  return false;
}

// Lets the target control the following reassociation of operands:
// (op (op x, c1), y) -> (op (op x, y), c1), where N0 is (op x, c1) and N1
// is y. By default, any case where N0 has a single use is considered
// profitable. This behavior reflects the condition replaced by this target
// hook call in the DAGCombiner. Any particular target can implement its own
// heuristic to restrict the common combiner.
virtual bool isReassocProfitable(SelectionDAG &DAG, SDValue N0,
                                 SDValue N1) const {
  return N0.hasOneUse();
}

// Lets the target control the following reassociation of operands:
// (op (op x, c1), y) -> (op (op x, y), c1), where N0 is (op x, c1) and N1
// is y. By default, any case where N0 has a single use is considered
// profitable. This behavior reflects the condition replaced by this target
// hook call in the combiner. Any particular target can implement its own
// heuristic to restrict the common combiner.
virtual bool isReassocProfitable(MachineRegisterInfo &MRI, Register N0,
                                 Register N1) const {
  return MRI.hasOneNonDBGUse(N0);
}

virtual bool isSDNodeAlwaysUniform(const SDNode *N) const {
  return false;
}

/// Returns true by value, base pointer and offset pointer and addressing
/// mode by reference if the node's address can be legally represented as
/// pre-indexed load / store address.
virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
                                       SDValue &/*Offset*/,
                                       ISD::MemIndexedMode &/*AM*/,
                                       SelectionDAG &/*DAG*/) const {
  return false;
}

/// Returns true by value, base pointer and offset pointer and addressing
/// mode by reference if this node can be combined with a load / store to
/// form a post-indexed load / store.
virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
                                        SDValue &/*Base*/,
                                        SDValue &/*Offset*/,
                                        ISD::MemIndexedMode &/*AM*/,
                                        SelectionDAG &/*DAG*/) const {
  return false;
}

/// Returns true if the specified base+offset is a legal indexed addressing
/// mode for this target. \p MI is the load or store instruction that is
/// being considered for transformation.
virtual bool isIndexingLegal(MachineInstr &MI, Register Base,
                             Register Offset, bool IsPre,
                             MachineRegisterInfo &MRI) const {
  return false;
}

/// Return the entry encoding for a jump table in the current function.
The 3655 /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum. 3656 virtual unsigned getJumpTableEncoding() const; 3657 3658 virtual const MCExpr * 3659 LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/, 3660 const MachineBasicBlock * /*MBB*/, unsigned /*uid*/, 3661 MCContext &/*Ctx*/) const { 3662 llvm_unreachable("Need to implement this hook if target has custom JTIs"); 3663 } 3664 3665 /// Returns relocation base for the given PIC jumptable. 3666 virtual SDValue getPICJumpTableRelocBase(SDValue Table, 3667 SelectionDAG &DAG) const; 3668 3669 /// This returns the relocation base for the given PIC jumptable, the same as 3670 /// getPICJumpTableRelocBase, but as an MCExpr. 3671 virtual const MCExpr * 3672 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, 3673 unsigned JTI, MCContext &Ctx) const; 3674 3675 /// Return true if folding a constant offset with the given GlobalAddress is 3676 /// legal. It is frequently not legal in PIC relocation models. 3677 virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const; 3678 3679 /// On x86, return true if the operand with index OpNo is a CALL or JUMP 3680 /// instruction, which can use either a memory constraint or an address 3681 /// constraint. -fasm-blocks "__asm call foo" lowers to 3682 /// call void asm sideeffect inteldialect "call ${0:P}", "*m..." 3683 /// 3684 /// This function is used by a hack to choose the address constraint, 3685 /// lowering to a direct call. 3686 virtual bool 3687 isInlineAsmTargetBranch(const SmallVectorImpl<StringRef> &AsmStrs, 3688 unsigned OpNo) const { 3689 return false; 3690 } 3691 3692 bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node, 3693 SDValue &Chain) const; 3694 3695 void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, 3696 SDValue &NewRHS, ISD::CondCode &CCCode, 3697 const SDLoc &DL, const SDValue OldLHS, 3698 const SDValue OldRHS) const; 3699 3700 void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, 3701 SDValue &NewRHS, ISD::CondCode &CCCode, 3702 const SDLoc &DL, const SDValue OldLHS, 3703 const SDValue OldRHS, SDValue &Chain, 3704 bool IsSignaling = false) const; 3705 3706 /// Returns a pair of (return value, chain). 3707 /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC. 3708 std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, 3709 EVT RetVT, ArrayRef<SDValue> Ops, 3710 MakeLibCallOptions CallOptions, 3711 const SDLoc &dl, 3712 SDValue Chain = SDValue()) const; 3713 3714 /// Check whether parameters to a call that are passed in callee saved 3715 /// registers are the same as from the calling function. This needs to be 3716 /// checked for tail call eligibility. 3717 bool parametersInCSRMatch(const MachineRegisterInfo &MRI, 3718 const uint32_t *CallerPreservedMask, 3719 const SmallVectorImpl<CCValAssign> &ArgLocs, 3720 const SmallVectorImpl<SDValue> &OutVals) const; 3721 3722 //===--------------------------------------------------------------------===// 3723 // TargetLowering Optimization Methods 3724 // 3725 3726 /// A convenience struct that encapsulates a DAG, and two SDValues for 3727 /// returning information from TargetLowering to its clients that want to 3728 /// combine. 
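///
/// An illustrative use from a target combine (SimplifyDemandedBits and
/// CommitTargetLoweringOpt are declared later in this file; the demanded
/// mask here is just an example):
/// \code
///   // In MyTargetLowering::PerformDAGCombine, where N is the node being
///   // combined and Op an operand whose upper half is known irrelevant:
///   TargetLoweringOpt TLO(DCI.DAG, !DCI.isBeforeLegalize(),
///                         !DCI.isBeforeLegalizeOps());
///   KnownBits Known;
///   APInt Demanded = APInt::getLowBitsSet(64, 32); // only the low half
///   if (SimplifyDemandedBits(Op, Demanded, Known, TLO)) {
///     DCI.CommitTargetLoweringOpt(TLO);
///     return SDValue(N, 0); // Op was simplified in place.
///   }
/// \endcode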
struct TargetLoweringOpt {
  SelectionDAG &DAG;
  bool LegalTys;
  bool LegalOps;
  SDValue Old;
  SDValue New;

  explicit TargetLoweringOpt(SelectionDAG &InDAG, bool LT, bool LO)
      : DAG(InDAG), LegalTys(LT), LegalOps(LO) {}

  bool LegalTypes() const { return LegalTys; }
  bool LegalOperations() const { return LegalOps; }

  bool CombineTo(SDValue O, SDValue N) {
    Old = O;
    New = N;
    return true;
  }
};

/// Determines the optimal series of memory ops to replace the memset /
/// memcpy. Return true if the number of memory ops is below the threshold
/// (Limit). Note that this is always the case when Limit is ~0.
/// It returns the types of the sequence of memory ops to perform
/// memset / memcpy by reference.
virtual bool
findOptimalMemOpLowering(std::vector<EVT> &MemOps, unsigned Limit,
                         const MemOp &Op, unsigned DstAS, unsigned SrcAS,
                         const AttributeList &FuncAttributes) const;

/// Check to see if the specified operand of the specified instruction is a
/// constant integer. If so, check to see if there are any bits set in the
/// constant that are not demanded. If so, shrink the constant and return
/// true.
bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
                            const APInt &DemandedElts,
                            TargetLoweringOpt &TLO) const;

/// Helper wrapper around ShrinkDemandedConstant, demanding all elements.
bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
                            TargetLoweringOpt &TLO) const;

// Target hook to do target-specific const optimization, which is called by
// ShrinkDemandedConstant. This function should return true if the target
// doesn't want ShrinkDemandedConstant to further optimize the constant.
virtual bool targetShrinkDemandedConstant(SDValue Op,
                                          const APInt &DemandedBits,
                                          const APInt &DemandedElts,
                                          TargetLoweringOpt &TLO) const {
  return false;
}

/// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free. This
/// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
/// generalized for targets with other types of implicit widening casts.
bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth,
                      const APInt &DemandedBits,
                      TargetLoweringOpt &TLO) const;

/// Look at Op. At this point, we know that only the DemandedBits bits of
/// the result of Op are ever used downstream. If we can use this
/// information to simplify Op, create a new simplified DAG node and return
/// true, returning the original and new nodes in Old and New. Otherwise,
/// analyze the expression and return a mask of KnownOne and KnownZero bits
/// for the expression (used to simplify the caller). The KnownZero/One bits
/// may only be accurate for those bits in the Demanded masks.
/// \p AssumeSingleUse When this parameter is true, this function will
/// attempt to simplify \p Op even if there are multiple uses.
/// Callers are responsible for correctly updating the DAG based on the
/// results of this function, because simply replacing TLO.Old with TLO.New
/// will be incorrect when this parameter is true and TLO.Old has multiple
/// uses.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                          const APInt &DemandedElts, KnownBits &Known,
                          TargetLoweringOpt &TLO, unsigned Depth = 0,
                          bool AssumeSingleUse = false) const;

/// Helper wrapper around SimplifyDemandedBits, demanding all elements.
/// Adds Op back to the worklist upon success.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                          KnownBits &Known, TargetLoweringOpt &TLO,
                          unsigned Depth = 0,
                          bool AssumeSingleUse = false) const;

/// Helper wrapper around SimplifyDemandedBits.
/// Adds Op back to the worklist upon success.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                          DAGCombinerInfo &DCI) const;

/// Helper wrapper around SimplifyDemandedBits.
/// Adds Op back to the worklist upon success.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                          const APInt &DemandedElts,
                          DAGCombinerInfo &DCI) const;

/// More limited version of SimplifyDemandedBits that can be used to "look
/// through" ops that don't contribute to the DemandedBits/DemandedElts -
/// bitwise ops etc.
SDValue SimplifyMultipleUseDemandedBits(SDValue Op,
                                        const APInt &DemandedBits,
                                        const APInt &DemandedElts,
                                        SelectionDAG &DAG,
                                        unsigned Depth = 0) const;

/// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all
/// elements.
SDValue SimplifyMultipleUseDemandedBits(SDValue Op,
                                        const APInt &DemandedBits,
                                        SelectionDAG &DAG,
                                        unsigned Depth = 0) const;

/// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all
/// bits from only some vector elements.
SDValue SimplifyMultipleUseDemandedVectorElts(SDValue Op,
                                              const APInt &DemandedElts,
                                              SelectionDAG &DAG,
                                              unsigned Depth = 0) const;

/// Look at Vector Op. At this point, we know that only the DemandedElts
/// elements of the result of Op are ever used downstream. If we can use
/// this information to simplify Op, create a new simplified DAG node and
/// return true, storing the original and new nodes in TLO.
/// Otherwise, analyze the expression and return a mask of KnownUndef and
/// KnownZero elements for the expression (used to simplify the caller).
/// The KnownUndef/Zero elements may only be accurate for those bits
/// in the DemandedMask.
/// \p AssumeSingleUse When this parameter is true, this function will
/// attempt to simplify \p Op even if there are multiple uses.
/// Callers are responsible for correctly updating the DAG based on the
/// results of this function, because simply replacing TLO.Old with TLO.New
/// will be incorrect when this parameter is true and TLO.Old has multiple
/// uses.
bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask,
                                APInt &KnownUndef, APInt &KnownZero,
                                TargetLoweringOpt &TLO, unsigned Depth = 0,
                                bool AssumeSingleUse = false) const;

/// Helper wrapper around SimplifyDemandedVectorElts.
/// Adds Op back to the worklist upon success.
bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts,
                                DAGCombinerInfo &DCI) const;

/// Return true if the target supports simplifying demanded vector elements
/// by converting them to undefs.
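///
/// A sketch of a conservative override for a hypothetical target that does
/// not want new undef lanes introduced once operations are legal:
/// \code
///   bool MyTargetLowering::shouldSimplifyDemandedVectorElts(
///       SDValue Op, const TargetLoweringOpt &TLO) const {
///     return !TLO.LegalOperations();
///   }
/// \endcode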
virtual bool
shouldSimplifyDemandedVectorElts(SDValue Op,
                                 const TargetLoweringOpt &TLO) const {
  return true;
}

/// Determine which of the bits specified in Mask are known to be either
/// zero or one and return them in the KnownZero/KnownOne bitsets. The
/// DemandedElts argument allows us to only collect the known bits that are
/// shared by the requested vector elements.
virtual void computeKnownBitsForTargetNode(const SDValue Op,
                                           KnownBits &Known,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth = 0) const;

/// Determine which of the bits specified in Mask are known to be either
/// zero or one and return them in the KnownZero/KnownOne bitsets. The
/// DemandedElts argument allows us to only collect the known bits that are
/// shared by the requested vector elements. This is for GISel.
virtual void computeKnownBitsForTargetInstr(GISelKnownBits &Analysis,
                                            Register R, KnownBits &Known,
                                            const APInt &DemandedElts,
                                            const MachineRegisterInfo &MRI,
                                            unsigned Depth = 0) const;

/// Determine the known alignment for the pointer value \p R. This can
/// typically be inferred from the number of low known 0 bits. However, for
/// a pointer with a non-integral address space, the alignment value may be
/// independent from the known low bits.
virtual Align computeKnownAlignForTargetInstr(GISelKnownBits &Analysis,
                                              Register R,
                                              const MachineRegisterInfo &MRI,
                                              unsigned Depth = 0) const;

/// Determine which of the bits of FrameIndex \p FIOp are known to be 0.
/// Default implementation computes low bits based on alignment
/// information. This should preserve known bits passed into it.
virtual void computeKnownBitsForFrameIndex(int FIOp,
                                           KnownBits &Known,
                                           const MachineFunction &MF) const;

/// This method can be implemented by targets that want to expose additional
/// information about sign bits to the DAG Combiner. The DemandedElts
/// argument allows us to only collect the minimum sign bits that are shared
/// by the requested vector elements.
virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                                 const APInt &DemandedElts,
                                                 const SelectionDAG &DAG,
                                                 unsigned Depth = 0) const;

/// This method can be implemented by targets that want to expose additional
/// information about sign bits to GlobalISel combiners. The DemandedElts
/// argument allows us to only collect the minimum sign bits that are shared
/// by the requested vector elements.
virtual unsigned computeNumSignBitsForTargetInstr(
    GISelKnownBits &Analysis, Register R, const APInt &DemandedElts,
    const MachineRegisterInfo &MRI, unsigned Depth = 0) const;

/// Attempt to simplify any target nodes based on the demanded vector
/// elements, returning true on success. Otherwise, analyze the expression
/// and return a mask of KnownUndef and KnownZero elements for the
/// expression (used to simplify the caller). The KnownUndef/Zero elements
/// may only be accurate for those bits in the DemandedMask.
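///
/// A sketch of an override for a hypothetical target node MYISD::VBROADCAST
/// that duplicates lane 0, falling back to the default handling otherwise:
/// \code
///   bool MyTargetLowering::SimplifyDemandedVectorEltsForTargetNode(
///       SDValue Op, const APInt &DemandedElts, APInt &KnownUndef,
///       APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth) const {
///     if (Op.getOpcode() == MYISD::VBROADCAST && DemandedElts.isZero())
///       return TLO.CombineTo(Op, TLO.DAG.getUNDEF(Op.getValueType()));
///     return TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
///         Op, DemandedElts, KnownUndef, KnownZero, TLO, Depth);
///   }
/// \endcode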
  virtual bool SimplifyDemandedVectorEltsForTargetNode(
      SDValue Op, const APInt &DemandedElts, APInt &KnownUndef,
      APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth = 0) const;

  /// Attempt to simplify any target nodes based on the demanded bits/elts,
  /// returning true on success. Otherwise, analyze the
  /// expression and return a mask of KnownOne and KnownZero bits for the
  /// expression (used to simplify the caller). The KnownZero/One bits may only
  /// be accurate for those bits in the Demanded masks.
  virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op,
                                                 const APInt &DemandedBits,
                                                 const APInt &DemandedElts,
                                                 KnownBits &Known,
                                                 TargetLoweringOpt &TLO,
                                                 unsigned Depth = 0) const;

  /// More limited version of SimplifyDemandedBits that can be used to "look
  /// through" ops that don't contribute to the DemandedBits/DemandedElts -
  /// bitwise ops etc.
  virtual SDValue SimplifyMultipleUseDemandedBitsForTargetNode(
      SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
      SelectionDAG &DAG, unsigned Depth) const;

  /// Return true if this function can prove that \p Op is never poison
  /// and, if \p PoisonOnly is false, does not have undef bits. The DemandedElts
  /// argument limits the check to the requested vector elements.
  virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(
      SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
      bool PoisonOnly, unsigned Depth) const;

  /// Return true if Op can create undef or poison from non-undef & non-poison
  /// operands. The DemandedElts argument limits the check to the requested
  /// vector elements.
  virtual bool
  canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts,
                                      const SelectionDAG &DAG, bool PoisonOnly,
                                      bool ConsiderFlags, unsigned Depth) const;

  /// Tries to build a legal vector shuffle using the provided parameters
  /// or equivalent variations. The Mask argument may be modified as the
  /// function tries different variations.
  /// Returns an empty SDValue if the operation fails.
  SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0,
                                  SDValue N1, MutableArrayRef<int> Mask,
                                  SelectionDAG &DAG) const;

  /// This method returns the constant pool value that will be loaded by LD.
  /// NOTE: You must check for implicit extensions of the constant by LD.
  virtual const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const;

  /// If \p SNaN is false, \returns true if \p Op is known to never be any
  /// NaN. If \p SNaN is true, returns true if \p Op is known to never be a
  /// signaling NaN.
  virtual bool isKnownNeverNaNForTargetNode(SDValue Op,
                                            const SelectionDAG &DAG,
                                            bool SNaN = false,
                                            unsigned Depth = 0) const;

  /// Return true if vector \p Op has the same value across all \p DemandedElts,
  /// indicating any elements which may be undef in the output \p UndefElts.
  virtual bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts,
                                         APInt &UndefElts,
                                         const SelectionDAG &DAG,
                                         unsigned Depth = 0) const;

  /// Returns true if the given \p Op is considered a canonical constant for the
  /// target, which should not be transformed back into a BUILD_VECTOR.
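  ///
  /// For illustration, a target with its own splat-like node might extend the
  /// default like this (a sketch; MyTargetISD::VDUP is a placeholder opcode):
  /// \code
  ///   bool MyTargetLowering::isTargetCanonicalConstantNode(SDValue Op) const {
  ///     return Op.getOpcode() == MyTargetISD::VDUP ||
  ///            TargetLowering::isTargetCanonicalConstantNode(Op);
  ///   }
  /// \endcode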
  virtual bool isTargetCanonicalConstantNode(SDValue Op) const {
    return Op.getOpcode() == ISD::SPLAT_VECTOR;
  }

  struct DAGCombinerInfo {
    void *DC; // The DAG Combiner object.
    CombineLevel Level;
    bool CalledByLegalizer;

  public:
    SelectionDAG &DAG;

    DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
        : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}

    bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
    bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
    bool isAfterLegalizeDAG() const { return Level >= AfterLegalizeDAG; }
    CombineLevel getDAGCombineLevel() { return Level; }
    bool isCalledByLegalizer() const { return CalledByLegalizer; }

    void AddToWorklist(SDNode *N);
    SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo = true);
    SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
    SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);

    bool recursivelyDeleteUnusedNodes(SDNode *N);

    void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
  };

  /// Return true if N is a constant or constant vector equal to the true value
  /// from getBooleanContents().
  bool isConstTrueVal(SDValue N) const;

  /// Return true if N is a constant or constant vector equal to the false value
  /// from getBooleanContents().
  bool isConstFalseVal(SDValue N) const;

  /// Return true if \p N is a true value when extended to \p VT.
  bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const;

  /// Try to simplify a setcc built with the specified operands and cc. If it is
  /// unable to simplify it, return a null SDValue.
  SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                        bool foldBooleans, DAGCombinerInfo &DCI,
                        const SDLoc &dl) const;

  // For targets which wrap address, unwrap for analysis.
  virtual SDValue unwrapAddress(SDValue N) const { return N; }

  /// Returns true (and the GlobalValue and the offset) if the node is a
  /// GlobalAddress + offset.
  virtual bool
  isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;

  /// This method will be invoked for all target nodes and for any
  /// target-independent nodes that the target has registered to invoke it
  /// for.
  ///
  /// The semantics are as follows:
  /// Return Value:
  ///   SDValue.Val == 0 - No change was made
  ///   SDValue.Val == N - N was replaced, is dead, and is already handled.
  ///   otherwise        - N should be replaced by the returned Operand.
  ///
  /// In addition, methods provided by DAGCombinerInfo may be used to perform
  /// more complex transformations.
  ///
  virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  /// Return true if it is profitable to move this shift by a constant amount
  /// through its operand, adjusting any immediate operands as necessary to
  /// preserve semantics. This transformation may not be desirable if it
  /// disrupts a particularly auspicious target-specific tree (e.g. bitfield
  /// extraction in AArch64). By default, it returns true.
  ///
  /// @param N the shift node
  /// @param Level the current DAGCombine legalization level.
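  ///
  /// For illustration, an override sketch that keeps (shl (and x, c1), c2)
  /// intact once the DAG has been legalized, so a later bitfield-style
  /// pattern can still match (a purely hypothetical policy):
  /// \code
  ///   bool MyTargetLowering::isDesirableToCommuteWithShift(
  ///       const SDNode *N, CombineLevel Level) const {
  ///     assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
  ///             N->getOpcode() == ISD::SRL) && "expected a shift");
  ///     if (Level >= AfterLegalizeDAG &&
  ///         N->getOperand(0).getOpcode() == ISD::AND)
  ///       return false;
  ///     return true;
  ///   }
  /// \endcode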
  virtual bool isDesirableToCommuteWithShift(const SDNode *N,
                                             CombineLevel Level) const {
    return true;
  }

  /// GlobalISel - return true if it is profitable to move this shift by a
  /// constant amount through its operand, adjusting any immediate operands as
  /// necessary to preserve semantics. This transformation may not be desirable
  /// if it disrupts a particularly auspicious target-specific tree (e.g.
  /// bitfield extraction in AArch64). By default, it returns true.
  ///
  /// @param MI the shift instruction
  /// @param IsAfterLegal true if running after legalization.
  virtual bool isDesirableToCommuteWithShift(const MachineInstr &MI,
                                             bool IsAfterLegal) const {
    return true;
  }

  // Return AndOrSETCCFoldKind::{AddAnd, ABS} if it is desirable to try and
  // optimize LogicOp(SETCC0, SETCC1). An example (what is implemented as of
  // writing this) is:
  //   With C as a power of 2 and C != 0 and C != INT_MIN:
  //   AddAnd:
  //     (icmp eq A, C) | (icmp eq A, -C)
  //         -> (icmp eq and(add(A, C), ~(C + C)), 0)
  //     (icmp ne A, C) & (icmp ne A, -C)
  //         -> (icmp ne and(add(A, C), ~(C + C)), 0)
  //   ABS:
  //     (icmp eq A, C) | (icmp eq A, -C)
  //         -> (icmp eq Abs(A), C)
  //     (icmp ne A, C) & (icmp ne A, -C)
  //         -> (icmp ne Abs(A), C)
  //
  // @param LogicOp the logic op
  // @param SETCC0 the first of the SETCC nodes
  // @param SETCC1 the second of the SETCC nodes
  virtual AndOrSETCCFoldKind isDesirableToCombineLogicOpOfSETCC(
      const SDNode *LogicOp, const SDNode *SETCC0, const SDNode *SETCC1) const {
    return AndOrSETCCFoldKind::None;
  }

  /// Return true if it is profitable to combine an XOR of a logical shift
  /// to create a logical shift of NOT. This transformation may not be desirable
  /// if it disrupts a particularly auspicious target-specific tree (e.g.
  /// BIC on ARM/AArch64). By default, it returns true.
  virtual bool isDesirableToCommuteXorWithShift(const SDNode *N) const {
    return true;
  }

  /// Return true if the target has native support for the specified value type
  /// and it is 'desirable' to use the type for the given node type. e.g. On x86
  /// i16 is legal, but undesirable since i16 instruction encodings are longer
  /// and some i16 instructions are slow.
  virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
    // By default, assume all legal types are desirable.
    return isTypeLegal(VT);
  }

  /// Return true if it is profitable for dag combiner to transform a floating
  /// point op of specified opcode to an equivalent op of an integer
  /// type. e.g. f32 load -> i32 load can be profitable on ARM.
  virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
                                                 EVT /*VT*/) const {
    return false;
  }

  /// This method queries the target whether it is beneficial for dag combiner
  /// to promote the specified node. If true, it should return the desired
  /// promotion type by reference.
  virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
    return false;
  }

  /// Return true if the target supports swifterror attribute. It optimizes
  /// loads and stores to reading and writing a specific register.
  virtual bool supportSwiftError() const {
    return false;
  }

  /// Return true if the target supports that a subset of CSRs for the given
  /// machine function is handled explicitly via copies.
  virtual bool supportSplitCSR(MachineFunction *MF) const {
    return false;
  }

  /// Return true if the target supports kcfi operand bundles.
  virtual bool supportKCFIBundles() const { return false; }

  /// Perform necessary initialization to handle a subset of CSRs explicitly
  /// via copies. This function is called at the beginning of instruction
  /// selection.
  virtual void initializeSplitCSR(MachineBasicBlock *Entry) const {
    llvm_unreachable("Not Implemented");
  }

  /// Insert explicit copies in entry and exit blocks. We copy a subset of
  /// CSRs to virtual registers in the entry block, and copy them back to
  /// physical registers in the exit blocks. This function is called at the end
  /// of instruction selection.
  virtual void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
    llvm_unreachable("Not Implemented");
  }

  /// Return the newly negated expression if the cost is not expensive and
  /// set the cost in \p Cost to indicate whether it is cheaper or neutral to
  /// do the negation.
  virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG,
                                       bool LegalOps, bool OptForSize,
                                       NegatibleCost &Cost,
                                       unsigned Depth = 0) const;

  SDValue getCheaperOrNeutralNegatedExpression(
      SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize,
      const NegatibleCost CostThreshold = NegatibleCost::Neutral,
      unsigned Depth = 0) const {
    NegatibleCost Cost = NegatibleCost::Expensive;
    SDValue Neg =
        getNegatedExpression(Op, DAG, LegalOps, OptForSize, Cost, Depth);
    if (!Neg)
      return SDValue();

    if (Cost <= CostThreshold)
      return Neg;

    // Remove the newly created node to avoid the side effect to the DAG.
    if (Neg->use_empty())
      DAG.RemoveDeadNode(Neg.getNode());
    return SDValue();
  }

  /// This is the helper function to return the newly negated expression only
  /// when the cost is cheaper.
  SDValue getCheaperNegatedExpression(SDValue Op, SelectionDAG &DAG,
                                      bool LegalOps, bool OptForSize,
                                      unsigned Depth = 0) const {
    return getCheaperOrNeutralNegatedExpression(Op, DAG, LegalOps, OptForSize,
                                                NegatibleCost::Cheaper, Depth);
  }

  /// This is the helper function to return the newly negated expression if
  /// the cost is not expensive.
  SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps,
                               bool OptForSize, unsigned Depth = 0) const {
    NegatibleCost Cost = NegatibleCost::Expensive;
    return getNegatedExpression(Op, DAG, LegalOps, OptForSize, Cost, Depth);
  }

  //===--------------------------------------------------------------------===//
  // Lowering methods - These methods must be implemented by targets so that
  // the SelectionDAGBuilder code knows how to lower these.
  //

  /// Target-specific splitting of values into parts that fit a register
  /// storing a legal type.
  virtual bool splitValueIntoRegisterParts(
      SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
      unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
    return false;
  }

  /// Allows the target to handle physreg-carried dependency
  /// in a target-specific way. Used from the ScheduleDAGSDNodes to decide
  /// whether to add the edge to the dependency graph.
  /// Def - input: Selection DAG node defining physical register
  /// User - input: Selection DAG node using physical register
  /// Op - input: Number of User operand
  /// PhysReg - inout: set to the physical register if the edge is
  /// necessary, unchanged otherwise
  /// Cost - inout: physical register copy cost.
  /// Returns 'true' if the edge is necessary, 'false' otherwise
  virtual bool checkForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
                                         const TargetRegisterInfo *TRI,
                                         const TargetInstrInfo *TII,
                                         unsigned &PhysReg, int &Cost) const {
    return false;
  }

  /// Target-specific combining of register parts into its original value.
  virtual SDValue
  joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL,
                             const SDValue *Parts, unsigned NumParts,
                             MVT PartVT, EVT ValueVT,
                             std::optional<CallingConv::ID> CC) const {
    return SDValue();
  }

  /// This hook must be implemented to lower the incoming (formal) arguments,
  /// described by the Ins array, into the specified DAG. The implementation
  /// should fill in the InVals array with legal-type argument values, and
  /// return the resulting token chain value.
  virtual SDValue LowerFormalArguments(
      SDValue /*Chain*/, CallingConv::ID /*CallConv*/, bool /*isVarArg*/,
      const SmallVectorImpl<ISD::InputArg> & /*Ins*/, const SDLoc & /*dl*/,
      SelectionDAG & /*DAG*/, SmallVectorImpl<SDValue> & /*InVals*/) const {
    llvm_unreachable("Not Implemented");
  }

  /// This structure contains all information that is necessary for lowering
  /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
  /// needs to lower a call, and targets will see this struct in their LowerCall
  /// implementation.
  struct CallLoweringInfo {
    SDValue Chain;
    Type *RetTy = nullptr;
    bool RetSExt : 1;
    bool RetZExt : 1;
    bool IsVarArg : 1;
    bool IsInReg : 1;
    bool DoesNotReturn : 1;
    bool IsReturnValueUsed : 1;
    bool IsConvergent : 1;
    bool IsPatchPoint : 1;
    bool IsPreallocated : 1;
    bool NoMerge : 1;

    // IsTailCall should be modified by implementations of
    // TargetLowering::LowerCall that perform tail call conversions.
    bool IsTailCall = false;

    // Is Call lowering done post SelectionDAG type legalization.
    bool IsPostTypeLegalization = false;

    unsigned NumFixedArgs = -1;
    CallingConv::ID CallConv = CallingConv::C;
    SDValue Callee;
    ArgListTy Args;
    SelectionDAG &DAG;
    SDLoc DL;
    const CallBase *CB = nullptr;
    SmallVector<ISD::OutputArg, 32> Outs;
    SmallVector<SDValue, 32> OutVals;
    SmallVector<ISD::InputArg, 32> Ins;
    SmallVector<SDValue, 4> InVals;
    const ConstantInt *CFIType = nullptr;

    CallLoweringInfo(SelectionDAG &DAG)
        : RetSExt(false), RetZExt(false), IsVarArg(false), IsInReg(false),
          DoesNotReturn(false), IsReturnValueUsed(true), IsConvergent(false),
          IsPatchPoint(false), IsPreallocated(false), NoMerge(false),
          DAG(DAG) {}

    CallLoweringInfo &setDebugLoc(const SDLoc &dl) {
      DL = dl;
      return *this;
    }

    CallLoweringInfo &setChain(SDValue InChain) {
      Chain = InChain;
      return *this;
    }

    // setCallee with target/module-specific attributes.
    CallLoweringInfo &setLibCallee(CallingConv::ID CC, Type *ResultType,
                                   SDValue Target, ArgListTy &&ArgsList) {
      RetTy = ResultType;
      Callee = Target;
      CallConv = CC;
      NumFixedArgs = ArgsList.size();
      Args = std::move(ArgsList);

      DAG.getTargetLoweringInfo().markLibCallAttributes(
          &(DAG.getMachineFunction()), CC, Args);
      return *this;
    }

    CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType,
                                SDValue Target, ArgListTy &&ArgsList) {
      RetTy = ResultType;
      Callee = Target;
      CallConv = CC;
      NumFixedArgs = ArgsList.size();
      Args = std::move(ArgsList);
      return *this;
    }

    CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy,
                                SDValue Target, ArgListTy &&ArgsList,
                                const CallBase &Call) {
      RetTy = ResultType;

      IsInReg = Call.hasRetAttr(Attribute::InReg);
      DoesNotReturn =
          Call.doesNotReturn() ||
          (!isa<InvokeInst>(Call) && isa<UnreachableInst>(Call.getNextNode()));
      IsVarArg = FTy->isVarArg();
      IsReturnValueUsed = !Call.use_empty();
      RetSExt = Call.hasRetAttr(Attribute::SExt);
      RetZExt = Call.hasRetAttr(Attribute::ZExt);
      NoMerge = Call.hasFnAttr(Attribute::NoMerge);

      Callee = Target;

      CallConv = Call.getCallingConv();
      NumFixedArgs = FTy->getNumParams();
      Args = std::move(ArgsList);

      CB = &Call;

      return *this;
    }

    CallLoweringInfo &setInRegister(bool Value = true) {
      IsInReg = Value;
      return *this;
    }

    CallLoweringInfo &setNoReturn(bool Value = true) {
      DoesNotReturn = Value;
      return *this;
    }

    CallLoweringInfo &setVarArg(bool Value = true) {
      IsVarArg = Value;
      return *this;
    }

    CallLoweringInfo &setTailCall(bool Value = true) {
      IsTailCall = Value;
      return *this;
    }

    CallLoweringInfo &setDiscardResult(bool Value = true) {
      IsReturnValueUsed = !Value;
      return *this;
    }

    CallLoweringInfo &setConvergent(bool Value = true) {
      IsConvergent = Value;
      return *this;
    }

    CallLoweringInfo &setSExtResult(bool Value = true) {
      RetSExt = Value;
      return *this;
    }

    CallLoweringInfo &setZExtResult(bool Value = true) {
      RetZExt = Value;
      return *this;
    }

    CallLoweringInfo &setIsPatchPoint(bool Value = true) {
      IsPatchPoint = Value;
      return *this;
    }

    CallLoweringInfo &setIsPreallocated(bool Value = true) {
      IsPreallocated = Value;
      return *this;
    }

    CallLoweringInfo &setIsPostTypeLegalization(bool Value = true) {
      IsPostTypeLegalization = Value;
      return *this;
    }

    CallLoweringInfo &setCFIType(const ConstantInt *Type) {
      CFIType = Type;
      return *this;
    }

    ArgListTy &getArgs() {
      return Args;
    }
  };

  /// This structure is used to pass arguments to the makeLibCall function.
  struct MakeLibCallOptions {
    // By passing the type list before softening to makeLibCall, the target
    // hook shouldExtendTypeInLibCall can get the original type before
    // softening.
    ArrayRef<EVT> OpsVTBeforeSoften;
    EVT RetVTBeforeSoften;
    bool IsSExt : 1;
    bool DoesNotReturn : 1;
    bool IsReturnValueUsed : 1;
    bool IsPostTypeLegalization : 1;
    bool IsSoften : 1;

    MakeLibCallOptions()
        : IsSExt(false), DoesNotReturn(false), IsReturnValueUsed(true),
          IsPostTypeLegalization(false), IsSoften(false) {}

    MakeLibCallOptions &setSExt(bool Value = true) {
      IsSExt = Value;
      return *this;
    }

    MakeLibCallOptions &setNoReturn(bool Value = true) {
      DoesNotReturn = Value;
      return *this;
    }

    MakeLibCallOptions &setDiscardResult(bool Value = true) {
      IsReturnValueUsed = !Value;
      return *this;
    }

    MakeLibCallOptions &setIsPostTypeLegalization(bool Value = true) {
      IsPostTypeLegalization = Value;
      return *this;
    }

    MakeLibCallOptions &setTypeListBeforeSoften(ArrayRef<EVT> OpsVT, EVT RetVT,
                                                bool Value = true) {
      OpsVTBeforeSoften = OpsVT;
      RetVTBeforeSoften = RetVT;
      IsSoften = Value;
      return *this;
    }
  };

  /// This function lowers an abstract call to a function into an actual call.
  /// This returns a pair of operands. The first element is the return value
  /// for the function (if RetTy is not VoidTy). The second element is the
  /// outgoing token chain. It calls LowerCall to do the actual lowering.
  std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;

  /// This hook must be implemented to lower calls into the specified
  /// DAG. The outgoing arguments to the call are described by the Outs array,
  /// and the values to be returned by the call are described by the Ins
  /// array. The implementation should fill in the InVals array with legal-type
  /// return values from the call, and return the resulting token chain value.
  virtual SDValue
  LowerCall(CallLoweringInfo &/*CLI*/,
            SmallVectorImpl<SDValue> &/*InVals*/) const {
    llvm_unreachable("Not Implemented");
  }

  /// Target-specific cleanup for formal ByVal parameters.
  virtual void HandleByVal(CCState *, unsigned &, Align) const {}

  /// This hook should be implemented to check whether the return values
  /// described by the Outs array can fit into the return registers. If false
  /// is returned, an sret-demotion is performed.
  virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
                              MachineFunction &/*MF*/, bool /*isVarArg*/,
                              const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
                              LLVMContext &/*Context*/) const
  {
    // Return true by default to get preexisting behavior.
    return true;
  }

  /// This hook must be implemented to lower outgoing return values, described
  /// by the Outs array, into the specified DAG. The implementation should
  /// return the resulting token chain value.
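  ///
  /// A minimal sketch of the usual shape of this hook, assuming all return
  /// values fit in registers (for illustration only; RetCC_MyTarget and
  /// MyTargetISD::RET_GLUE are placeholders for a tablegen-generated
  /// calling-convention function and a target return node, neither of which
  /// is part of this header):
  /// \code
  ///   SDValue MyTargetLowering::LowerReturn(
  ///       SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
  ///       const SmallVectorImpl<ISD::OutputArg> &Outs,
  ///       const SmallVectorImpl<SDValue> &OutVals, const SDLoc &dl,
  ///       SelectionDAG &DAG) const {
  ///     SmallVector<CCValAssign, 16> RVLocs;
  ///     CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
  ///                    *DAG.getContext());
  ///     CCInfo.AnalyzeReturn(Outs, RetCC_MyTarget);
  ///     SmallVector<SDValue, 4> RetOps(1, Chain);
  ///     SDValue Glue;
  ///     for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
  ///       // Copy each return value into the register assigned by the CC.
  ///       Chain = DAG.getCopyToReg(Chain, dl, RVLocs[i].getLocReg(),
  ///                                OutVals[i], Glue);
  ///       Glue = Chain.getValue(1);
  ///       RetOps.push_back(
  ///           DAG.getRegister(RVLocs[i].getLocReg(), RVLocs[i].getLocVT()));
  ///     }
  ///     RetOps[0] = Chain; // Update the chain operand.
  ///     if (Glue.getNode())
  ///       RetOps.push_back(Glue);
  ///     return DAG.getNode(MyTargetISD::RET_GLUE, dl, MVT::Other, RetOps);
  ///   }
  /// \endcode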
  virtual SDValue LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
                              bool /*isVarArg*/,
                              const SmallVectorImpl<ISD::OutputArg> & /*Outs*/,
                              const SmallVectorImpl<SDValue> & /*OutVals*/,
                              const SDLoc & /*dl*/,
                              SelectionDAG & /*DAG*/) const {
    llvm_unreachable("Not Implemented");
  }

  /// Return true if result of the specified node is used by a return node
  /// only. It also computes and returns the input chain for the tail call.
  ///
  /// This is used to determine whether it is possible to codegen a libcall as
  /// tail call at legalization time.
  virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
    return false;
  }

  /// Return true if the target may be able to emit the call instruction as a
  /// tail call. This is used by optimization passes to determine if it's
  /// profitable to duplicate return instructions to enable tailcall
  /// optimization.
  virtual bool mayBeEmittedAsTailCall(const CallInst *) const {
    return false;
  }

  /// Return the builtin name for the __builtin___clear_cache intrinsic.
  /// Default is to invoke the clear cache library call.
  virtual const char *getClearCacheBuiltinName() const {
    return "__clear_cache";
  }

  /// Return the register ID of the name passed in. Used by named register
  /// global variables extension. There is no target-independent behaviour
  /// so the default action is to bail.
  virtual Register getRegisterByName(const char* RegName, LLT Ty,
                                     const MachineFunction &MF) const {
    report_fatal_error("Named registers not implemented for this target");
  }

  /// Return the type that should be used to zero or sign extend a
  /// zeroext/signext integer return value. FIXME: Some C calling conventions
  /// require the return type to be promoted, but this is not true all the time,
  /// e.g. i1/i8/i16 on x86/x86_64. It is also not necessary for non-C calling
  /// conventions. The frontend should handle this and include all of the
  /// necessary information.
  virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
                                  ISD::NodeType /*ExtendKind*/) const {
    EVT MinVT = getRegisterType(MVT::i32);
    return VT.bitsLT(MinVT) ? MinVT : VT;
  }

  /// For some targets, an LLVM struct type must be broken down into multiple
  /// simple types, but the calling convention specifies that the entire struct
  /// must be passed in a block of consecutive registers.
  virtual bool
  functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv,
                                            bool isVarArg,
                                            const DataLayout &DL) const {
    return false;
  }

  /// For most targets, an LLVM type must be broken down into multiple
  /// smaller types. Usually the halves are ordered according to the endianness
  /// but for some platforms that would break. So this method will default to
  /// matching the endianness but can be overridden.
  virtual bool
  shouldSplitFunctionArgumentsAsLittleEndian(const DataLayout &DL) const {
    return DL.isLittleEndian();
  }

  /// Returns a 0 terminated array of registers that can be safely used as
  /// scratch registers.
  virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const {
    return nullptr;
  }

  /// Returns a 0 terminated array of rounding control registers that can be
  /// attached into strict FP call.
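  ///
  /// For illustration, targets typically back this hook (and
  /// getScratchRegisters above) with a small static table, e.g. (a sketch;
  /// MyTarget::FPCR is a placeholder control register):
  /// \code
  ///   ArrayRef<MCPhysReg>
  ///   MyTargetLowering::getRoundingControlRegisters() const {
  ///     static const MCPhysReg RCRegs[] = { MyTarget::FPCR };
  ///     return RCRegs;
  ///   }
  /// \endcode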
  virtual ArrayRef<MCPhysReg> getRoundingControlRegisters() const {
    return ArrayRef<MCPhysReg>();
  }

  /// This callback is used to prepare for a volatile or atomic load.
  /// It takes a chain node as input and returns the chain for the load itself.
  ///
  /// Having a callback like this is necessary for targets like SystemZ,
  /// which allows a CPU to reuse the result of a previous load indefinitely,
  /// even if a cache-coherent store is performed by another CPU. The default
  /// implementation does nothing.
  virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL,
                                              SelectionDAG &DAG) const {
    return Chain;
  }

  /// Should SelectionDAG lower an atomic store of the given kind as a normal
  /// StoreSDNode (as opposed to an AtomicSDNode)? NOTE: The intention is to
  /// eventually migrate all targets to using StoreSDNodes, but porting is
  /// being done target at a time.
  virtual bool lowerAtomicStoreAsStoreSDNode(const StoreInst &SI) const {
    assert(SI.isAtomic() && "violated precondition");
    return false;
  }

  /// Should SelectionDAG lower an atomic load of the given kind as a normal
  /// LoadSDNode (as opposed to an AtomicSDNode)? NOTE: The intention is to
  /// eventually migrate all targets to using LoadSDNodes, but porting is
  /// being done target at a time.
  virtual bool lowerAtomicLoadAsLoadSDNode(const LoadInst &LI) const {
    assert(LI.isAtomic() && "violated precondition");
    return false;
  }


  /// This callback is invoked by the type legalizer to legalize nodes with an
  /// illegal operand type but legal result types. It replaces the
  /// LowerOperation callback in the type legalizer. The reason we can not do
  /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to
  /// use this callback.
  ///
  /// TODO: Consider merging with ReplaceNodeResults.
  ///
  /// The target places new result values for the node in Results (their number
  /// and types must exactly match those of the original return values of
  /// the node), or leaves Results empty, which indicates that the node is not
  /// to be custom lowered after all.
  /// The default implementation calls LowerOperation.
  virtual void LowerOperationWrapper(SDNode *N,
                                     SmallVectorImpl<SDValue> &Results,
                                     SelectionDAG &DAG) const;

  /// This callback is invoked for operations that are unsupported by the
  /// target, which are registered to use 'custom' lowering, and whose defined
  /// values are all legal. If the target has no operations that require custom
  /// lowering, it need not implement this. The default implementation of this
  /// aborts.
  virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

  /// This callback is invoked when a node result type is illegal for the
  /// target, and the operation was registered to use 'custom' lowering for that
  /// result type. The target places new result values for the node in Results
  /// (their number and types must exactly match those of the original return
  /// values of the node), or leaves Results empty, which indicates that the
  /// node is not to be custom lowered after all.
  ///
  /// If the target has no operations that require custom lowering, it need not
  /// implement this. The default implementation aborts.
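  ///
  /// A sketch of the common shape of this hook (for illustration only):
  /// \code
  ///   void MyTargetLowering::ReplaceNodeResults(
  ///       SDNode *N, SmallVectorImpl<SDValue> &Results,
  ///       SelectionDAG &DAG) const {
  ///     SDLoc dl(N);
  ///     switch (N->getOpcode()) {
  ///     default:
  ///       return; // Leave Results empty: don't custom lower after all.
  ///     case ISD::CTPOP: {
  ///       // Hypothetical: widen an illegal narrow ctpop to i32, then
  ///       // truncate back to the original result type.
  ///       SDValue Wide = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32,
  ///                                  N->getOperand(0));
  ///       SDValue Pop = DAG.getNode(ISD::CTPOP, dl, MVT::i32, Wide);
  ///       Results.push_back(
  ///           DAG.getNode(ISD::TRUNCATE, dl, N->getValueType(0), Pop));
  ///       break;
  ///     }
  ///     }
  ///   }
  /// \endcode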
  virtual void ReplaceNodeResults(SDNode * /*N*/,
                                  SmallVectorImpl<SDValue> &/*Results*/,
                                  SelectionDAG &/*DAG*/) const {
    llvm_unreachable("ReplaceNodeResults not implemented for this target!");
  }

  /// This method returns the name of a target specific DAG node.
  virtual const char *getTargetNodeName(unsigned Opcode) const;

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  virtual FastISel *createFastISel(FunctionLoweringInfo &,
                                   const TargetLibraryInfo *) const {
    return nullptr;
  }

  bool verifyReturnAddressArgumentIsConstant(SDValue Op,
                                             SelectionDAG &DAG) const;

  //===--------------------------------------------------------------------===//
  // Inline Asm Support hooks
  //

  /// This hook allows the target to expand an inline asm call to be explicit
  /// llvm code if it wants to. This is useful for turning simple inline asms
  /// into LLVM intrinsics, which gives the compiler more information about the
  /// behavior of the code.
  virtual bool ExpandInlineAsm(CallInst *) const {
    return false;
  }

  enum ConstraintType {
    C_Register,      // Constraint represents specific register(s).
    C_RegisterClass, // Constraint represents any of register(s) in class.
    C_Memory,        // Memory constraint.
    C_Address,       // Address constraint.
    C_Immediate,     // Requires an immediate.
    C_Other,         // Something else.
    C_Unknown        // Unsupported constraint.
  };

  enum ConstraintWeight {
    // Generic weights.
    CW_Invalid = -1, // No match.
    CW_Okay = 0,     // Acceptable.
    CW_Good = 1,     // Good weight.
    CW_Better = 2,   // Better weight.
    CW_Best = 3,     // Best weight.

    // Well-known weights.
    CW_SpecificReg = CW_Okay, // Specific register operands.
    CW_Register = CW_Good,    // Register operands.
    CW_Memory = CW_Better,    // Memory operands.
    CW_Constant = CW_Best,    // Constant operand.
    CW_Default = CW_Okay      // Default or don't know type.
  };

  /// This contains information for each constraint that we are lowering.
  struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
    /// This contains the actual string for the code, like "m". TargetLowering
    /// picks the 'best' code from ConstraintInfo::Codes that most closely
    /// matches the operand.
    std::string ConstraintCode;

    /// Information about the constraint code, e.g. Register, RegisterClass,
    /// Memory, Other, Unknown.
    TargetLowering::ConstraintType ConstraintType = TargetLowering::C_Unknown;

    /// If this is the result output operand or a clobber, this is null,
    /// otherwise it is the incoming operand to the CallInst. This gets
    /// modified as the asm is processed.
    Value *CallOperandVal = nullptr;

    /// The ValueType for the operand value.
    MVT ConstraintVT = MVT::Other;

    /// Copy constructor for copying from a ConstraintInfo.
    AsmOperandInfo(InlineAsm::ConstraintInfo Info)
        : InlineAsm::ConstraintInfo(std::move(Info)) {}

    /// Return true if this is an input operand that is a matching constraint
    /// like "4".
    bool isMatchingInputConstraint() const;

    /// If this is an input matching constraint, this method returns the output
    /// operand it matches.
    unsigned getMatchedOperand() const;
  };

  using AsmOperandInfoVector = std::vector<AsmOperandInfo>;

  /// Split up the constraint string from the inline assembly value into the
  /// specific constraints and their prefixes, and also tie in the associated
  /// operand values. If this returns an empty vector, and if the constraint
  /// string itself isn't empty, there was an error parsing.
  virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL,
                                                const TargetRegisterInfo *TRI,
                                                const CallBase &Call) const;

  /// Examine constraint type and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  virtual ConstraintWeight getMultipleConstraintMatchWeight(
      AsmOperandInfo &info, int maIndex) const;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  virtual ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

  /// Determines the constraint code and constraint type to use for the specific
  /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
  /// If the actual operand being passed in is available, it can be passed in as
  /// Op, otherwise an empty SDValue can be passed.
  virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
                                      SDValue Op,
                                      SelectionDAG *DAG = nullptr) const;

  /// Given a constraint, return the type of constraint it is for this target.
  virtual ConstraintType getConstraintType(StringRef Constraint) const;

  /// Given a physical register constraint (e.g. {edx}), return the register
  /// number and the register class for the register.
  ///
  /// Given a register class constraint, like 'r', if this corresponds directly
  /// to an LLVM register class, return a register of 0 and the register class
  /// pointer.
  ///
  /// This should only be used for C_Register constraints. On error, this
  /// returns a register number of 0 and a null register class pointer.
  virtual std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const;

  virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const {
    if (ConstraintCode == "m")
      return InlineAsm::Constraint_m;
    if (ConstraintCode == "o")
      return InlineAsm::Constraint_o;
    if (ConstraintCode == "X")
      return InlineAsm::Constraint_X;
    if (ConstraintCode == "p")
      return InlineAsm::Constraint_p;
    return InlineAsm::Constraint_Unknown;
  }

  /// Try to replace an X constraint, which matches anything, with another that
  /// has more specific requirements based on the type of the corresponding
  /// operand. This returns null if there is no replacement to make.
  virtual const char *LowerXConstraint(EVT ConstraintVT) const;

  /// Lower the specified operand into the Ops vector. If it is invalid, don't
  /// add anything to Ops.
  virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                            std::vector<SDValue> &Ops,
                                            SelectionDAG &DAG) const;

  // Lower custom output constraints. If invalid, return SDValue().
  virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue,
                                              const SDLoc &DL,
                                              const AsmOperandInfo &OpInfo,
                                              SelectionDAG &DAG) const;

  // Targets may override this function to collect operands from the CallInst
  // and, for example, lower them into the SelectionDAG operands.
  virtual void CollectTargetIntrinsicOperands(const CallInst &I,
                                              SmallVectorImpl<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

  //===--------------------------------------------------------------------===//
  // Div utility functions
  //

  SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
                    SmallVectorImpl<SDNode *> &Created) const;
  SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
                    SmallVectorImpl<SDNode *> &Created) const;

  /// Targets may override this function to provide custom SDIV lowering for
  /// power-of-2 denominators. If the target returns an empty SDValue, LLVM
  /// assumes SDIV is expensive and replaces it with a series of other integer
  /// operations.
  virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                SelectionDAG &DAG,
                                SmallVectorImpl<SDNode *> &Created) const;

  /// Targets may override this function to provide custom SREM lowering for
  /// power-of-2 denominators. If the target returns an empty SDValue, LLVM
  /// assumes SREM is expensive and replaces it with a series of other integer
  /// operations.
  virtual SDValue BuildSREMPow2(SDNode *N, const APInt &Divisor,
                                SelectionDAG &DAG,
                                SmallVectorImpl<SDNode *> &Created) const;

  /// Indicate whether this target prefers to combine FDIVs with the same
  /// divisor. If the transform should never be done, return zero. If the
  /// transform should be done, return the minimum number of divisor uses
  /// that must exist.
  virtual unsigned combineRepeatedFPDivisors() const {
    return 0;
  }

  /// Hooks for building estimates in place of slower divisions and square
  /// roots.

  /// Return either a square root or its reciprocal estimate value for the input
  /// operand.
  /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
  /// 'Enabled' as set by a potential default override attribute.
  /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
  /// refinement iterations required to generate a sufficient (though not
  /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
  /// The boolean UseOneConstNR output is used to select a Newton-Raphson
  /// algorithm implementation that uses either one or two constants.
  /// The boolean Reciprocal is used to select whether the estimate is for the
  /// square root of the input operand or the reciprocal of its square root.
  /// A target may choose to implement its own refinement within this function.
  /// If that's true, then return '0' as the number of RefinementSteps to avoid
  /// any further refinement of the estimate.
  /// An empty SDValue return means no estimate sequence can be created.
  virtual SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
                                  int Enabled, int &RefinementSteps,
                                  bool &UseOneConstNR, bool Reciprocal) const {
    return SDValue();
  }

  /// Try to convert the fminnum/fmaxnum to a compare/select sequence. This is
  /// required for correctness since InstCombine might have canonicalized an
  /// fcmp+select sequence to a FMINNUM/FMAXNUM intrinsic. If we were to fall
  /// through to the default expansion/soften to libcall, we might introduce a
  /// link-time dependency on libm into a file that originally did not have one.
  SDValue createSelectForFMINNUM_FMAXNUM(SDNode *Node, SelectionDAG &DAG) const;

  /// Return a reciprocal estimate value for the input operand.
  /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
  /// 'Enabled' as set by a potential default override attribute.
  /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
  /// refinement iterations required to generate a sufficient (though not
  /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
  /// A target may choose to implement its own refinement within this function.
  /// If that's true, then return '0' as the number of RefinementSteps to avoid
  /// any further refinement of the estimate.
  /// An empty SDValue return means no estimate sequence can be created.
  virtual SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
                                   int Enabled, int &RefinementSteps) const {
    return SDValue();
  }

  /// Return a target-dependent comparison result if the input operand is
  /// suitable for use with a square root estimate calculation. For example, the
  /// comparison may check if the operand is NAN, INF, zero, normal, etc. The
  /// result should be used as the condition operand for a select or branch.
  virtual SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG,
                                   const DenormalMode &Mode) const;

  /// Return a target-dependent result if the input operand is not suitable for
  /// use with a square root estimate calculation.
  virtual SDValue getSqrtResultForDenormInput(SDValue Operand,
                                              SelectionDAG &DAG) const {
    return DAG.getConstantFP(0.0, SDLoc(Operand), Operand.getValueType());
  }

  //===--------------------------------------------------------------------===//
  // Legalization utility functions
  //

  /// Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes,
  /// respectively, each computing an n/2-bit part of the result.
  /// \param Result A vector that will be filled with the parts of the result
  ///        in little-endian order.
  /// \param LL Low bits of the LHS of the MUL. You can use this parameter
  ///        if you want to control how low bits are extracted from the LHS.
  /// \param LH High bits of the LHS of the MUL. See LL for meaning.
  /// \param RL Low bits of the RHS of the MUL. See LL for meaning.
  /// \param RH High bits of the RHS of the MUL. See LL for meaning.
  /// \returns true if the node has been expanded, false if it has not
  bool expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl, SDValue LHS,
                      SDValue RHS, SmallVectorImpl<SDValue> &Result, EVT HiLoVT,
                      SelectionDAG &DAG, MulExpansionKind Kind,
                      SDValue LL = SDValue(), SDValue LH = SDValue(),
                      SDValue RL = SDValue(), SDValue RH = SDValue()) const;

  /// Expand a MUL into two nodes. One that computes the high bits of
  /// the result and one that computes the low bits.
  /// \param HiLoVT The value type to use for the Lo and Hi nodes.
  /// \param LL Low bits of the LHS of the MUL. You can use this parameter
  ///        if you want to control how low bits are extracted from the LHS.
  /// \param LH High bits of the LHS of the MUL. See LL for meaning.
  /// \param RL Low bits of the RHS of the MUL. See LL for meaning.
  /// \param RH High bits of the RHS of the MUL. See LL for meaning.
  /// \returns true if the node has been expanded, false if it has not
  bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
                 SelectionDAG &DAG, MulExpansionKind Kind,
                 SDValue LL = SDValue(), SDValue LH = SDValue(),
                 SDValue RL = SDValue(), SDValue RH = SDValue()) const;

  /// Attempt to expand an n-bit div/rem/divrem by constant using an n/2-bit
  /// urem by constant and other arithmetic ops. The n/2-bit urem by constant
  /// will be expanded by DAGCombiner. This is not possible for all constant
  /// divisors.
  /// \param N Node to expand
  /// \param Result A vector that will be filled with the lo and high parts of
  ///        the results. For *DIVREM, this will be the quotient parts followed
  ///        by the remainder parts.
  /// \param HiLoVT The value type to use for the Lo and Hi parts. Should be
  ///        half of VT.
  /// \param LL Low bits of the LHS of the operation. You can use this
  ///        parameter if you want to control how low bits are extracted from
  ///        the LHS.
  /// \param LH High bits of the LHS of the operation. See LL for meaning.
  /// \returns true if the node has been expanded, false if it has not.
  bool expandDIVREMByConstant(SDNode *N, SmallVectorImpl<SDValue> &Result,
                              EVT HiLoVT, SelectionDAG &DAG,
                              SDValue LL = SDValue(),
                              SDValue LH = SDValue()) const;

  /// Expand funnel shift.
  /// \param N Node to expand
  /// \returns The expansion if successful, SDValue() otherwise
  SDValue expandFunnelShift(SDNode *N, SelectionDAG &DAG) const;

  /// Expand rotations.
  /// \param N Node to expand
  /// \param AllowVectorOps expand vector rotate, this should only be performed
  ///        if the legalization is happening outside of LegalizeVectorOps
  /// \returns The expansion if successful, SDValue() otherwise
  SDValue expandROT(SDNode *N, bool AllowVectorOps, SelectionDAG &DAG) const;

  /// Expand shift-by-parts.
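  /// (i.e. lower a double-width shift whose input and output are split into
  /// (Lo, Hi) half-width parts, as produced for SHL_PARTS/SRA_PARTS/SRL_PARTS;
  /// e.g. an i64 shift on a 32-bit target becomes i32 shifts plus an OR, with
  /// a select on whether the shift amount crosses the half-word boundary.)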
  /// \param N Node to expand
  /// \param Lo lower-output-part after conversion
  /// \param Hi upper-output-part after conversion
  void expandShiftParts(SDNode *N, SDValue &Lo, SDValue &Hi,
                        SelectionDAG &DAG) const;

  /// Expand float(f32) to SINT(i64) conversion
  /// \param N Node to expand
  /// \returns true if the expansion was successful, false otherwise
  bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;

  /// Expand float to UINT conversion
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \param Chain output chain after conversion
  /// \returns true if the expansion was successful, false otherwise
  bool expandFP_TO_UINT(SDNode *N, SDValue &Result, SDValue &Chain,
                        SelectionDAG &DAG) const;

  /// Expand UINT(i64) to double(f64) conversion
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \param Chain output chain after conversion
  /// \returns true if the expansion was successful, false otherwise
  bool expandUINT_TO_FP(SDNode *N, SDValue &Result, SDValue &Chain,
                        SelectionDAG &DAG) const;

  /// Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted inputs.
  SDValue expandFMINNUM_FMAXNUM(SDNode *N, SelectionDAG &DAG) const;

  /// Expand FP_TO_[US]INT_SAT into FP_TO_[US]INT and selects or min/max.
  /// \param N Node to expand
  /// \returns The expansion result
  SDValue expandFP_TO_INT_SAT(SDNode *N, SelectionDAG &DAG) const;

  /// Expand check for floating point class.
  /// \param ResultVT The type of intrinsic call result.
  /// \param Op The tested value.
  /// \param Test The test to perform.
  /// \param Flags The optimization flags.
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test,
                           SDNodeFlags Flags, const SDLoc &DL,
                           SelectionDAG &DAG) const;

  /// Expand CTPOP nodes. Expands vector/scalar CTPOP nodes;
  /// vector nodes can only succeed if all operations are legal/custom.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandCTPOP(SDNode *N, SelectionDAG &DAG) const;

  /// Expand VP_CTPOP nodes.
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandVPCTPOP(SDNode *N, SelectionDAG &DAG) const;

  /// Expand CTLZ/CTLZ_ZERO_UNDEF nodes. Expands vector/scalar CTLZ nodes;
  /// vector nodes can only succeed if all operations are legal/custom.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandCTLZ(SDNode *N, SelectionDAG &DAG) const;

  /// Expand VP_CTLZ/VP_CTLZ_ZERO_UNDEF nodes.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandVPCTLZ(SDNode *N, SelectionDAG &DAG) const;

  /// Expand CTTZ via Table Lookup.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue CTTZTableLookup(SDNode *N, SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                          SDValue Op, unsigned NumBitsPerElt) const;

  /// Expand CTTZ/CTTZ_ZERO_UNDEF nodes. Expands vector/scalar CTTZ nodes;
  /// vector nodes can only succeed if all operations are legal/custom.
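  /// (For example, when CTPOP is legal, one identity the expansion can use is
  /// cttz(x) = ctpop(~x & (x - 1)), which also yields the bit width for
  /// x == 0.)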
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandCTTZ(SDNode *N, SelectionDAG &DAG) const;

  /// Expand VP_CTTZ/VP_CTTZ_ZERO_UNDEF nodes.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandVPCTTZ(SDNode *N, SelectionDAG &DAG) const;

  /// Expand ABS nodes. Expands vector/scalar ABS nodes;
  /// vector nodes can only succeed if all operations are legal/custom.
  /// (ABS x) -> (XOR (ADD x, (SRA x, type_size - 1)), (SRA x, type_size - 1))
  /// \param N Node to expand
  /// \param IsNegative indicates negated abs
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandABS(SDNode *N, SelectionDAG &DAG,
                    bool IsNegative = false) const;

  /// Expand ABDS/ABDU nodes. Expands vector/scalar ABDS/ABDU nodes.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandABD(SDNode *N, SelectionDAG &DAG) const;

  /// Expand BSWAP nodes. Expands scalar/vector BSWAP nodes with i16/i32/i64
  /// scalar types. Returns SDValue() if the expansion fails.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandBSWAP(SDNode *N, SelectionDAG &DAG) const;

  /// Expand VP_BSWAP nodes. Expands VP_BSWAP nodes with i16/i32/i64 scalar
  /// types. Returns SDValue() if the expansion fails.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandVPBSWAP(SDNode *N, SelectionDAG &DAG) const;

  /// Expand BITREVERSE nodes. Expands scalar/vector BITREVERSE nodes.
  /// Returns SDValue() if the expansion fails.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandBITREVERSE(SDNode *N, SelectionDAG &DAG) const;

  /// Expand VP_BITREVERSE nodes. Expands VP_BITREVERSE nodes with
  /// i8/i16/i32/i64 scalar types.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandVPBITREVERSE(SDNode *N, SelectionDAG &DAG) const;

  /// Turn a load of a vector type into a load of the individual elements.
  /// \param LD load to expand
  /// \returns BUILD_VECTOR and TokenFactor nodes.
  std::pair<SDValue, SDValue> scalarizeVectorLoad(LoadSDNode *LD,
                                                  SelectionDAG &DAG) const;

  /// Turn a store of a vector type into stores of the individual elements.
  /// \param ST Store with a vector value type
  /// \returns TokenFactor of the individual store chains.
  SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const;

  /// Expands an unaligned load to 2 half-size loads for an integer, and
  /// possibly more for vectors.
  std::pair<SDValue, SDValue> expandUnalignedLoad(LoadSDNode *LD,
                                                  SelectionDAG &DAG) const;

  /// Expands an unaligned store to 2 half-size stores for integer values, and
  /// possibly more for vectors.
  SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const;

  /// Increments memory address \p Addr according to the type of the value
  /// \p DataVT that should be stored. If the data is stored in compressed
  /// form, the memory address should be incremented according to the number of
  /// the stored elements. This number is equal to the number of '1' bits
  /// in the \p Mask.
  /// \p DataVT is a vector type. \p Mask is a vector value.
  /// \p DataVT and \p Mask have the same number of vector elements.
  SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL,
                                 EVT DataVT, SelectionDAG &DAG,
                                 bool IsCompressedMemory) const;

  /// Get a pointer to vector element \p Idx located in memory for a vector of
  /// type \p VecVT starting at a base address of \p VecPtr. If \p Idx is out of
  /// bounds the returned pointer is unspecified, but will be within the vector
  /// bounds.
  SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
                                  SDValue Index) const;

  /// Get a pointer to a sub-vector of type \p SubVecVT at index \p Idx located
  /// in memory for a vector of type \p VecVT starting at a base address of
  /// \p VecPtr. If \p Idx plus the size of \p SubVecVT is out of bounds the
  /// returned pointer is unspecified, but the value returned will be such that
  /// the entire subvector would be within the vector bounds.
  SDValue getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
                                 EVT SubVecVT, SDValue Index) const;

  /// Method for building the DAG expansion of ISD::[US][MIN|MAX]. This
  /// method accepts integers as its arguments.
  SDValue expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT. This
  /// method accepts integers as its arguments.
  SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[US]SHLSAT. This
  /// method accepts integers as its arguments.
  SDValue expandShlSat(SDNode *Node, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[U|S]MULFIX[SAT]. This
  /// method accepts integers as its arguments.
  SDValue expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[US]DIVFIX[SAT]. This
  /// method accepts integers as its arguments.
  /// Note: This method may fail if the division could not be performed
  /// within the type. Clients must retry with a wider type if this happens.
  SDValue expandFixedPointDiv(unsigned Opcode, const SDLoc &dl,
                              SDValue LHS, SDValue RHS,
                              unsigned Scale, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::U(ADD|SUB)O. Expansion
  /// always succeeds and populates the Result and Overflow arguments.
  void expandUADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow,
                      SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::S(ADD|SUB)O. Expansion
  /// always succeeds and populates the Result and Overflow arguments.
  void expandSADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow,
                      SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[US]MULO. Returns whether
  /// expansion was successful and populates the Result and Overflow arguments.
  bool expandMULO(SDNode *Node, SDValue &Result, SDValue &Overflow,
                  SelectionDAG &DAG) const;

  /// Expand a VECREDUCE_* into an explicit calculation. If Count is specified,
  /// only the first Count elements of the vector are used.
  SDValue expandVecReduce(SDNode *Node, SelectionDAG &DAG) const;

  /// Expand a VECREDUCE_SEQ_* into an explicit ordered calculation.
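  /// (e.g. VECREDUCE_SEQ_FADD(Acc, <4 x f32> V) becomes
  /// fadd(fadd(fadd(fadd(Acc, V0), V1), V2), V3), preserving the strict
  /// evaluation order.)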
  /// Expand a VECREDUCE_* into an explicit calculation.
  SDValue expandVecReduce(SDNode *Node, SelectionDAG &DAG) const;

  /// Expand a VECREDUCE_SEQ_* into an explicit ordered calculation.
  SDValue expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const;

  /// Expand an SREM or UREM using SDIV/UDIV or SDIVREM/UDIVREM, if legal.
  /// Returns true if the expansion was successful.
  bool expandREM(SDNode *Node, SDValue &Result, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::VECTOR_SPLICE. This
  /// method accepts vectors as its arguments.
  SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const;

  /// Legalize a SETCC or VP_SETCC with given LHS and RHS and condition code
  /// CC on the current target. A VP_SETCC will additionally be given a Mask
  /// and/or EVL not equal to SDValue().
  ///
  /// If the SETCC has been legalized using AND / OR, then the legalized node
  /// will be stored in LHS. RHS and CC will be set to SDValue(). NeedInvert
  /// will be set to false. This will also hold if the VP_SETCC has been
  /// legalized using VP_AND / VP_OR.
  ///
  /// If the SETCC / VP_SETCC has been legalized by using
  /// getSetCCSwappedOperands(), then the values of LHS and RHS will be
  /// swapped, CC will be set to the new condition, and NeedInvert will be set
  /// to false.
  ///
  /// If the SETCC / VP_SETCC has been legalized using the inverse condcode,
  /// then LHS and RHS will be unchanged, CC will be set to the inverted
  /// condcode, and NeedInvert will be set to true. The caller must invert the
  /// result of the SETCC with SelectionDAG::getLogicalNOT() or take
  /// equivalent action to swap the effect of a true/false result.
  ///
  /// \returns true if the SETCC / VP_SETCC has been legalized, false if it
  /// hasn't.
  bool LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT, SDValue &LHS,
                             SDValue &RHS, SDValue &CC, SDValue Mask,
                             SDValue EVL, bool &NeedInvert, const SDLoc &dl,
                             SDValue &Chain, bool IsSignaling = false) const;
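  // A hedged sketch of the NeedInvert contract above, from the caller's
  // side. The surrounding variables are illustrative; getSetCC,
  // getLogicalNOT and CondCodeSDNode are the real SelectionDAG APIs:
  //
  //   SDValue LHS = ..., RHS = ..., CC = ...; // operands of the SETCC
  //   SDValue Mask, EVL, Chain;               // empty for a plain SETCC
  //   bool NeedInvert = false;
  //   if (LegalizeSetCCCondCode(DAG, VT, LHS, RHS, CC, Mask, EVL, NeedInvert,
  //                             dl, Chain)) {
  //     SDValue SetCC =
  //         CC.getNode() ? DAG.getSetCC(dl, VT, LHS, RHS,
  //                                     cast<CondCodeSDNode>(CC)->get())
  //                      : LHS; // already folded to an AND / OR of SETCCs
  //     if (NeedInvert) // legalized via inverse condcode: flip the result
  //       SetCC = DAG.getLogicalNOT(dl, SetCC, SetCC.getValueType());
  //     // ... use SetCC ...
  //   }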
  //===--------------------------------------------------------------------===//
  // Instruction Emitting Hooks
  //

  /// This method should be implemented by targets that mark instructions with
  /// the 'usesCustomInserter' flag. These instructions are special in various
  /// ways, which require special support to insert. The specified MachineInstr
  /// is created but not inserted into any basic blocks, and this method is
  /// called to expand it into a sequence of instructions, potentially also
  /// creating new basic blocks and control flow.
  /// As long as the returned basic block is different (i.e., we created a new
  /// one), the custom inserter is free to modify the rest of \p MBB.
  virtual MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const;

  /// This method should be implemented by targets that mark instructions with
  /// the 'hasPostISelHook' flag. These instructions must be adjusted after
  /// instruction selection by target hooks, e.g., to fill in optional defs
  /// for ARM 's'-setting instructions.
  virtual void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                             SDNode *Node) const;

  /// If this function returns true, SelectionDAGBuilder emits a
  /// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
  virtual bool useLoadStackGuardNode() const { return false; }

  virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
                                      const SDLoc &DL) const {
    llvm_unreachable("not implemented for this target");
  }

  /// Lower TLS global address SDNode for target-independent emulated TLS
  /// model.
  virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG) const;

  /// Expands target-specific indirect branch for the case of jump table
  /// expansion.
  virtual SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value,
                                         SDValue Addr,
                                         SelectionDAG &DAG) const {
    return DAG.getNode(ISD::BRIND, dl, MVT::Other, Value, Addr);
  }

  // seteq(x, 0) -> truncate(srl(ctlz(zext(x)), log2(#bits)))
  // If we're comparing for equality to zero and isCtlzFast is true, expose
  // the fact that this can be implemented as a ctlz/srl pair, so that the
  // DAG combiner can fold the new nodes.
  SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const;

private:
  SDValue foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                           const SDLoc &DL, DAGCombinerInfo &DCI) const;
  SDValue foldSetCCWithBinOp(EVT VT, SDValue N0, SDValue N1,
                             ISD::CondCode Cond, const SDLoc &DL,
                             DAGCombinerInfo &DCI) const;

  SDValue optimizeSetCCOfSignedTruncationCheck(EVT SCCVT, SDValue N0,
                                               SDValue N1, ISD::CondCode Cond,
                                               DAGCombinerInfo &DCI,
                                               const SDLoc &DL) const;

  // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0
  SDValue optimizeSetCCByHoistingAndByConstFromLogicalShift(
      EVT SCCVT, SDValue N0, SDValue N1C, ISD::CondCode Cond,
      DAGCombinerInfo &DCI, const SDLoc &DL) const;

  SDValue prepareUREMEqFold(EVT SETCCVT, SDValue REMNode,
                            SDValue CompTargetNode, ISD::CondCode Cond,
                            DAGCombinerInfo &DCI, const SDLoc &DL,
                            SmallVectorImpl<SDNode *> &Created) const;
  SDValue buildUREMEqFold(EVT SETCCVT, SDValue REMNode, SDValue CompTargetNode,
                          ISD::CondCode Cond, DAGCombinerInfo &DCI,
                          const SDLoc &DL) const;

  SDValue prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
                            SDValue CompTargetNode, ISD::CondCode Cond,
                            DAGCombinerInfo &DCI, const SDLoc &DL,
                            SmallVectorImpl<SDNode *> &Created) const;
  SDValue buildSREMEqFold(EVT SETCCVT, SDValue REMNode, SDValue CompTargetNode,
                          ISD::CondCode Cond, DAGCombinerInfo &DCI,
                          const SDLoc &DL) const;
};

/// Given an LLVM IR type and return type attributes, compute the return value
/// EVTs and flags, and optionally also the offsets, if the return value is
/// being lowered to memory.
void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr,
                   SmallVectorImpl<ISD::OutputArg> &Outs,
                   const TargetLowering &TLI, const DataLayout &DL);

} // end namespace llvm

#endif // LLVM_CODEGEN_TARGETLOWERING_H