//===- llvm/CodeGen/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file describes how to lower LLVM code to machine code. This has three
/// main components:
///
///  1. Which ValueTypes are natively supported by the target.
///  2. Which operations are supported for supported ValueTypes.
///  3. Cost thresholds for alternative implementations of certain operations.
///
/// In addition it has a few other components, like information about FP
/// immediates.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETLOWERING_H
#define LLVM_CODEGEN_TARGETLOWERING_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLArrayExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/MachineValueType.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdint>
#include <iterator>
#include <map>
#include <string>
#include <utility>
#include <vector>

namespace llvm {

class BlockFrequencyInfo;
class CCState;
class CCValAssign;
class Constant;
class FastISel;
class FunctionLoweringInfo;
class GlobalValue;
class GISelKnownBits;
class IntrinsicInst;
class IRBuilderBase;
struct KnownBits;
class LegacyDivergenceAnalysis;
class LLVMContext;
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MachineJumpTableInfo;
class MachineLoop;
class MachineRegisterInfo;
class MCContext;
class MCExpr;
class Module;
class ProfileSummaryInfo;
class TargetLibraryInfo;
class TargetMachine;
class TargetRegisterClass;
class TargetRegisterInfo;
class TargetTransformInfo;
class Value;

namespace Sched {

enum Preference {
  None,        // No preference
  Source,      // Follow source order.
  RegPressure, // Scheduling for lowest register pressure.
  Hybrid,      // Scheduling for both latency and register pressure.
  ILP,         // Scheduling for ILP in low register pressure mode.
  VLIW,        // Scheduling for VLIW targets.
  Fast,        // Fast suboptimal list scheduling
  Linearize    // Linearize DAG, no scheduling
};

} // end namespace Sched

// MemOp models a memory operation, either memset or memcpy/memmove.
struct MemOp {
private:
  // Shared
  uint64_t Size;
  bool DstAlignCanChange; // true if destination alignment can satisfy any
                          // constraint.
  Align DstAlign;         // Specified alignment of the memory operation.

  bool AllowOverlap;
  // memset only
  bool IsMemset;   // If set, this memory operation is a memset.
  bool ZeroMemset; // If set, clears out memory with zeros.
  // memcpy only
  bool MemcpyStrSrc; // Indicates whether the memcpy source is an in-register
                     // constant so it does not need to be loaded.
  Align SrcAlign;    // Inferred alignment of the source or default value if the
                     // memory operation does not need to load the value.
public:
  static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
                    Align SrcAlign, bool IsVolatile,
                    bool MemcpyStrSrc = false) {
    MemOp Op;
    Op.Size = Size;
    Op.DstAlignCanChange = DstAlignCanChange;
    Op.DstAlign = DstAlign;
    Op.AllowOverlap = !IsVolatile;
    Op.IsMemset = false;
    Op.ZeroMemset = false;
    Op.MemcpyStrSrc = MemcpyStrSrc;
    Op.SrcAlign = SrcAlign;
    return Op;
  }

  static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
                   bool IsZeroMemset, bool IsVolatile) {
    MemOp Op;
    Op.Size = Size;
    Op.DstAlignCanChange = DstAlignCanChange;
    Op.DstAlign = DstAlign;
    Op.AllowOverlap = !IsVolatile;
    Op.IsMemset = true;
    Op.ZeroMemset = IsZeroMemset;
    Op.MemcpyStrSrc = false;
    return Op;
  }

  uint64_t size() const { return Size; }
  Align getDstAlign() const {
    assert(!DstAlignCanChange);
    return DstAlign;
  }
  bool isFixedDstAlign() const { return !DstAlignCanChange; }
  bool allowOverlap() const { return AllowOverlap; }
  bool isMemset() const { return IsMemset; }
  bool isMemcpy() const { return !IsMemset; }
  bool isMemcpyWithFixedDstAlign() const {
    return isMemcpy() && !DstAlignCanChange;
  }
  bool isZeroMemset() const { return isMemset() && ZeroMemset; }
  bool isMemcpyStrSrc() const {
    assert(isMemcpy() && "Must be a memcpy");
    return MemcpyStrSrc;
  }
  Align getSrcAlign() const {
    assert(isMemcpy() && "Must be a memcpy");
    return SrcAlign;
  }
  bool isSrcAligned(Align AlignCheck) const {
    return isMemset() || llvm::isAligned(AlignCheck, SrcAlign.value());
  }
  bool isDstAligned(Align AlignCheck) const {
    return DstAlignCanChange || llvm::isAligned(AlignCheck, DstAlign.value());
  }
  bool isAligned(Align AlignCheck) const {
    return isSrcAligned(AlignCheck) && isDstAligned(AlignCheck);
  }
};
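
// Illustrative example (not part of the API): modeling a 32-byte memcpy whose
// destination alignment is fixed at 8 and whose source alignment is 4. Any
// target hook receiving a MemOp can query it as shown; the concrete values
// here are hypothetical.
// \code
//   MemOp Op = MemOp::Copy(/*Size=*/32, /*DstAlignCanChange=*/false,
//                          /*DstAlign=*/Align(8), /*SrcAlign=*/Align(4),
//                          /*IsVolatile=*/false);
//   assert(Op.isMemcpy() && Op.isFixedDstAlign());
//   bool OK = Op.isAligned(Align(4)); // true: both sides are 4-aligned
// \endcode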

/// This base class for TargetLowering contains the SelectionDAG-independent
/// parts that can be used from the rest of CodeGen.
class TargetLoweringBase {
public:
  /// This enum indicates whether operations are valid for a target, and if
  /// not, what action should be used to make them valid.
  enum LegalizeAction : uint8_t {
    Legal,   // The target natively supports this operation.
    Promote, // This operation should be executed in a larger type.
    Expand,  // Try to expand this to other ops, otherwise use a libcall.
    LibCall, // Don't try to expand this to other ops, always use a libcall.
    Custom   // Use the LowerOperation hook to implement custom lowering.
  };

  /// This enum indicates whether types are legal for a target, and if not,
  /// what action should be used to make them valid.
  enum LegalizeTypeAction : uint8_t {
    TypeLegal,           // The target natively supports this type.
    TypePromoteInteger,  // Replace this integer with a larger one.
    TypeExpandInteger,   // Split this integer into two of half the size.
    TypeSoftenFloat,     // Convert this float to a same size integer type.
    TypeExpandFloat,     // Split this float into two of half the size.
    TypeScalarizeVector, // Replace this one-element vector with its element.
    TypeSplitVector,     // Split this vector into two of half the size.
    TypeWidenVector,     // This vector should be widened into a larger vector.
    TypePromoteFloat,    // Replace this float with a larger one.
    TypeSoftPromoteHalf, // Soften half to i16 and use float to do arithmetic.
    TypeScalarizeScalableVector, // This action is explicitly left unimplemented.
                                 // While it is theoretically possible to
                                 // legalize operations on scalable types with a
                                 // loop that handles the vscale * #lanes of the
                                 // vector, this is non-trivial at SelectionDAG
                                 // level and these types are better to be
                                 // widened or promoted.
  };

  /// LegalizeKind holds the legalization kind that needs to happen to EVT
  /// in order to type-legalize it.
  using LegalizeKind = std::pair<LegalizeTypeAction, EVT>;

  /// Enum that describes how the target represents true/false values.
  enum BooleanContent {
    UndefinedBooleanContent,        // Only bit 0 counts, the rest can hold garbage.
    ZeroOrOneBooleanContent,        // All bits zero except for bit 0.
    ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
  };

  /// Enum that describes what type of support for selects the target has.
  enum SelectSupportKind {
    ScalarValSelect,     // The target supports scalar selects (ex: cmov).
    ScalarCondVectorVal, // The target supports selects with a scalar condition
                         // and vector values (ex: cmov).
    VectorMaskSelect     // The target supports vector selects with a vector
                         // mask (ex: x86 blends).
  };

  /// Enum that specifies what an atomic load/AtomicRMWInst is expanded
  /// to, if at all. Exists because different targets have different levels of
  /// support for these atomic instructions, and also have different options
  /// w.r.t. what they should expand to.
  enum class AtomicExpansionKind {
    None,          // Don't expand the instruction.
    CastToInteger, // Cast the atomic instruction to another type, e.g. from
                   // floating-point to integer type.
    LLSC,          // Expand the instruction into load-linked/store-conditional;
                   // used by ARM/AArch64.
    LLOnly,        // Expand the (load) instruction into just a load-linked,
                   // which has greater atomic guarantees than a normal load.
    CmpXChg,       // Expand the instruction into cmpxchg; used by at least X86.
    MaskedIntrinsic,  // Use a target-specific intrinsic for the LL/SC loop.
    BitTestIntrinsic, // Use a target-specific intrinsic for special bit
                      // operations; used by X86.
    Expand,        // Generic expansion in terms of other atomic operations.

    // Rewrite to a non-atomic form for use in a known non-preemptible
    // environment.
    NotAtomic
  };
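
  // Illustrative sketch of how a backend chooses among these actions in its
  // TargetLowering constructor. setOperationAction is declared later in this
  // class; the opcode/type pairings below are hypothetical, not a
  // recommendation for any particular target.
  // \code
  //   setOperationAction(ISD::CTPOP,  MVT::i32,   Expand);  // build from ops
  //   setOperationAction(ISD::FSIN,   MVT::f64,   LibCall); // always call libm
  //   setOperationAction(ISD::SELECT, MVT::v4i32, Custom);  // LowerOperation
  // \endcode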

  /// Enum that specifies when a multiplication should be expanded.
  enum class MulExpansionKind {
    Always,            // Always expand the instruction.
    OnlyLegalOrCustom, // Only expand when the resulting instructions are legal
                       // or custom.
  };

  /// Enum that specifies when a float negation is beneficial.
  enum class NegatibleCost {
    Cheaper = 0,  // Negated expression is cheaper.
    Neutral = 1,  // Negated expression has the same cost.
    Expensive = 2 // Negated expression is more expensive.
  };

  class ArgListEntry {
  public:
    Value *Val = nullptr;
    SDValue Node = SDValue();
    Type *Ty = nullptr;
    bool IsSExt : 1;
    bool IsZExt : 1;
    bool IsInReg : 1;
    bool IsSRet : 1;
    bool IsNest : 1;
    bool IsByVal : 1;
    bool IsByRef : 1;
    bool IsInAlloca : 1;
    bool IsPreallocated : 1;
    bool IsReturned : 1;
    bool IsSwiftSelf : 1;
    bool IsSwiftAsync : 1;
    bool IsSwiftError : 1;
    bool IsCFGuardTarget : 1;
    MaybeAlign Alignment = None;
    Type *IndirectType = nullptr;

    ArgListEntry()
        : IsSExt(false), IsZExt(false), IsInReg(false), IsSRet(false),
          IsNest(false), IsByVal(false), IsByRef(false), IsInAlloca(false),
          IsPreallocated(false), IsReturned(false), IsSwiftSelf(false),
          IsSwiftAsync(false), IsSwiftError(false), IsCFGuardTarget(false) {}

    void setAttributes(const CallBase *Call, unsigned ArgIdx);
  };
  using ArgListTy = std::vector<ArgListEntry>;

  virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC,
                                     ArgListTy &Args) const {}

  static ISD::NodeType getExtendForContent(BooleanContent Content) {
    switch (Content) {
    case UndefinedBooleanContent:
      // Extend by adding rubbish bits.
      return ISD::ANY_EXTEND;
    case ZeroOrOneBooleanContent:
      // Extend by adding zero bits.
      return ISD::ZERO_EXTEND;
    case ZeroOrNegativeOneBooleanContent:
      // Extend by copying the sign bit.
      return ISD::SIGN_EXTEND;
    }
    llvm_unreachable("Invalid content kind");
  }

  explicit TargetLoweringBase(const TargetMachine &TM);
  TargetLoweringBase(const TargetLoweringBase &) = delete;
  TargetLoweringBase &operator=(const TargetLoweringBase &) = delete;
  virtual ~TargetLoweringBase() = default;

  /// Return true if the target supports strict float operations.
  bool isStrictFPEnabled() const {
    return IsStrictFPEnabled;
  }

protected:
  /// Initialize all of the actions to default values.
  void initActions();

public:
  const TargetMachine &getTargetMachine() const { return TM; }

  virtual bool useSoftFloat() const { return false; }

  /// Return the pointer type for the given address space, defaults to
  /// the pointer type from the data layout.
  /// FIXME: The default needs to be removed once all the code is updated.
  virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const {
    return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
  }

  /// Return the in-memory pointer type for the given address space, defaults
  /// to the pointer type from the data layout.
  /// FIXME: The default needs to be removed once all the code is updated.
  virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS = 0) const {
    return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
  }
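
  // Example: for a hypothetical DataLayout whose string contains "p:64:64"
  // and "p270:32:32", the defaults above simply mirror the pointer sizes
  // recorded in the layout (TLI denotes a TargetLoweringBase reference).
  // \code
  //   MVT P0   = TLI.getPointerTy(DL);             // MVT::i64 for this layout
  //   MVT P270 = TLI.getPointerTy(DL, /*AS=*/270); // MVT::i32
  // \endcode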

  /// Return the type for frame index, which is determined by
  /// the alloca address space specified through the data layout.
  MVT getFrameIndexTy(const DataLayout &DL) const {
    return getPointerTy(DL, DL.getAllocaAddrSpace());
  }

  /// Return the type for code pointers, which is determined by the program
  /// address space specified through the data layout.
  MVT getProgramPointerTy(const DataLayout &DL) const {
    return getPointerTy(DL, DL.getProgramAddressSpace());
  }

  /// Return the type for operands of fence.
  /// TODO: Let fence operands be of i32 type and remove this.
  virtual MVT getFenceOperandTy(const DataLayout &DL) const {
    return getPointerTy(DL);
  }

  /// Return the type to use for a scalar shift opcode, given the shifted
  /// amount type. Targets should return a legal type if the input type is
  /// legal. Targets can return a type that is too small if the input type is
  /// illegal.
  virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;

  /// Returns the type for the shift amount of a shift opcode. For vectors,
  /// returns the input type. For scalars, behavior depends on \p LegalTypes.
  /// If \p LegalTypes is true, calls getScalarShiftAmountTy, otherwise uses
  /// pointer type. If getScalarShiftAmountTy or pointer type cannot represent
  /// all possible shift amounts, returns MVT::i32. In general, \p LegalTypes
  /// should be set to true for calls during type legalization and after type
  /// legalization has been completed.
  EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
                       bool LegalTypes = true) const;

  /// Return the preferred type to use for a shift opcode, given the shifted
  /// amount type is \p ShiftValueTy.
  LLVM_READONLY
  virtual LLT getPreferredShiftAmountTy(LLT ShiftValueTy) const {
    return ShiftValueTy;
  }

  /// Returns the type to be used for the index operand of:
  /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
  /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
  virtual MVT getVectorIdxTy(const DataLayout &DL) const {
    return getPointerTy(DL);
  }

  /// Returns the type to be used for the EVL/AVL operand of VP nodes:
  /// ISD::VP_ADD, ISD::VP_SUB, etc. It must be a legal scalar integer type,
  /// and must be at least as large as i32. The EVL is implicitly zero-extended
  /// to any larger type.
  virtual MVT getVPExplicitVectorLengthTy() const { return MVT::i32; }

  /// This callback is used to inspect load/store instructions and add
  /// target-specific MachineMemOperand flags to them. The default
  /// implementation does nothing.
  virtual MachineMemOperand::Flags
  getTargetMMOFlags(const Instruction &I) const {
    return MachineMemOperand::MONone;
  }

  MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI,
                                                  const DataLayout &DL) const;
  MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI,
                                                   const DataLayout &DL) const;
  MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI,
                                                    const DataLayout &DL) const;
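
  // Illustrative override (hypothetical target and metadata name): tag
  // accesses carrying custom IR metadata with one of the target-reserved MMO
  // flag bits so later machine passes can recognize them.
  // \code
  //   MachineMemOperand::Flags
  //   MyTargetLowering::getTargetMMOFlags(const Instruction &I) const {
  //     if (I.getMetadata("mytarget.streaming")) // hypothetical metadata kind
  //       return MachineMemOperand::MOTargetFlag1;
  //     return MachineMemOperand::MONone;
  //   }
  // \endcode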

  virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
    return true;
  }

  /// Return true if the @llvm.get.active.lane.mask intrinsic should be
  /// expanded using generic code in SelectionDAGBuilder.
  virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const {
    return true;
  }

  /// Return true if it is profitable to convert a select of FP constants into
  /// a constant pool load whose address depends on the select condition. The
  /// parameter may be used to differentiate a select with FP compare from
  /// integer compare.
  virtual bool reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
    return true;
  }

  /// Return true if multiple condition registers are available.
  bool hasMultipleConditionRegisters() const {
    return HasMultipleConditionRegisters;
  }

  /// Return true if the target has BitExtract instructions.
  bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }

  /// Return the preferred vector type legalization action.
  virtual TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const {
    // The default action for one element vectors is to scalarize.
    if (VT.getVectorElementCount().isScalar())
      return TypeScalarizeVector;
    // The default action for an odd-width vector is to widen.
    if (!VT.isPow2VectorType())
      return TypeWidenVector;
    // The default action for other vectors is to promote.
    return TypePromoteInteger;
  }

  // Return true if the half type should be passed around as i16, but promoted
  // to float around arithmetic. The default behavior is to pass around as
  // float and convert around loads/stores/bitcasts and other places where
  // the size matters.
  virtual bool softPromoteHalfType() const { return false; }

  // There are two general methods for expanding a BUILD_VECTOR node:
  //  1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
  //     them together.
  //  2. Build the vector on the stack and then load it.
  // If this function returns true, then method (1) will be used, subject to
  // the constraint that all of the necessary shuffles are legal (as determined
  // by isShuffleMaskLegal). If this function returns false, then method (2) is
  // always used. The vector type, and the number of defined values, are
  // provided.
  virtual bool
  shouldExpandBuildVectorWithShuffles(EVT /* VT */,
                                      unsigned DefinedValues) const {
    return DefinedValues < 3;
  }

  /// Return true if integer divide is usually cheaper than a sequence of
  /// several shifts, adds, and multiplies for this target.
  /// The definition of "cheaper" may depend on whether we're optimizing
  /// for speed or for size.
  virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const { return false; }

  /// Return true if the target can handle a standalone remainder operation.
  virtual bool hasStandaloneRem(EVT VT) const {
    return true;
  }

  /// Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
  virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const {
    // Default behavior is to replace SQRT(X) with X*RSQRT(X).
    return false;
  }

  /// Reciprocal estimate status values used by the functions below.
  enum ReciprocalEstimate : int {
    Unspecified = -1,
    Disabled = 0,
    Enabled = 1
  };

  /// Return a ReciprocalEstimate enum value for a square root of the given
  /// type based on the function's attributes. If the operation is not
  /// overridden by the function's attributes, "Unspecified" is returned and
  /// target defaults are expected to be used for instruction selection.
  int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const;
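
  // Example: these queries read the function-level "reciprocal-estimates"
  // attribute. For a function attributed with an entry such as "sqrt:1"
  // (attribute spelling shown for illustration only), a sqrt query would
  // report Enabled with one refinement step; the division-oriented queries
  // below follow the same convention.
  // \code
  //   int E = TLI.getRecipEstimateSqrtEnabled(MVT::f32, MF); // Enabled
  //   int S = TLI.getSqrtRefinementSteps(MVT::f32, MF);      // 1
  // \endcode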

  /// Return a ReciprocalEstimate enum value for a division of the given type
  /// based on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const;

  /// Return the refinement step count for a square root of the given type
  /// based on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const;

  /// Return the refinement step count for a division of the given type based
  /// on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getDivRefinementSteps(EVT VT, MachineFunction &MF) const;

  /// Returns true if target has indicated at least one type should be
  /// bypassed.
  bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }

  /// Returns map of slow types for division or remainder with corresponding
  /// fast types.
  const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
    return BypassSlowDivWidths;
  }
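
  // Example: a target where 64-bit division is microcoded and slow might
  // populate this map with {64 -> 32}; the slow-division bypass in
  // CodeGenPrepare then emits a runtime check that dispatches an i64
  // udiv/sdiv to a 32-bit divide when both operands happen to fit. The
  // concrete widths are entirely target-chosen.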

  /// Return true only if vscale must be a power of two.
  virtual bool isVScaleKnownToBeAPowerOfTwo() const { return false; }

  /// Return true if Flow Control is an expensive operation that should be
  /// avoided.
  bool isJumpExpensive() const { return JumpIsExpensive; }

  /// Return true if selects are only cheaper than branches if the branch is
  /// unlikely to be predicted right.
  bool isPredictableSelectExpensive() const {
    return PredictableSelectIsExpensive;
  }

  virtual bool fallBackToDAGISel(const Instruction &Inst) const {
    return false;
  }

  /// Return true if the following transform is beneficial:
  ///   fold (conv (load x)) -> (load (conv*)x)
  /// On architectures that don't natively support some vector loads
  /// efficiently, casting the load to a smaller vector of larger types and
  /// loading is more efficient. However, this can be undone by optimizations
  /// in the DAG combiner.
  virtual bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
                                       const SelectionDAG &DAG,
                                       const MachineMemOperand &MMO) const {
    // Don't do if we could do an indexed load on the original type, but not on
    // the new one.
    if (!LoadVT.isSimple() || !BitcastVT.isSimple())
      return true;

    MVT LoadMVT = LoadVT.getSimpleVT();

    // Don't bother doing this if it's just going to be promoted again later,
    // as doing so might interfere with other combines.
    if (getOperationAction(ISD::LOAD, LoadMVT) == Promote &&
        getTypeToPromoteTo(ISD::LOAD, LoadMVT) == BitcastVT.getSimpleVT())
      return false;

    bool Fast = false;
    return allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(),
                              BitcastVT, MMO, &Fast) &&
           Fast;
  }

  /// Return true if the following transform is beneficial:
  ///   (store (y (conv x)), y*)) -> (store x, (x*))
  virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT,
                                        const SelectionDAG &DAG,
                                        const MachineMemOperand &MMO) const {
    // Default to the same logic as loads.
    return isLoadBitCastBeneficial(StoreVT, BitcastVT, DAG, MMO);
  }

  /// Return true if it is expected to be cheaper to do a store of a non-zero
  /// vector constant with the given size and type for the address space than
  /// to store the individual scalar element constants.
  virtual bool storeOfVectorConstantIsCheap(EVT MemVT,
                                            unsigned NumElem,
                                            unsigned AddrSpace) const {
    return false;
  }

  /// Allow store merging for the specified type after legalization in addition
  /// to before legalization. This may transform stores that do not exist
  /// earlier (for example, stores created from intrinsics).
  virtual bool mergeStoresAfterLegalization(EVT MemVT) const {
    return true;
  }

  /// Returns if it's reasonable to merge stores to MemVT size.
  virtual bool canMergeStoresTo(unsigned AS, EVT MemVT,
                                const MachineFunction &MF) const {
    return true;
  }

  /// Return true if it is cheap to speculate a call to intrinsic cttz.
  virtual bool isCheapToSpeculateCttz() const {
    return false;
  }

  /// Return true if it is cheap to speculate a call to intrinsic ctlz.
  virtual bool isCheapToSpeculateCtlz() const {
    return false;
  }

  /// Return true if ctlz instruction is fast.
  virtual bool isCtlzFast() const {
    return false;
  }

  /// Return the maximum number of "x & (x - 1)" operations that can be done
  /// instead of deferring to a custom CTPOP.
  virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const {
    return 1;
  }

  /// Return true if instruction generated for equality comparison is folded
  /// with instruction generated for signed comparison.
  virtual bool isEqualityCmpFoldedWithSignedCmp() const { return true; }

  /// Return true if the heuristic to prefer icmp eq zero should be used in
  /// code gen prepare.
  virtual bool preferZeroCompareBranch() const { return false; }

  /// Return true if it is safe to transform an integer-domain bitwise
  /// operation into the equivalent floating-point operation. This should be
  /// set to true if the target has IEEE-754-compliant fabs/fneg operations
  /// for the input type.
  virtual bool hasBitPreservingFPLogic(EVT VT) const {
    return false;
  }

  /// Return true if it is cheaper to split the store of a merged int val
  /// from a pair of smaller values into multiple stores.
  virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const {
    return false;
  }

  /// Return true if the target supports combining a
  /// chain like:
  /// \code
  ///   %andResult = and %val1, #mask
  ///   %icmpResult = icmp %andResult, 0
  /// \endcode
  /// into a single machine instruction of a form like:
  /// \code
  ///   cc = test %register, #mask
  /// \endcode
  virtual bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
    return false;
  }

  /// Use bitwise logic to make pairs of compares more efficient. For example:
  ///   and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
  /// This should be true when it takes more than one instruction to lower
  /// setcc (cmp+set on x86 scalar), when bitwise ops are faster than logic on
  /// condition bits (crand on PowerPC), and/or when reducing cmp+br is a win.
  virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const {
    return false;
  }

  /// Return the preferred operand type if the target has a quick way to
  /// compare integer values of the given size. Assume that any legal integer
  /// type can be compared efficiently. Targets may override this to allow
  /// illegal wide types to return a vector type if there is support to
  /// compare that type.
  virtual MVT hasFastEqualityCompare(unsigned NumBits) const {
    MVT VT = MVT::getIntegerVT(NumBits);
    return isTypeLegal(VT) ? VT : MVT::INVALID_SIMPLE_VALUE_TYPE;
  }

  /// Return true if the target should transform:
  ///   (X & Y) == Y ---> (~X & Y) == 0
  ///   (X & Y) != Y ---> (~X & Y) != 0
  ///
  /// This may be profitable if the target has a bitwise and-not operation
  /// that sets comparison flags. A target may want to limit the
  /// transformation based on the type of Y or if Y is a constant.
  ///
  /// Note that the transform will not occur if Y is known to be a power-of-2
  /// because a mask and compare of a single bit can be handled by inverting
  /// the predicate, for example:
  ///   (X & 8) == 8 ---> (X & 8) != 0
  virtual bool hasAndNotCompare(SDValue Y) const {
    return false;
  }

  /// Return true if the target has a bitwise and-not operation:
  ///   X = ~A & B
  /// This can be used to simplify select or other instructions.
  virtual bool hasAndNot(SDValue X) const {
    // If the target has the more complex version of this operation, assume
    // that it has this operation too.
    return hasAndNotCompare(X);
  }

  /// Return true if the target has a bit-test instruction:
  ///   (X & (1 << Y)) ==/!= 0
  /// This knowledge can be used to prevent breaking the pattern,
  /// or creating it if it could be recognized.
  virtual bool hasBitTest(SDValue X, SDValue Y) const { return false; }

  /// There are two ways to clear extreme bits (either low or high):
  ///   Mask:    x &  (-1 << y)  (the instcombine canonical form)
  ///   Shifts:  x >> y << y
  /// Return true if the variant with 2 variable shifts is preferred.
  /// Return false if there is no preference.
  virtual bool shouldFoldMaskToVariableShiftPair(SDValue X) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }

  /// Return true if it is profitable to fold a pair of shifts into a mask.
  /// This is usually true on most targets. But some targets, like Thumb1,
  /// have immediate shift instructions, but no immediate "and" instruction;
  /// this makes the fold unprofitable.
  virtual bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                                 CombineLevel Level) const {
    return true;
  }

  /// Should we transform the IR-optimal check for whether given truncation
  /// down into KeptBits would be truncating or not:
  ///   (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
  /// Into its more traditional form:
  ///   ((%x << C) a>> C) dstcond %x
  /// Return true if we should transform.
  /// Return false if there is no preference.
  virtual bool shouldTransformSignedTruncationCheck(EVT XVT,
                                                    unsigned KeptBits) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }
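
  // Worked instance of the check above, for XVT = i32 and KeptBits = 8:
  //   (add %x, 128) ult 256       ; IR-optimal form (srccond = ult)
  //   ((%x << 24) a>> 24) eq %x   ; traditional form (dstcond = eq, C = 24)
  // Both hold exactly when %x fits in a signed 8-bit value, i.e. when
  // truncating %x to i8 and sign-extending back is lossless.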

  /// Given the pattern
  ///   (X & (C l>>/<< Y)) ==/!= 0
  /// return true if it should be transformed into:
  ///   ((X <</l>> Y) & C) ==/!= 0
  /// WARNING: if 'X' is a constant, the fold may deadlock!
  /// FIXME: we could avoid passing XC, but we can't use isConstOrConstSplat()
  ///        here because it can end up being not linked in.
  virtual bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
      SelectionDAG &DAG) const {
    if (hasBitTest(X, Y)) {
      // One interesting pattern that we'd want to form is 'bit test':
      //   ((1 << Y) & C) ==/!= 0
      // But we also need to be careful not to try to reverse that fold.

      // Is this '1 << Y' ?
      if (OldShiftOpcode == ISD::SHL && CC->isOne())
        return false; // Keep the 'bit test' pattern.

      // Will it be '1 << Y' after the transform ?
      if (XC && NewShiftOpcode == ISD::SHL && XC->isOne())
        return true; // Do form the 'bit test' pattern.
    }

    // If 'X' is a constant, and we transform, then we will immediately
    // try to undo the fold, thus causing endless combine loop.
    // So by default, let's assume everyone prefers the fold
    // iff 'X' is not a constant.
    return !XC;
  }

  /// These two forms are equivalent:
  ///   sub %y, (xor %x, -1)
  ///   add (add %x, 1), %y
  /// The variant with two add's is IR-canonical.
  /// Some targets may prefer one to the other.
  virtual bool preferIncOfAddToSubOfNot(EVT VT) const {
    // By default, let's assume that everyone prefers the form with two add's.
    return true;
  }

  /// Return true if the target wants to use the optimization that
  /// turns ext(promotableInst1(...(promotableInstN(load)))) into
  /// promotedInst1(...(promotedInstN(ext(load)))).
  bool enableExtLdPromotion() const { return EnableExtLdPromotion; }

  /// Return true if the target can combine store(extractelement VectorTy,
  /// Idx).
  /// \p Cost[out] gives the cost of that transformation when this is true.
  virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                         unsigned &Cost) const {
    return false;
  }

  /// Return true if inserting a scalar into a variable element of an undef
  /// vector is more efficiently handled by splatting the scalar instead.
  virtual bool shouldSplatInsEltVarIndex(EVT) const {
    return false;
  }

  /// Return true if target always benefits from combining into FMA for a
  /// given value type. This must typically return false on targets where FMA
  /// takes more cycles to execute than FADD.
  virtual bool enableAggressiveFMAFusion(EVT VT) const { return false; }

  /// Return true if target always benefits from combining into FMA for a
  /// given value type. This must typically return false on targets where FMA
  /// takes more cycles to execute than FADD.
  virtual bool enableAggressiveFMAFusion(LLT Ty) const { return false; }

  /// Return the ValueType of the result of SETCC operations.
  virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                                 EVT VT) const;

  /// Return the ValueType for comparison libcalls. Comparison libcalls
  /// include floating point comparison calls, and Ordered/Unordered check
  /// calls on floating point numbers.
  virtual MVT::SimpleValueType getCmpLibcallReturnType() const;

  /// For targets without i1 registers, this gives the nature of the high-bits
  /// of boolean values held in types wider than i1.
  ///
  /// "Boolean values" are special true/false values produced by nodes like
  /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
  /// Not to be confused with general values promoted from i1. Some cpus
  /// distinguish between vectors of boolean and scalars; the isVec parameter
  /// selects between the two kinds. For example on X86 a scalar boolean
  /// should be zero extended from i1, while the elements of a vector of
  /// booleans should be sign extended from i1.
  ///
  /// Some cpus also treat floating point types the same way as they treat
  /// vectors instead of the way they treat scalars.
  BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
    if (isVec)
      return BooleanVectorContents;
    return isFloat ? BooleanFloatContents : BooleanContents;
  }

  BooleanContent getBooleanContents(EVT Type) const {
    return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
  }

  /// Promote the given target boolean to a target boolean of the given type.
  /// A target boolean is an integer value, not necessarily of type i1, the
  /// bits of which conform to getBooleanContents.
  ///
  /// ValVT is the type of values that produced the boolean.
  SDValue promoteTargetBoolean(SelectionDAG &DAG, SDValue Bool,
                               EVT ValVT) const {
    SDLoc dl(Bool);
    EVT BoolVT =
        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), ValVT);
    ISD::NodeType ExtendCode = getExtendForContent(getBooleanContents(ValVT));
    return DAG.getNode(ExtendCode, dl, BoolVT, Bool);
  }
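
  // Example: with the common x86-style configuration of
  // ZeroOrOneBooleanContent for scalars and ZeroOrNegativeOneBooleanContent
  // for vectors, an i32 SETCC produces 0/1 while a v4i32 SETCC produces lanes
  // of 0/-1; getExtendForContent maps each kind to the extension that
  // re-widens such booleans.
  // \code
  //   BooleanContent BC = TLI.getBooleanContents(MVT::v4i32);
  //   ISD::NodeType Ext = TargetLoweringBase::getExtendForContent(BC);
  //   // Ext == ISD::SIGN_EXTEND for ZeroOrNegativeOneBooleanContent
  // \endcode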

  /// Return target scheduling preference.
  Sched::Preference getSchedulingPreference() const {
    return SchedPreferenceInfo;
  }

  /// Some schedulers, e.g. hybrid, can switch to different scheduling
  /// heuristics for different nodes. This function returns the preference
  /// (or none) for the given node.
  virtual Sched::Preference getSchedulingPreference(SDNode *) const {
    return Sched::None;
  }

  /// Return the register class that should be used for the specified value
  /// type.
  virtual const TargetRegisterClass *
  getRegClassFor(MVT VT, bool isDivergent = false) const {
    (void)isDivergent;
    const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
    assert(RC && "This value type is not natively supported!");
    return RC;
  }

  /// Allows target to decide about the register class of the
  /// specific value that is live outside the defining block.
  /// Returns true if the value needs uniform register class.
  virtual bool requiresUniformRegister(MachineFunction &MF,
                                       const Value *) const {
    return false;
  }

  /// Return the 'representative' register class for the specified value
  /// type.
  ///
  /// The 'representative' register class is the largest legal super-reg
  /// register class for the register class of the value type. For example, on
  /// i386 the rep register class for i8, i16, and i32 are GR32; while the rep
  /// register class is GR64 on x86_64.
  virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
    return RC;
  }

  /// Return the cost of the 'representative' register class for the specified
  /// value type.
  virtual uint8_t getRepRegClassCostFor(MVT VT) const {
    return RepRegClassCostForVT[VT.SimpleTy];
  }

  /// Return true if SHIFT instructions should be expanded to SHIFT_PARTS
  /// instructions, and false if a library call is preferred (e.g for code-size
  /// reasons).
  virtual bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const {
    return true;
  }

  /// Return true if the target has native support for the specified value
  /// type. This means that it has a register that directly holds it without
  /// promotions or expansions.
  bool isTypeLegal(EVT VT) const {
    assert(!VT.isSimple() ||
           (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
    return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
  }

  class ValueTypeActionImpl {
    /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
    /// that indicates how instruction selection should deal with the type.
    LegalizeTypeAction ValueTypeActions[MVT::VALUETYPE_SIZE];

  public:
    ValueTypeActionImpl() {
      std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions),
                TypeLegal);
    }

    LegalizeTypeAction getTypeAction(MVT VT) const {
      return ValueTypeActions[VT.SimpleTy];
    }

    void setTypeAction(MVT VT, LegalizeTypeAction Action) {
      ValueTypeActions[VT.SimpleTy] = Action;
    }
  };

  const ValueTypeActionImpl &getValueTypeActions() const {
    return ValueTypeActions;
  }

  /// Return how we should legalize values of this type, either it is already
  /// legal (return 'Legal') or we need to promote it to a larger type (return
  /// 'Promote'), or we need to expand it into multiple registers of smaller
  /// integer type (return 'Expand'). 'Custom' is not an option.
  LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).first;
  }
  LegalizeTypeAction getTypeAction(MVT VT) const {
    return ValueTypeActions.getTypeAction(VT);
  }

  /// For types supported by the target, this is an identity function. For
  /// types that must be promoted to larger types, this returns the larger
  /// type to promote to. For integer types that are larger than the largest
  /// integer register, this contains one step in the expansion to get to the
  /// smaller register. For illegal floating point types, this returns the
  /// integer type to transform to.
  EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).second;
  }

  /// For types supported by the target, this is an identity function. For
  /// types that must be expanded (i.e. integer types that are larger than the
  /// largest integer register or illegal floating point types), this returns
  /// the largest legal type it will be expanded to.
  EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
    assert(!VT.isVector());
    while (true) {
      switch (getTypeAction(Context, VT)) {
      case TypeLegal:
        return VT;
      case TypeExpandInteger:
        VT = getTypeToTransformTo(Context, VT);
        break;
      default:
        llvm_unreachable("Type is not legal nor is it to be expanded!");
      }
    }
  }
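
  // Example of the query chain above: on a hypothetical 32-bit target, i64
  // reports TypeExpandInteger, getTypeToTransformTo takes one step (i64 ->
  // i32), and getTypeToExpandTo walks the whole chain down to the largest
  // legal type.
  // \code
  //   EVT VT = MVT::i64;
  //   if (TLI.getTypeAction(Ctx, VT) == TypeExpandInteger)
  //     VT = TLI.getTypeToExpandTo(Ctx, VT); // i32 on this target
  // \endcode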

  /// Vector types are broken down into some number of legal first class
  /// types. For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or
  /// SSE1, or 8 promoted EVT::f64 values with the X86 FP stack. Similarly,
  /// EVT::v2i64 turns into 4 EVT::i32 values with both PPC and X86.
  ///
  /// This method returns the number of registers needed, and the VT for each
  /// register. It also returns the VT and quantity of the intermediate values
  /// before they are promoted/expanded.
  unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                  EVT &IntermediateVT,
                                  unsigned &NumIntermediates,
                                  MVT &RegisterVT) const;

  /// Certain targets such as MIPS require that some types such as vectors are
  /// always broken down into scalars in some contexts. This occurs even if
  /// the vector type is legal.
  virtual unsigned getVectorTypeBreakdownForCallingConv(
      LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
      unsigned &NumIntermediates, MVT &RegisterVT) const {
    return getVectorTypeBreakdown(Context, VT, IntermediateVT,
                                  NumIntermediates, RegisterVT);
  }
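
  // Example: on a target whose widest legal vector is v4f32, asking for the
  // breakdown of v8f32 might produce NumIntermediates == 2 with
  // IntermediateVT == RegisterVT == v4f32 (the results are target-specific).
  // \code
  //   EVT IntermediateVT;
  //   MVT RegisterVT;
  //   unsigned NumIntermediates;
  //   unsigned NumRegs = TLI.getVectorTypeBreakdown(
  //       Ctx, MVT::v8f32, IntermediateVT, NumIntermediates, RegisterVT);
  //   // NumRegs == 2 on the hypothetical target above
  // \endcode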

  struct IntrinsicInfo {
    unsigned opc = 0; // target opcode
    EVT memVT;        // memory VT

    // value representing memory location
    PointerUnion<const Value *, const PseudoSourceValue *> ptrVal;

    int offset = 0;    // offset off of ptrVal
    uint64_t size = 0; // the size of the memory location
                       // (taken from memVT if zero)
    MaybeAlign align = Align(1); // alignment

    MachineMemOperand::Flags flags = MachineMemOperand::MONone;
    IntrinsicInfo() = default;
  };

  /// Given an intrinsic, checks if on the target the intrinsic will need to
  /// map to a MemIntrinsicNode (touches memory). If this is the case, it
  /// returns true and stores the intrinsic information into the IntrinsicInfo
  /// that was passed to the function.
  virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
                                  MachineFunction &,
                                  unsigned /*Intrinsic*/) const {
    return false;
  }

  /// Returns true if the target can instruction select the specified FP
  /// immediate natively. If false, the legalizer will materialize the FP
  /// immediate as a load from a constant pool.
  virtual bool isFPImmLegal(const APFloat & /*Imm*/, EVT /*VT*/,
                            bool ForCodeSize = false) const {
    return false;
  }

  /// Targets can use this to indicate that they only support *some*
  /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
  /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to
  /// be legal.
  virtual bool isShuffleMaskLegal(ArrayRef<int> /*Mask*/, EVT /*VT*/) const {
    return true;
  }

  /// Returns true if the operation can trap for the value type.
  ///
  /// VT must be a legal type. By default, we optimistically assume most
  /// operations don't trap except for integer divide and remainder.
  virtual bool canOpTrap(unsigned Op, EVT VT) const;

  /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
  /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
  /// constant pool entry.
  virtual bool isVectorClearMaskLegal(ArrayRef<int> /*Mask*/,
                                      EVT /*VT*/) const {
    return false;
  }

  /// How to legalize this custom operation?
  virtual LegalizeAction getCustomOperationAction(SDNode &Op) const {
    return Legal;
  }

  /// Return how this operation should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
    if (VT.isExtended())
      return Expand;
    // If a target-specific SDNode requires legalization, require the target
    // to provide custom legalization for it.
    if (Op >= array_lengthof(OpActions[0]))
      return Custom;
    return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
  }

  /// Custom method defined by each target to indicate if an operation which
  /// may require a scale is supported natively by the target.
  /// If not, the operation is illegal.
  virtual bool isSupportedFixedPointOperation(unsigned Op, EVT VT,
                                              unsigned Scale) const {
    return false;
  }

  /// Some fixed point operations may be natively supported by the target but
  /// only for specific scales. This method allows for checking
  /// if the width is supported by the target for a given operation that may
  /// depend on scale.
  LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT,
                                              unsigned Scale) const {
    auto Action = getOperationAction(Op, VT);
    if (Action != Legal)
      return Action;

    // This operation is supported in this type but may only work on specific
    // scales.
    bool Supported;
    switch (Op) {
    default:
      llvm_unreachable("Unexpected fixed point operation.");
    case ISD::SMULFIX:
    case ISD::SMULFIXSAT:
    case ISD::UMULFIX:
    case ISD::UMULFIXSAT:
    case ISD::SDIVFIX:
    case ISD::SDIVFIXSAT:
    case ISD::UDIVFIX:
    case ISD::UDIVFIXSAT:
      Supported = isSupportedFixedPointOperation(Op, VT, Scale);
      break;
    }

    return Supported ? Action : Expand;
  }

  // If Op is a strict floating-point operation, return the result
  // of getOperationAction for the equivalent non-strict operation.
  LegalizeAction getStrictFPOperationAction(unsigned Op, EVT VT) const {
    unsigned EqOpc;
    switch (Op) {
    default:
      llvm_unreachable("Unexpected FP pseudo-opcode");
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
    case ISD::STRICT_##DAGN: EqOpc = ISD::DAGN; break;
#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
    case ISD::STRICT_##DAGN: EqOpc = ISD::SETCC; break;
#include "llvm/IR/ConstrainedOps.def"
    }

    return getOperationAction(EqOpc, VT);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering. This is used to help guide high-level
  /// lowering decisions. LegalOnly is an optional convenience for code paths
  /// traversed pre and post legalisation.
  bool isOperationLegalOrCustom(unsigned Op, EVT VT,
                                bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
           (getOperationAction(Op, VT) == Legal ||
            getOperationAction(Op, VT) == Custom);
  }
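
  // Typical usage in a DAG combine (sketch): refuse to create a node the
  // target cannot handle once legalization constraints apply. LegalOperations
  // stands in for the combiner's post-legalization flag.
  // \code
  //   if (LegalOperations && !TLI.isOperationLegalOrCustom(ISD::FMA, VT))
  //     return SDValue(); // don't form an FMA the target can't lower
  // \endcode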

  /// Return true if the specified operation is legal on this target or can be
  /// made legal using promotion. This is used to help guide high-level
  /// lowering decisions. LegalOnly is an optional convenience for code paths
  /// traversed pre and post legalisation.
  bool isOperationLegalOrPromote(unsigned Op, EVT VT,
                                 bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
           (getOperationAction(Op, VT) == Legal ||
            getOperationAction(Op, VT) == Promote);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering or using promotion. This is used to help
  /// guide high-level lowering decisions. LegalOnly is an optional
  /// convenience for code paths traversed pre and post legalisation.
  bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT,
                                         bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
           (getOperationAction(Op, VT) == Legal ||
            getOperationAction(Op, VT) == Custom ||
            getOperationAction(Op, VT) == Promote);
  }

  /// Return true if the operation uses custom lowering, regardless of whether
  /// the type is legal or not.
  bool isOperationCustom(unsigned Op, EVT VT) const {
    return getOperationAction(Op, VT) == Custom;
  }

  /// Return true if lowering to a jump table is allowed.
  virtual bool areJTsAllowed(const Function *Fn) const {
    if (Fn->getFnAttribute("no-jump-tables").getValueAsBool())
      return false;

    return isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
           isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
  }

  /// Check whether the range [Low,High] fits in a machine word.
  bool rangeFitsInWord(const APInt &Low, const APInt &High,
                       const DataLayout &DL) const {
    // FIXME: Using the pointer type doesn't seem ideal.
    uint64_t BW = DL.getIndexSizeInBits(0u);
    uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
    return Range <= BW;
  }
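
  // Worked example: with a 64-bit index width, a switch whose case values
  // span [10, 70] has Range = 70 - 10 + 1 = 61 <= 64, so it fits in a machine
  // word and remains a candidate for the bit-test lowering checked below.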

  /// Return true if lowering to a jump table is suitable for a set of case
  /// clusters which may contain \p NumCases cases and a value range of
  /// \p Range.
  virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases,
                                      uint64_t Range, ProfileSummaryInfo *PSI,
                                      BlockFrequencyInfo *BFI) const;

  /// Returns preferred type for switch condition.
  virtual MVT getPreferredSwitchConditionType(LLVMContext &Context,
                                              EVT ConditionVT) const;

  /// Return true if lowering to a bit test is suitable for a set of case
  /// clusters which contains \p NumDests unique destinations, \p Low and
  /// \p High as its lowest and highest case values, and expects \p NumCmps
  /// case value comparisons. Check if the number of destinations, comparison
  /// metric, and range are all suitable.
  bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps,
                             const APInt &Low, const APInt &High,
                             const DataLayout &DL) const {
    // FIXME: I don't think NumCmps is the correct metric: a single case and a
    // range of cases both require only one branch to lower. Just looking at
    // the number of clusters and destinations should be enough to decide
    // whether to build bit tests.

    // To lower a range with bit tests, the range must fit the bitwidth of a
    // machine word.
    if (!rangeFitsInWord(Low, High, DL))
      return false;

    // Decide whether it's profitable to lower this range with bit tests. Each
    // destination requires a bit test and branch, and there is an overall
    // range check branch. For a small number of clusters, separate
    // comparisons might be cheaper, and for many destinations, splitting the
    // range might be better.
    return (NumDests == 1 && NumCmps >= 3) ||
           (NumDests == 2 && NumCmps >= 5) ||
           (NumDests == 3 && NumCmps >= 6);
  }

  /// Return true if the specified operation is illegal on this target or
  /// unlikely to be made legal with custom lowering. This is used to help
  /// guide high-level lowering decisions.
  bool isOperationExpand(unsigned Op, EVT VT) const {
    return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
  }

  /// Return true if the specified operation is legal on this target.
  bool isOperationLegal(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
           getOperationAction(Op, VT) == Legal;
  }

  /// Return how this load with extension should be treated: either it is
  /// legal, needs to be promoted to a larger size, needs to be expanded to
  /// some other code sequence, or the target has a custom expander for it.
  LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
                                  EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended())
      return Expand;
    unsigned ValI = (unsigned)ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned)MemVT.getSimpleVT().SimpleTy;
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::VALUETYPE_SIZE &&
           MemI < MVT::VALUETYPE_SIZE && "Table isn't big enough!");
    unsigned Shift = 4 * ExtType;
    return (LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf);
  }

  /// Return true if the specified load with extension is legal on this
  /// target.
  bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
  }

  /// Return true if the specified load with extension is legal or custom
  /// on this target.
  bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
           getLoadExtAction(ExtType, ValVT, MemVT) == Custom;
  }

  /// Return how this store with truncation should be treated: either it is
  /// legal, needs to be promoted to a larger size, needs to be expanded to
  /// some other code sequence, or the target has a custom expander for it.
  LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended())
      return Expand;
    unsigned ValI = (unsigned)ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned)MemVT.getSimpleVT().SimpleTy;
    assert(ValI < MVT::VALUETYPE_SIZE && MemI < MVT::VALUETYPE_SIZE &&
           "Table isn't big enough!");
    return TruncStoreActions[ValI][MemI];
  }

  /// Return true if the specified store with truncation is legal on this
  /// target.
  bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) && getTruncStoreAction(ValVT, MemVT) == Legal;
  }
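
  // Example: querying extending-load support. Folding (sext (load i8)) into a
  // single sign-extending i32 load is only safe to form when the target
  // reports it as legal:
  // \code
  //   bool CanFold = TLI.isLoadExtLegal(ISD::SEXTLOAD, MVT::i32, MVT::i8);
  // \endcode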

  /// Return true if the specified store with truncation has a solution on
  /// this target.
  bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) &&
           (getTruncStoreAction(ValVT, MemVT) == Legal ||
            getTruncStoreAction(ValVT, MemVT) == Custom);
  }

  virtual bool canCombineTruncStore(EVT ValVT, EVT MemVT,
                                    bool LegalOnly) const {
    if (LegalOnly)
      return isTruncStoreLegal(ValVT, MemVT);

    return isTruncStoreLegalOrCustom(ValVT, MemVT);
  }

  /// Return how the indexed load should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_Load);
  }

  /// Return true if the specified indexed load is legal on this target.
  bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
           (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
            getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
  }
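
  // Example: a pre-indexed (pre-incremented) load folds the address update
  // into the memory access, e.g. AArch64's "ldr w0, [x1, #4]!", which loads
  // from x1+4 and writes the updated address back to x1. A combine forming
  // such a load would first check:
  // \code
  //   bool CanPreInc = TLI.isIndexedLoadLegal(ISD::PRE_INC, MVT::i32);
  // \endcode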
1384 bool isIndexedMaskedStoreLegal(unsigned IdxMode, EVT VT) const { 1385 return VT.isSimple() && 1386 (getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Legal || 1387 getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Custom); 1388 } 1389 1390 /// Returns true if the index type for a masked gather/scatter requires 1391 /// extending. 1392 virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const { return false; } 1393 1394 /// Returns true if IndexVT is a legal index type for masked gathers/scatters 1395 /// on this target. 1396 virtual bool shouldRemoveExtendFromGSIndex(EVT IndexVT, EVT DataVT) const { 1397 return false; 1398 } 1399 1400 /// Return how the condition code should be treated: either it is legal, needs 1401 /// to be expanded to some other code sequence, or the target has a custom 1402 /// expander for it. 1403 LegalizeAction 1404 getCondCodeAction(ISD::CondCode CC, MVT VT) const { 1405 assert((unsigned)CC < array_lengthof(CondCodeActions) && 1406 ((unsigned)VT.SimpleTy >> 3) < array_lengthof(CondCodeActions[0]) && 1407 "Table isn't big enough!"); 1408 // See setCondCodeAction for how this is encoded. 1409 uint32_t Shift = 4 * (VT.SimpleTy & 0x7); 1410 uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 3]; 1411 LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0xF); 1412 assert(Action != Promote && "Can't promote condition code!"); 1413 return Action; 1414 } 1415 1416 /// Return true if the specified condition code is legal on this target. 1417 bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const { 1418 return getCondCodeAction(CC, VT) == Legal; 1419 } 1420 1421 /// Return true if the specified condition code is legal or custom on this 1422 /// target. 1423 bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const { 1424 return getCondCodeAction(CC, VT) == Legal || 1425 getCondCodeAction(CC, VT) == Custom; 1426 } 1427 1428 /// If the action for this operation is to promote, this method returns the 1429 /// ValueType to promote to. 1430 MVT getTypeToPromoteTo(unsigned Op, MVT VT) const { 1431 assert(getOperationAction(Op, VT) == Promote && 1432 "This operation isn't promoted!"); 1433 1434 // See if this has an explicit type specified. 1435 std::map<std::pair<unsigned, MVT::SimpleValueType>, 1436 MVT::SimpleValueType>::const_iterator PTTI = 1437 PromoteToType.find(std::make_pair(Op, VT.SimpleTy)); 1438 if (PTTI != PromoteToType.end()) return PTTI->second; 1439 1440 assert((VT.isInteger() || VT.isFloatingPoint()) && 1441 "Cannot autopromote this type, add it with AddPromotedToType."); 1442 1443 MVT NVT = VT; 1444 do { 1445 NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1); 1446 assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid && 1447 "Didn't find type to promote to!"); 1448 } while (!isTypeLegal(NVT) || 1449 getOperationAction(Op, NVT) == Promote); 1450 return NVT; 1451 } 1452 1453 virtual EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty, 1454 bool AllowUnknown = false) const { 1455 return getValueType(DL, Ty, AllowUnknown); 1456 } 1457 1458 /// Return the EVT corresponding to this LLVM type. This is fixed by the LLVM 1459 /// operations except for the pointer size. If AllowUnknown is true, this 1460 /// will return MVT::Other for types with no EVT counterpart (e.g. structs), 1461 /// otherwise it will assert. 1462 EVT getValueType(const DataLayout &DL, Type *Ty, 1463 bool AllowUnknown = false) const { 1464 // Lower scalar pointers to native pointer types.
1465 if (auto *PTy = dyn_cast<PointerType>(Ty)) 1466 return getPointerTy(DL, PTy->getAddressSpace()); 1467 1468 if (auto *VTy = dyn_cast<VectorType>(Ty)) { 1469 Type *EltTy = VTy->getElementType(); 1470 // Lower vectors of pointers to native pointer types. 1471 if (auto *PTy = dyn_cast<PointerType>(EltTy)) { 1472 EVT PointerTy(getPointerTy(DL, PTy->getAddressSpace())); 1473 EltTy = PointerTy.getTypeForEVT(Ty->getContext()); 1474 } 1475 return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(EltTy, false), 1476 VTy->getElementCount()); 1477 } 1478 1479 return EVT::getEVT(Ty, AllowUnknown); 1480 } 1481 1482 EVT getMemValueType(const DataLayout &DL, Type *Ty, 1483 bool AllowUnknown = false) const { 1484 // Lower scalar pointers to native pointer types. 1485 if (PointerType *PTy = dyn_cast<PointerType>(Ty)) 1486 return getPointerMemTy(DL, PTy->getAddressSpace()); 1487 else if (VectorType *VTy = dyn_cast<VectorType>(Ty)) { 1488 Type *Elm = VTy->getElementType(); 1489 if (PointerType *PT = dyn_cast<PointerType>(Elm)) { 1490 EVT PointerTy(getPointerMemTy(DL, PT->getAddressSpace())); 1491 Elm = PointerTy.getTypeForEVT(Ty->getContext()); 1492 } 1493 return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false), 1494 VTy->getElementCount()); 1495 } 1496 1497 return getValueType(DL, Ty, AllowUnknown); 1498 } 1499 1500 1501 /// Return the MVT corresponding to this LLVM type. See getValueType. 1502 MVT getSimpleValueType(const DataLayout &DL, Type *Ty, 1503 bool AllowUnknown = false) const { 1504 return getValueType(DL, Ty, AllowUnknown).getSimpleVT(); 1505 } 1506 1507 /// Return the desired alignment for ByVal or InAlloca aggregate function 1508 /// arguments in the caller parameter area. This is the actual alignment, not 1509 /// its logarithm. 1510 virtual uint64_t getByValTypeAlignment(Type *Ty, const DataLayout &DL) const; 1511 1512 /// Return the type of registers that this ValueType will eventually require. 1513 MVT getRegisterType(MVT VT) const { 1514 assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT)); 1515 return RegisterTypeForVT[VT.SimpleTy]; 1516 } 1517 1518 /// Return the type of registers that this ValueType will eventually require. 1519 MVT getRegisterType(LLVMContext &Context, EVT VT) const { 1520 if (VT.isSimple()) { 1521 assert((unsigned)VT.getSimpleVT().SimpleTy < 1522 array_lengthof(RegisterTypeForVT)); 1523 return RegisterTypeForVT[VT.getSimpleVT().SimpleTy]; 1524 } 1525 if (VT.isVector()) { 1526 EVT VT1; 1527 MVT RegisterVT; 1528 unsigned NumIntermediates; 1529 (void)getVectorTypeBreakdown(Context, VT, VT1, 1530 NumIntermediates, RegisterVT); 1531 return RegisterVT; 1532 } 1533 if (VT.isInteger()) { 1534 return getRegisterType(Context, getTypeToTransformTo(Context, VT)); 1535 } 1536 llvm_unreachable("Unsupported extended type!"); 1537 } 1538 1539 /// Return the number of registers that this ValueType will eventually 1540 /// require. 1541 /// 1542 /// This is one for any types promoted to live in larger registers, but may be 1543 /// more than one for types (like i64) that are split into pieces. For types 1544 /// like i140, which are first promoted then expanded, it is the number of 1545 /// registers needed to hold all the bits of the original type. For an i140 1546 /// on a 32 bit machine this means 5 registers. 1547 /// 1548 /// RegisterVT may be passed as a way to override the default settings, for 1549 /// instance with i128 inline assembly operands on SystemZ. 
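/// For example, following the rules in the implementation below, an i128
/// value on a target with 32-bit registers needs (128 + 31) / 32 = 4
/// registers.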
1550 virtual unsigned 1551 getNumRegisters(LLVMContext &Context, EVT VT, 1552 Optional<MVT> RegisterVT = None) const { 1553 if (VT.isSimple()) { 1554 assert((unsigned)VT.getSimpleVT().SimpleTy < 1555 array_lengthof(NumRegistersForVT)); 1556 return NumRegistersForVT[VT.getSimpleVT().SimpleTy]; 1557 } 1558 if (VT.isVector()) { 1559 EVT VT1; 1560 MVT VT2; 1561 unsigned NumIntermediates; 1562 return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2); 1563 } 1564 if (VT.isInteger()) { 1565 unsigned BitWidth = VT.getSizeInBits(); 1566 unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits(); 1567 return (BitWidth + RegWidth - 1) / RegWidth; 1568 } 1569 llvm_unreachable("Unsupported extended type!"); 1570 } 1571 1572 /// Certain combinations of ABIs, targets and features require that types be 1573 /// legal for some operations and not for others. 1574 /// For MIPS all vector types must be passed through the integer register set. 1575 virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, 1576 CallingConv::ID CC, EVT VT) const { 1577 return getRegisterType(Context, VT); 1578 } 1579 1580 /// Certain targets require unusual breakdowns of certain types. For MIPS, 1581 /// this occurs when a vector type is used, as vectors are passed through the 1582 /// integer register set. 1583 virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, 1584 CallingConv::ID CC, 1585 EVT VT) const { 1586 return getNumRegisters(Context, VT); 1587 } 1588 1589 /// Certain targets have context-sensitive alignment requirements, where one 1590 /// type has the alignment requirement of another type. 1591 virtual Align getABIAlignmentForCallingConv(Type *ArgTy, 1592 const DataLayout &DL) const { 1593 return DL.getABITypeAlign(ArgTy); 1594 } 1595 1596 /// If true, then instruction selection should seek to shrink the FP constant 1597 /// of the specified type to a smaller type in order to save space and / or 1598 /// reduce runtime. 1599 virtual bool ShouldShrinkFPConstant(EVT) const { return true; } 1600 1601 /// Return true if it is profitable to reduce a load to a smaller type. 1602 /// Example: (i16 (trunc (i32 (load x)))) -> i16 load x 1603 virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, 1604 EVT NewVT) const { 1605 // By default, assume that it is cheaper to extract a subvector from a wide 1606 // vector load rather than creating multiple narrow vector loads. 1607 if (NewVT.isVector() && !Load->hasOneUse()) 1608 return false; 1609 1610 return true; 1611 } 1612 1613 /// When splitting a value of the specified type into parts, does the Lo 1614 /// or Hi part come first? This usually follows the endianness, except 1615 /// for ppcf128, where the Hi part always comes first. 1616 bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const { 1617 return DL.isBigEndian() || VT == MVT::ppcf128; 1618 } 1619 1620 /// If true, the target has custom DAG combine transformations that it can 1621 /// perform for the specified node. 1622 bool hasTargetDAGCombine(ISD::NodeType NT) const { 1623 assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray)); 1624 return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7)); 1625 } 1626 1627 unsigned getGatherAllAliasesMaxDepth() const { 1628 return GatherAllAliasesMaxDepth; 1629 } 1630 1631 /// Returns the size of the platform's va_list object.
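/// By default this is the size in bits of a pointer (see the implementation
/// below); a target whose va_list is a larger struct should override this
/// accordingly.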
1632 virtual unsigned getVaListSizeInBits(const DataLayout &DL) const { 1633 return getPointerTy(DL).getSizeInBits(); 1634 } 1635 1636 /// Get maximum # of store operations permitted for llvm.memset 1637 /// 1638 /// This function returns the maximum number of store operations permitted 1639 /// to replace a call to llvm.memset. The value is set by the target at the 1640 /// performance threshold for such a replacement. If OptSize is true, 1641 /// return the limit for functions that have OptSize attribute. 1642 unsigned getMaxStoresPerMemset(bool OptSize) const { 1643 return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset; 1644 } 1645 1646 /// Get maximum # of store operations permitted for llvm.memcpy 1647 /// 1648 /// This function returns the maximum number of store operations permitted 1649 /// to replace a call to llvm.memcpy. The value is set by the target at the 1650 /// performance threshold for such a replacement. If OptSize is true, 1651 /// return the limit for functions that have OptSize attribute. 1652 unsigned getMaxStoresPerMemcpy(bool OptSize) const { 1653 return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy; 1654 } 1655 1656 /// Get maximum # of store operations to be glued together 1657 /// 1658 /// This function returns the maximum number of store operations permitted 1659 /// to glue together during lowering of llvm.memcpy. The value is set by 1660 /// the target at the performance threshold for such a replacement. 1661 virtual unsigned getMaxGluedStoresPerMemcpy() const { 1662 return MaxGluedStoresPerMemcpy; 1663 } 1664 1665 /// Get maximum # of load operations permitted for memcmp 1666 /// 1667 /// This function returns the maximum number of load operations permitted 1668 /// to replace a call to memcmp. The value is set by the target at the 1669 /// performance threshold for such a replacement. If OptSize is true, 1670 /// return the limit for functions that have OptSize attribute. 1671 unsigned getMaxExpandSizeMemcmp(bool OptSize) const { 1672 return OptSize ? MaxLoadsPerMemcmpOptSize : MaxLoadsPerMemcmp; 1673 } 1674 1675 /// Get maximum # of store operations permitted for llvm.memmove 1676 /// 1677 /// This function returns the maximum number of store operations permitted 1678 /// to replace a call to llvm.memmove. The value is set by the target at the 1679 /// performance threshold for such a replacement. If OptSize is true, 1680 /// return the limit for functions that have OptSize attribute. 1681 unsigned getMaxStoresPerMemmove(bool OptSize) const { 1682 return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove; 1683 } 1684 1685 /// Determine if the target supports unaligned memory accesses. 1686 /// 1687 /// This function returns true if the target allows unaligned memory accesses 1688 /// of the specified type in the given address space. If true, it also returns 1689 /// whether the unaligned memory access is "fast" in the last argument by 1690 /// reference. This is used, for example, in situations where an array 1691 /// copy/move/set is converted to a sequence of store operations. Its use 1692 /// helps to ensure that such replacements don't generate code that causes an 1693 /// alignment error (trap) on the target machine. 1694 virtual bool allowsMisalignedMemoryAccesses( 1695 EVT, unsigned AddrSpace = 0, Align Alignment = Align(1), 1696 MachineMemOperand::Flags Flags = MachineMemOperand::MONone, 1697 bool * /*Fast*/ = nullptr) const { 1698 return false; 1699 } 1700 1701 /// LLT handling variant.
1702 virtual bool allowsMisalignedMemoryAccesses( 1703 LLT, unsigned AddrSpace = 0, Align Alignment = Align(1), 1704 MachineMemOperand::Flags Flags = MachineMemOperand::MONone, 1705 bool * /*Fast*/ = nullptr) const { 1706 return false; 1707 } 1708 1709 /// This function returns true if the memory access is aligned or if the 1710 /// target allows this specific unaligned memory access. If the access is 1711 /// allowed, the optional final parameter returns if the access is also fast 1712 /// (as defined by the target). 1713 bool allowsMemoryAccessForAlignment( 1714 LLVMContext &Context, const DataLayout &DL, EVT VT, 1715 unsigned AddrSpace = 0, Align Alignment = Align(1), 1716 MachineMemOperand::Flags Flags = MachineMemOperand::MONone, 1717 bool *Fast = nullptr) const; 1718 1719 /// Return true if the memory access of this type is aligned or if the target 1720 /// allows this specific unaligned access for the given MachineMemOperand. 1721 /// If the access is allowed, the optional final parameter returns if the 1722 /// access is also fast (as defined by the target). 1723 bool allowsMemoryAccessForAlignment(LLVMContext &Context, 1724 const DataLayout &DL, EVT VT, 1725 const MachineMemOperand &MMO, 1726 bool *Fast = nullptr) const; 1727 1728 /// Return true if the target supports a memory access of this type for the 1729 /// given address space and alignment. If the access is allowed, the optional 1730 /// final parameter returns if the access is also fast (as defined by the 1731 /// target). 1732 virtual bool 1733 allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, 1734 unsigned AddrSpace = 0, Align Alignment = Align(1), 1735 MachineMemOperand::Flags Flags = MachineMemOperand::MONone, 1736 bool *Fast = nullptr) const; 1737 1738 /// Return true if the target supports a memory access of this type for the 1739 /// given MachineMemOperand. If the access is allowed, the optional 1740 /// final parameter returns if the access is also fast (as defined by the 1741 /// target). 1742 bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, 1743 const MachineMemOperand &MMO, 1744 bool *Fast = nullptr) const; 1745 1746 /// LLT handling variant. 1747 bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, LLT Ty, 1748 const MachineMemOperand &MMO, 1749 bool *Fast = nullptr) const; 1750 1751 /// Returns the target specific optimal type for load and store operations as 1752 /// a result of memset, memcpy, and memmove lowering. 1753 /// It returns EVT::Other if the type should be determined using generic 1754 /// target-independent logic. 1755 virtual EVT 1756 getOptimalMemOpType(const MemOp &Op, 1757 const AttributeList & /*FuncAttributes*/) const { 1758 return MVT::Other; 1759 } 1760 1761 /// LLT returning variant. 1762 virtual LLT 1763 getOptimalMemOpLLT(const MemOp &Op, 1764 const AttributeList & /*FuncAttributes*/) const { 1765 return LLT(); 1766 } 1767 1768 /// Returns true if it's safe to use load / store of the specified type to 1769 /// expand memcpy / memset inline. 1770 /// 1771 /// This is mostly true for all types except for some special cases. For 1772 /// example, on X86 targets without SSE2 f64 load / store are done with fldl / 1773 /// fstpl which also does type conversion. Note the specified type doesn't 1774 /// have to be legal as the hook is used before type legalization. 1775 virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; } 1776 1777 /// Return lower limit for number of blocks in a jump table. 
1778 virtual unsigned getMinimumJumpTableEntries() const; 1779 1780 /// Return lower limit of the density in a jump table. 1781 unsigned getMinimumJumpTableDensity(bool OptForSize) const; 1782 1783 /// Return upper limit for number of entries in a jump table. 1784 /// Zero if no limit. 1785 unsigned getMaximumJumpTableSize() const; 1786 1787 virtual bool isJumpTableRelative() const; 1788 1789 /// If a physical register, this specifies the register that 1790 /// llvm.stacksave/llvm.stackrestore should save and restore. 1791 Register getStackPointerRegisterToSaveRestore() const { 1792 return StackPointerRegisterToSaveRestore; 1793 } 1794 1795 /// If a physical register, this returns the register that receives the 1796 /// exception address on entry to an EH pad. 1797 virtual Register 1798 getExceptionPointerRegister(const Constant *PersonalityFn) const { 1799 return Register(); 1800 } 1801 1802 /// If a physical register, this returns the register that receives the 1803 /// exception typeid on entry to a landing pad. 1804 virtual Register 1805 getExceptionSelectorRegister(const Constant *PersonalityFn) const { 1806 return Register(); 1807 } 1808 1809 virtual bool needsFixedCatchObjects() const { 1810 report_fatal_error("Funclet EH is not implemented for this target"); 1811 } 1812 1813 /// Return the minimum stack alignment of an argument. 1814 Align getMinStackArgumentAlignment() const { 1815 return MinStackArgumentAlignment; 1816 } 1817 1818 /// Return the minimum function alignment. 1819 Align getMinFunctionAlignment() const { return MinFunctionAlignment; } 1820 1821 /// Return the preferred function alignment. 1822 Align getPrefFunctionAlignment() const { return PrefFunctionAlignment; } 1823 1824 /// Return the preferred loop alignment. 1825 virtual Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const; 1826 1827 /// Return the maximum number of bytes allowed to be emitted when padding for 1828 /// alignment. 1829 virtual unsigned 1830 getMaxPermittedBytesForAlignment(MachineBasicBlock *MBB) const; 1831 1832 /// Should loops be aligned even when the function is marked OptSize (but not 1833 /// MinSize)? 1834 virtual bool alignLoopsWithOptSize() const { return false; } 1835 1836 /// If the target has a standard location for the stack protector guard, 1837 /// returns the address of that location. Otherwise, returns nullptr. 1838 /// DEPRECATED: please override useLoadStackGuardNode and customize 1839 /// LOAD_STACK_GUARD, or customize \@llvm.stackguard(). 1840 virtual Value *getIRStackGuard(IRBuilderBase &IRB) const; 1841 1842 /// Inserts necessary declarations for SSP (stack protection) purposes. 1843 /// Should be used only when getIRStackGuard returns nullptr. 1844 virtual void insertSSPDeclarations(Module &M) const; 1845 1846 /// Return the variable that was previously inserted by insertSSPDeclarations, 1847 /// if any, otherwise return nullptr. Should be used only when 1848 /// getIRStackGuard returns nullptr. 1849 virtual Value *getSDagStackGuard(const Module &M) const; 1850 1851 /// If this function returns true, stack protection checks should XOR the 1852 /// frame pointer (or whichever pointer is used to address locals) into the 1853 /// stack guard value before checking it. getIRStackGuard must return nullptr 1854 /// if this returns true. 1855 virtual bool useStackGuardXorFP() const { return false; } 1856 1857 /// If the target has a standard stack protection check function that 1858 /// performs validation and error handling, returns the function.
Otherwise, 1859 /// returns nullptr. The function must previously have been inserted by 1860 /// insertSSPDeclarations, and should be used only when getIRStackGuard returns nullptr. 1861 virtual Function *getSSPStackGuardCheck(const Module &M) const; 1862 1863 /// \returns true if a constant G_UBFX is legal on the target. 1864 virtual bool isConstantUnsignedBitfieldExtractLegal(unsigned Opc, LLT Ty1, 1865 LLT Ty2) const { 1866 return false; 1867 } 1868 1869 protected: 1870 Value *getDefaultSafeStackPointerLocation(IRBuilderBase &IRB, 1871 bool UseTLS) const; 1872 1873 public: 1874 /// Returns the target-specific address of the unsafe stack pointer. 1875 virtual Value *getSafeStackPointerLocation(IRBuilderBase &IRB) const; 1876 1877 virtual bool hasStackProbeSymbol(MachineFunction &MF) const { return false; } 1878 1879 virtual bool hasInlineStackProbe(MachineFunction &MF) const { return false; } 1880 1881 /// Returns the name of the symbol used to emit stack probes, or the empty 1882 /// string if not applicable. 1883 virtual StringRef getStackProbeSymbolName(MachineFunction &MF) const { 1884 return ""; 1885 } 1886 1887 /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we 1888 /// are happy to sink it into basic blocks. A cast may be free, but not 1889 /// necessarily a no-op, e.g. a free truncate from a 64-bit to a 32-bit pointer. 1890 virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const; 1891 1892 /// Return true if the pointer arguments to CI should be aligned by aligning 1893 /// the object whose address is being passed. If so then MinSize is set to the 1894 /// minimum size the object must be to be aligned and PrefAlign is set to the 1895 /// preferred alignment. 1896 virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/, 1897 Align & /*PrefAlign*/) const { 1898 return false; 1899 } 1900 1901 //===--------------------------------------------------------------------===// 1902 /// \name Helpers for TargetTransformInfo implementations 1903 /// @{ 1904 1905 /// Get the ISD node that corresponds to the Instruction class opcode. 1906 int InstructionOpcodeToISD(unsigned Opcode) const; 1907 1908 /// Estimate the cost of type-legalization and the legalized type. 1909 std::pair<InstructionCost, MVT> getTypeLegalizationCost(const DataLayout &DL, 1910 Type *Ty) const; 1911 1912 /// @} 1913 1914 //===--------------------------------------------------------------------===// 1915 /// \name Helpers for atomic expansion. 1916 /// @{ 1917 1918 /// Returns the maximum atomic operation size (in bits) supported by 1919 /// the backend. Atomic operations greater than this size (as well 1920 /// as ones that are not naturally aligned) will be expanded by 1921 /// AtomicExpandPass into an __atomic_* library call. 1922 unsigned getMaxAtomicSizeInBitsSupported() const { 1923 return MaxAtomicSizeInBitsSupported; 1924 } 1925 1926 /// Returns the size of the smallest cmpxchg or ll/sc instruction 1927 /// the backend supports. Any smaller operations are widened in 1928 /// AtomicExpandPass. 1929 /// 1930 /// Note that *unlike* operations above the maximum size, atomic ops 1931 /// are still natively supported below the minimum; they just 1932 /// require a more complex expansion. 1933 unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; } 1934 1935 /// Whether the target supports unaligned atomic operations.
1936 bool supportsUnalignedAtomics() const { return SupportsUnalignedAtomics; } 1937 1938 /// Whether AtomicExpandPass should automatically insert fences and reduce 1939 /// ordering for this atomic. This should be true for most architectures with 1940 /// weak memory ordering. Defaults to false. 1941 virtual bool shouldInsertFencesForAtomic(const Instruction *I) const { 1942 return false; 1943 } 1944 1945 /// Perform a load-linked operation on Addr, returning a "Value *" with the 1946 /// corresponding pointee type. This may entail some non-trivial operations to 1947 /// truncate or reconstruct types that will be illegal in the backend. See 1948 /// ARMISelLowering for an example implementation. 1949 virtual Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, 1950 Value *Addr, AtomicOrdering Ord) const { 1951 llvm_unreachable("Load linked unimplemented on this target"); 1952 } 1953 1954 /// Perform a store-conditional operation to Addr. Return the status of the 1955 /// store. This should be 0 if the store succeeded, non-zero otherwise. 1956 virtual Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val, 1957 Value *Addr, AtomicOrdering Ord) const { 1958 llvm_unreachable("Store conditional unimplemented on this target"); 1959 } 1960 1961 /// Perform a masked atomicrmw using a target-specific intrinsic. This 1962 /// represents the core LL/SC loop which will be lowered at a late stage by 1963 /// the backend. 1964 virtual Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, 1965 AtomicRMWInst *AI, 1966 Value *AlignedAddr, Value *Incr, 1967 Value *Mask, Value *ShiftAmt, 1968 AtomicOrdering Ord) const { 1969 llvm_unreachable("Masked atomicrmw expansion unimplemented on this target"); 1970 } 1971 1972 /// Perform a bit test atomicrmw using a target-specific intrinsic. This 1973 /// represents the combined bit test intrinsic which will be lowered at a late 1974 /// stage by the backend. 1975 virtual void emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const { 1976 llvm_unreachable( 1977 "Bit test atomicrmw expansion unimplemented on this target"); 1978 } 1979 1980 /// Perform a masked cmpxchg using a target-specific intrinsic. This 1981 /// represents the core LL/SC loop which will be lowered at a late stage by 1982 /// the backend. 1983 virtual Value *emitMaskedAtomicCmpXchgIntrinsic( 1984 IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr, 1985 Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const { 1986 llvm_unreachable("Masked cmpxchg expansion unimplemented on this target"); 1987 } 1988 1989 /// Inserts in the IR a target-specific intrinsic specifying a fence. 1990 /// It is called by AtomicExpandPass before expanding an 1991 /// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad 1992 /// if shouldInsertFencesForAtomic returns true. 1993 /// 1994 /// Inst is the original atomic instruction, prior to other expansions that 1995 /// may be performed. 1996 /// 1997 /// This function should either return a nullptr, or a pointer to an IR-level 1998 /// Instruction*. Even complex fence sequences can be represented by a 1999 /// single Instruction* through an intrinsic to be lowered later. 2000 /// Backends should override this method to produce target-specific intrinsic 2001 /// for their fences. 2002 /// FIXME: Please note that the default implementation here in terms of 2003 /// IR-level fences exists for historical/compatibility reasons and is 2004 /// *unsound* ! Fences cannot, in general, be used to restore sequential 2005 /// consistency. 
For example, consider the following scenario: 2006 /// atomic<int> x = y = 0; 2007 /// int r1, r2, r3, r4; 2008 /// Thread 0: 2009 /// x.store(1); 2010 /// Thread 1: 2011 /// y.store(1); 2012 /// Thread 2: 2013 /// r1 = x.load(); 2014 /// r2 = y.load(); 2015 /// Thread 3: 2016 /// r3 = y.load(); 2017 /// r4 = x.load(); 2018 /// r1 = r3 = 1 and r2 = r4 = 0 is impossible as long as the accesses are all 2019 /// seq_cst. But if they are lowered to monotonic accesses, no amount of 2020 /// IR-level fences can prevent it. 2021 /// @{ 2022 virtual Instruction *emitLeadingFence(IRBuilderBase &Builder, 2023 Instruction *Inst, 2024 AtomicOrdering Ord) const; 2025 2026 virtual Instruction *emitTrailingFence(IRBuilderBase &Builder, 2027 Instruction *Inst, 2028 AtomicOrdering Ord) const; 2029 /// @} 2030 2031 // Emits code that executes when the comparison result in the ll/sc 2032 // expansion of a cmpxchg instruction is such that the store-conditional will 2033 // not execute. This makes it possible to balance out the load-linked with 2034 // a dedicated instruction, if desired. 2035 // E.g., on ARM, if ldrex isn't followed by strex, the exclusive monitor would 2036 // be unnecessarily held, except if clrex, inserted by this hook, is executed. 2037 virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const {} 2038 2039 /// Returns true if arguments should be sign-extended in lib calls. 2040 virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const { 2041 return IsSigned; 2042 } 2043 2044 /// Returns true if arguments should be extended in lib calls. 2045 virtual bool shouldExtendTypeInLibCall(EVT Type) const { 2046 return true; 2047 } 2048 2049 /// Returns how the given (atomic) load should be expanded by the 2050 /// IR-level AtomicExpand pass. 2051 virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const { 2052 return AtomicExpansionKind::None; 2053 } 2054 2055 /// Returns how the given (atomic) load should be cast by the IR-level 2056 /// AtomicExpand pass. 2057 virtual AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const { 2058 if (LI->getType()->isFloatingPointTy()) 2059 return AtomicExpansionKind::CastToInteger; 2060 return AtomicExpansionKind::None; 2061 } 2062 2063 /// Returns how the given (atomic) store should be expanded by the IR-level 2064 /// AtomicExpand pass. For instance, AtomicExpansionKind::Expand will try 2065 /// to use an atomicrmw xchg. 2066 virtual AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const { 2067 return AtomicExpansionKind::None; 2068 } 2069 2070 /// Returns how the given (atomic) store should be cast by the IR-level 2071 /// AtomicExpand pass. For instance, AtomicExpansionKind::CastToInteger 2072 /// will try to cast the operands to integer values. 2073 virtual AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const { 2074 if (SI->getValueOperand()->getType()->isFloatingPointTy()) 2075 return AtomicExpansionKind::CastToInteger; 2076 return AtomicExpansionKind::None; 2077 } 2078 2079 /// Returns how the given atomic cmpxchg should be expanded by the IR-level 2080 /// AtomicExpand pass. 2081 virtual AtomicExpansionKind 2082 shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const { 2083 return AtomicExpansionKind::None; 2084 } 2085 2086 /// Returns how the IR-level AtomicExpand pass should expand the given 2087 /// AtomicRMW, if at all. The default expands floating-point operations via a cmpxchg loop and leaves everything else alone.
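/// For example, a target with native floating-point atomicrmw instructions
/// could override this to return AtomicExpansionKind::None for FP operations,
/// while one with no atomic support at all might return
/// AtomicExpansionKind::CmpXChg across the board (illustrative possibilities,
/// not requirements of this interface).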
2088 virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const { 2089 return RMW->isFloatingPointOperation() ? 2090 AtomicExpansionKind::CmpXChg : AtomicExpansionKind::None; 2091 } 2092 2093 /// Returns how the given atomicrmw should be cast by the IR-level 2094 /// AtomicExpand pass. 2095 virtual AtomicExpansionKind 2096 shouldCastAtomicRMWIInIR(AtomicRMWInst *RMWI) const { 2097 if (RMWI->getOperation() == AtomicRMWInst::Xchg && 2098 (RMWI->getValOperand()->getType()->isFloatingPointTy() || 2099 RMWI->getValOperand()->getType()->isPointerTy())) 2100 return AtomicExpansionKind::CastToInteger; 2101 2102 return AtomicExpansionKind::None; 2103 } 2104 2105 /// On some platforms, an AtomicRMW that never actually modifies the value 2106 /// (such as fetch_add of 0) can be turned into a fence followed by an 2107 /// atomic load. This may sound useless, but it makes it possible for the 2108 /// processor to keep the cacheline shared, dramatically improving 2109 /// performance. And such idempotent RMWs are useful for implementing some 2110 /// kinds of locks; see for example (justification + benchmarks): 2111 /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf 2112 /// This method tries doing that transformation, returning the atomic load if 2113 /// it succeeds, and nullptr otherwise. 2114 /// If shouldExpandAtomicLoadInIR requests expansion of that load, it will 2115 /// undergo another round of expansion. 2116 virtual LoadInst * 2117 lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const { 2118 return nullptr; 2119 } 2120 2121 /// Returns how the platform's atomic operations are extended (ZERO_EXTEND, 2122 /// SIGN_EXTEND, or ANY_EXTEND). 2123 virtual ISD::NodeType getExtendForAtomicOps() const { 2124 return ISD::ZERO_EXTEND; 2125 } 2126 2127 /// Returns how the platform's atomic compare and swap expects its comparison 2128 /// value to be extended (ZERO_EXTEND, SIGN_EXTEND, or ANY_EXTEND). This is 2129 /// separate from getExtendForAtomicOps, which is concerned with the 2130 /// sign-extension of the instruction's output, whereas here we are concerned 2131 /// with the sign-extension of the input. For targets with compare-and-swap 2132 /// instructions (or sub-word comparisons in their LL/SC loop expansions), 2133 /// the input can be ANY_EXTEND, but the output will still have a specific 2134 /// extension. 2135 virtual ISD::NodeType getExtendForAtomicCmpSwapArg() const { 2136 return ISD::ANY_EXTEND; 2137 } 2138 2139 /// @} 2140 2141 /// Returns true if we should normalize 2142 /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and 2143 /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)) if it is likely 2144 /// that it saves us from materializing N0 and N1 in an integer register. 2145 /// Targets that are able to perform and/or on flags should return false here. 2146 virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context, 2147 EVT VT) const { 2148 // If a target has multiple condition registers, then it likely has logical 2149 // operations on those registers. 2150 if (hasMultipleConditionRegisters()) 2151 return false; 2152 // Only do the transform if the value won't be split into multiple 2153 // registers.
2154 LegalizeTypeAction Action = getTypeAction(Context, VT); 2155 return Action != TypeExpandInteger && Action != TypeExpandFloat && 2156 Action != TypeSplitVector; 2157 } 2158 2159 virtual bool isProfitableToCombineMinNumMaxNum(EVT VT) const { return true; } 2160 2161 /// Return true if a select of constants (select Cond, C1, C2) should be 2162 /// transformed into simple math ops with the condition value. For example: 2163 /// select Cond, C1, C1-1 --> add (zext Cond), C1-1 2164 virtual bool convertSelectOfConstantsToMath(EVT VT) const { 2165 return false; 2166 } 2167 2168 /// Return true if it is profitable to transform an integer 2169 /// multiplication-by-constant into simpler operations like shifts and adds. 2170 /// This may be true if the target does not directly support the 2171 /// multiplication operation for the specified type or the sequence of simpler 2172 /// ops is faster than the multiply. 2173 virtual bool decomposeMulByConstant(LLVMContext &Context, 2174 EVT VT, SDValue C) const { 2175 return false; 2176 } 2177 2178 /// Return true if it may be profitable to transform 2179 /// (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2). 2180 /// This may not be true if c1 and c2 can be represented as immediates but 2181 /// c1*c2 cannot, for example. 2182 /// The target should check if c1, c2 and c1*c2 can be represented as 2183 /// immediates, or have to be materialized into registers. If it is not sure 2184 /// about some cases, a default true can be returned to let the DAGCombiner 2185 /// decide. 2186 /// AddNode is (add x, c1), and ConstNode is c2. 2187 virtual bool isMulAddWithConstProfitable(SDValue AddNode, 2188 SDValue ConstNode) const { 2189 return true; 2190 } 2191 2192 /// Return true if it is more correct/profitable to use strict FP_TO_INT 2193 /// conversion operations - canonicalizing the FP source value instead of 2194 /// converting all cases and then selecting based on value. 2195 /// This may be true if the target throws exceptions for out of bounds 2196 /// conversions or has fast FP CMOV. 2197 virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT, 2198 bool IsSigned) const { 2199 return false; 2200 } 2201 2202 /// Return true if it is beneficial to expand an @llvm.powi.* intrinsic. 2203 /// If not optimizing for size, expanding @llvm.powi.* intrinsics is always 2204 /// considered beneficial. 2205 /// If optimizing for size, expansion is only considered beneficial for up to 2206 /// 5 multiplies and a divide (if the exponent is negative). 2207 bool isBeneficialToExpandPowI(int Exponent, bool OptForSize) const { 2208 if (Exponent < 0) 2209 Exponent = -Exponent; 2210 return !OptForSize || 2211 (countPopulation((unsigned int)Exponent) + Log2_32(Exponent) < 7); 2212 } 2213 2214 //===--------------------------------------------------------------------===// 2215 // TargetLowering Configuration Methods - These methods should be invoked by 2216 // the derived class constructor to configure this object for the target. 2217 // 2218 protected: 2219 /// Specify how the target extends the result of integer and floating point 2220 /// boolean values from i1 to a wider type. See getBooleanContents. 2221 void setBooleanContents(BooleanContent Ty) { 2222 BooleanContents = Ty; 2223 BooleanFloatContents = Ty; 2224 } 2225 2226 /// Specify how the target extends the result of integer and floating point 2227 /// boolean values from i1 to a wider type. See getBooleanContents.
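/// For illustration (a hypothetical configuration, not one made in this
/// file):
///   setBooleanContents(ZeroOrOneBooleanContent, ZeroOrNegativeOneBooleanContent);
/// would declare that integer setcc results are 0/1 while floating-point
/// boolean results are 0/-1.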
2228 void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) { 2229 BooleanContents = IntTy; 2230 BooleanFloatContents = FloatTy; 2231 } 2232 2233 /// Specify how the target extends the result of a vector boolean value from a 2234 /// vector of i1 to a wider type. See getBooleanContents. 2235 void setBooleanVectorContents(BooleanContent Ty) { 2236 BooleanVectorContents = Ty; 2237 } 2238 2239 /// Specify the target scheduling preference. 2240 void setSchedulingPreference(Sched::Preference Pref) { 2241 SchedPreferenceInfo = Pref; 2242 } 2243 2244 /// Indicate the minimum number of blocks to generate jump tables. 2245 void setMinimumJumpTableEntries(unsigned Val); 2246 2247 /// Indicate the maximum number of entries in jump tables. 2248 /// Set to zero to generate unlimited jump tables. 2249 void setMaximumJumpTableSize(unsigned); 2250 2251 /// If set to a physical register, this specifies the register that 2252 /// llvm.stacksave/llvm.stackrestore should save and restore. 2253 void setStackPointerRegisterToSaveRestore(Register R) { 2254 StackPointerRegisterToSaveRestore = R; 2255 } 2256 2257 /// Tells the code generator that the target has multiple (allocatable) 2258 /// condition registers that can be used to store the results of comparisons 2259 /// for use by selects and conditional branches. With multiple condition 2260 /// registers, the code generator will not aggressively sink comparisons into 2261 /// the blocks of their users. 2262 void setHasMultipleConditionRegisters(bool hasManyRegs = true) { 2263 HasMultipleConditionRegisters = hasManyRegs; 2264 } 2265 2266 /// Tells the code generator that the target has BitExtract instructions. 2267 /// The code generator will aggressively sink "shift"s into the blocks of 2268 /// their users if the users will generate "and" instructions which can be 2269 /// combined with "shift" into BitExtract instructions. 2270 void setHasExtractBitsInsn(bool hasExtractInsn = true) { 2271 HasExtractBitsInsn = hasExtractInsn; 2272 } 2273 2274 /// Tells the code generator not to expand logic operations on comparison 2275 /// predicates into separate sequences that increase the amount of flow 2276 /// control. 2277 void setJumpIsExpensive(bool isExpensive = true); 2278 2279 /// Tells the code generator which bitwidths to bypass. 2280 void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) { 2281 BypassSlowDivWidths[SlowBitWidth] = FastBitWidth; 2282 } 2283 2284 /// Add the specified register class as an available regclass for the 2285 /// specified value type. This indicates the selector can handle values of 2286 /// that class natively. 2287 void addRegisterClass(MVT VT, const TargetRegisterClass *RC) { 2288 assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT)); 2289 RegClassForVT[VT.SimpleTy] = RC; 2290 } 2291 2292 /// Return the largest legal super-reg register class of the register class 2293 /// for the specified type and its associated "cost". 2294 virtual std::pair<const TargetRegisterClass *, uint8_t> 2295 findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const; 2296 2297 /// Once all of the register classes are added, this allows us to compute 2298 /// derived properties we expose. 2299 void computeRegisterProperties(const TargetRegisterInfo *TRI); 2300 2301 /// Indicate that the specified operation does not work with the specified 2302 /// type and indicate what to do about it. Note that VT may refer to either 2303 /// the type of a result or that of an operand of Op.
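/// For example (illustrative only, not configuration taken from a real
/// target): a target without a native i64 multiply could request expansion
/// with
///   setOperationAction(ISD::MUL, MVT::i64, Expand);
/// while one that lowers i64 division itself would use
///   setOperationAction(ISD::SDIV, MVT::i64, Custom);
/// and implement the lowering in its LowerOperation hook.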
2304 void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action) { 2305 assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!"); 2306 OpActions[(unsigned)VT.SimpleTy][Op] = Action; 2307 } 2308 void setOperationAction(ArrayRef<unsigned> Ops, MVT VT, 2309 LegalizeAction Action) { 2310 for (auto Op : Ops) 2311 setOperationAction(Op, VT, Action); 2312 } 2313 void setOperationAction(ArrayRef<unsigned> Ops, ArrayRef<MVT> VTs, 2314 LegalizeAction Action) { 2315 for (auto VT : VTs) 2316 setOperationAction(Ops, VT, Action); 2317 } 2318 2319 /// Indicate that the specified load with extension does not work with the 2320 /// specified type and indicate what to do about it. 2321 void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, 2322 LegalizeAction Action) { 2323 assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() && 2324 MemVT.isValid() && "Table isn't big enough!"); 2325 assert((unsigned)Action < 0x10 && "too many bits for bitfield array"); 2326 unsigned Shift = 4 * ExtType; 2327 LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &= ~((uint16_t)0xF << Shift); 2328 LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |= (uint16_t)Action << Shift; 2329 } 2330 void setLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT, MVT MemVT, 2331 LegalizeAction Action) { 2332 for (auto ExtType : ExtTypes) 2333 setLoadExtAction(ExtType, ValVT, MemVT, Action); 2334 } 2335 void setLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT, 2336 ArrayRef<MVT> MemVTs, LegalizeAction Action) { 2337 for (auto MemVT : MemVTs) 2338 setLoadExtAction(ExtTypes, ValVT, MemVT, Action); 2339 } 2340 2341 /// Indicate that the specified truncating store does not work with the 2342 /// specified type and indicate what to do about it. 2343 void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action) { 2344 assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!"); 2345 TruncStoreActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy] = Action; 2346 } 2347 2348 /// Indicate that the specified indexed load does or does not work with the 2349 /// specified type and indicate what to do about it. 2350 /// 2351 /// NOTE: All indexed mode loads are initialized to Expand in 2352 /// TargetLowering.cpp 2353 void setIndexedLoadAction(ArrayRef<unsigned> IdxModes, MVT VT, 2354 LegalizeAction Action) { 2355 for (auto IdxMode : IdxModes) 2356 setIndexedModeAction(IdxMode, VT, IMAB_Load, Action); 2357 } 2358 2359 void setIndexedLoadAction(ArrayRef<unsigned> IdxModes, ArrayRef<MVT> VTs, 2360 LegalizeAction Action) { 2361 for (auto VT : VTs) 2362 setIndexedLoadAction(IdxModes, VT, Action); 2363 } 2364 2365 /// Indicate that the specified indexed store does or does not work with the 2366 /// specified type and indicate what to do about it. 2367 /// 2368 /// NOTE: All indexed mode stores are initialized to Expand in 2369 /// TargetLowering.cpp 2370 void setIndexedStoreAction(ArrayRef<unsigned> IdxModes, MVT VT, 2371 LegalizeAction Action) { 2372 for (auto IdxMode : IdxModes) 2373 setIndexedModeAction(IdxMode, VT, IMAB_Store, Action); 2374 } 2375 2376 void setIndexedStoreAction(ArrayRef<unsigned> IdxModes, ArrayRef<MVT> VTs, 2377 LegalizeAction Action) { 2378 for (auto VT : VTs) 2379 setIndexedStoreAction(IdxModes, VT, Action); 2380 } 2381 2382 /// Indicate that the specified indexed masked load does or does not work with 2383 /// the specified type and indicate what to do about it.
2384 /// 2385 /// NOTE: All indexed mode masked loads are initialized to Expand in 2386 /// TargetLowering.cpp 2387 void setIndexedMaskedLoadAction(unsigned IdxMode, MVT VT, 2388 LegalizeAction Action) { 2389 setIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad, Action); 2390 } 2391 2392 /// Indicate that the specified indexed masked store does or does not work 2393 /// with the specified type and indicate what to do about it. 2394 /// 2395 /// NOTE: All indexed mode masked stores are initialized to Expand in 2396 /// TargetLowering.cpp 2397 void setIndexedMaskedStoreAction(unsigned IdxMode, MVT VT, 2398 LegalizeAction Action) { 2399 setIndexedModeAction(IdxMode, VT, IMAB_MaskedStore, Action); 2400 } 2401 2402 /// Indicate that the specified condition code is or isn't supported on the 2403 /// target and indicate what to do about it. 2404 void setCondCodeAction(ArrayRef<ISD::CondCode> CCs, MVT VT, 2405 LegalizeAction Action) { 2406 for (auto CC : CCs) { 2407 assert(VT.isValid() && (unsigned)CC < array_lengthof(CondCodeActions) && 2408 "Table isn't big enough!"); 2409 assert((unsigned)Action < 0x10 && "too many bits for bitfield array"); 2410 /// The lower 3 bits of SimpleTy select the Nth 4-bit group in the 32-bit 2411 /// value, and the remaining upper bits index into the second dimension of 2412 /// the array to select which 32-bit value to use. 2413 uint32_t Shift = 4 * (VT.SimpleTy & 0x7); 2414 CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift); 2415 CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift; 2416 } 2417 } 2418 void setCondCodeAction(ArrayRef<ISD::CondCode> CCs, ArrayRef<MVT> VTs, 2419 LegalizeAction Action) { 2420 for (auto VT : VTs) 2421 setCondCodeAction(CCs, VT, Action); 2422 } 2423 2424 /// If Opc/OrigVT is specified as being promoted, the promotion code defaults 2425 /// to trying a larger integer/fp until it can find one that works. If that 2426 /// default is insufficient, this method can be used by the target to override 2427 /// the default. 2428 void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) { 2429 PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy; 2430 } 2431 2432 /// Convenience method to set an operation to Promote and specify the type 2433 /// in a single call. 2434 void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) { 2435 setOperationAction(Opc, OrigVT, Promote); 2436 AddPromotedToType(Opc, OrigVT, DestVT); 2437 } 2438 2439 /// Targets should invoke this method for each target independent node that 2440 /// they want to provide a custom DAG combiner for by implementing the 2441 /// PerformDAGCombine virtual method. 2442 void setTargetDAGCombine(ArrayRef<ISD::NodeType> NTs) { 2443 for (auto NT : NTs) { 2444 assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray)); 2445 TargetDAGCombineArray[NT >> 3] |= 1 << (NT & 7); 2446 } 2447 } 2448 2449 /// Set the target's minimum function alignment. 2450 void setMinFunctionAlignment(Align Alignment) { 2451 MinFunctionAlignment = Alignment; 2452 } 2453 2454 /// Set the target's preferred function alignment. This should be set if 2455 /// there is a performance benefit to higher-than-minimum alignment. 2456 void setPrefFunctionAlignment(Align Alignment) { 2457 PrefFunctionAlignment = Alignment; 2458 } 2459 2460 /// Set the target's preferred loop alignment. The default alignment is one, 2461 /// meaning the target does not care about loop alignment. The target may also 2462 /// override getPrefLoopAlignment to provide per-loop values.
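/// For example (illustrative, not a recommendation): a target whose
/// instruction fetch window is 16 bytes might call
///   setPrefLoopAlignment(Align(16));
/// in its TargetLowering constructor.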
2463 void setPrefLoopAlignment(Align Alignment) { PrefLoopAlignment = Alignment; } 2464 void setMaxBytesForAlignment(unsigned MaxBytes) { 2465 MaxBytesForAlignment = MaxBytes; 2466 } 2467 2468 /// Set the minimum stack alignment of an argument. 2469 void setMinStackArgumentAlignment(Align Alignment) { 2470 MinStackArgumentAlignment = Alignment; 2471 } 2472 2473 /// Set the maximum atomic operation size supported by the 2474 /// backend. Atomic operations greater than this size (as well as 2475 /// ones that are not naturally aligned), will be expanded by 2476 /// AtomicExpandPass into an __atomic_* library call. 2477 void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits) { 2478 MaxAtomicSizeInBitsSupported = SizeInBits; 2479 } 2480 2481 /// Sets the minimum cmpxchg or ll/sc size supported by the backend. 2482 void setMinCmpXchgSizeInBits(unsigned SizeInBits) { 2483 MinCmpXchgSizeInBits = SizeInBits; 2484 } 2485 2486 /// Sets whether unaligned atomic operations are supported. 2487 void setSupportsUnalignedAtomics(bool UnalignedSupported) { 2488 SupportsUnalignedAtomics = UnalignedSupported; 2489 } 2490 2491 public: 2492 //===--------------------------------------------------------------------===// 2493 // Addressing mode description hooks (used by LSR etc). 2494 // 2495 2496 /// CodeGenPrepare sinks address calculations into the same BB as Load/Store 2497 /// instructions reading the address. This allows as much computation as 2498 /// possible to be done in the address mode for that operand. This hook lets 2499 /// targets also pass back when this should be done on intrinsics which 2500 /// load/store. 2501 virtual bool getAddrModeArguments(IntrinsicInst * /*I*/, 2502 SmallVectorImpl<Value*> &/*Ops*/, 2503 Type *&/*AccessTy*/) const { 2504 return false; 2505 } 2506 2507 /// This represents an addressing mode of: 2508 /// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg 2509 /// If BaseGV is null, there is no BaseGV. 2510 /// If BaseOffs is zero, there is no base offset. 2511 /// If HasBaseReg is false, there is no base register. 2512 /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with 2513 /// no scale. 2514 struct AddrMode { 2515 GlobalValue *BaseGV = nullptr; 2516 int64_t BaseOffs = 0; 2517 bool HasBaseReg = false; 2518 int64_t Scale = 0; 2519 AddrMode() = default; 2520 }; 2521 2522 /// Return true if the addressing mode represented by AM is legal for this 2523 /// target, for a load/store of the specified type. 2524 /// 2525 /// The type may be VoidTy, in which case only return true if the addressing 2526 /// mode is legal for a load/store of any legal type. TODO: Handle 2527 /// pre/postinc as well. 2528 /// 2529 /// If the address space cannot be determined, it will be -1. 2530 /// 2531 /// TODO: Remove default argument 2532 virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, 2533 Type *Ty, unsigned AddrSpace, 2534 Instruction *I = nullptr) const; 2535 2536 /// Return the cost of the scaling factor used in the addressing mode 2537 /// represented by AM for this target, for a load/store of the specified type. 2538 /// 2539 /// If the AM is supported, the return value must be >= 0. 2540 /// If the AM is not supported, it returns a negative value. 2541 /// TODO: Handle pre/postinc as well. 2542 /// TODO: Remove default argument 2543 virtual InstructionCost getScalingFactorCost(const DataLayout &DL, 2544 const AddrMode &AM, Type *Ty, 2545 unsigned AS = 0) const { 2546 // Default: assume that any scaling factor used in a legal AM is free. 
2547 if (isLegalAddressingMode(DL, AM, Ty, AS)) 2548 return 0; 2549 return -1; 2550 } 2551 2552 /// Return true if the specified immediate is a legal icmp immediate, that is 2553 /// the target has icmp instructions which can compare a register against the 2554 /// immediate without having to materialize the immediate into a register. 2555 virtual bool isLegalICmpImmediate(int64_t) const { 2556 return true; 2557 } 2558 2559 /// Return true if the specified immediate is a legal add immediate, that is 2560 /// the target has add instructions which can add a register with the immediate 2561 /// without having to materialize the immediate into a register. 2562 virtual bool isLegalAddImmediate(int64_t) const { 2563 return true; 2564 } 2565 2566 /// Return true if the specified immediate is legal for the value input of a 2567 /// store instruction. 2568 virtual bool isLegalStoreImmediate(int64_t Value) const { 2569 // Default implementation assumes that at least 0 works since it is likely 2570 // that a zero register exists or a zero immediate is allowed. 2571 return Value == 0; 2572 } 2573 2574 /// Return true if it's significantly cheaper to shift a vector by a uniform 2575 /// scalar than by an amount which will vary across each lane. On x86 before 2576 /// AVX2 for example, there is a "psllw" instruction for the former case, but 2577 /// no simple instruction for a general "a << b" operation on vectors. 2578 /// This should also apply to lowering for vector funnel shifts (rotates). 2579 virtual bool isVectorShiftByScalarCheap(Type *Ty) const { 2580 return false; 2581 } 2582 2583 /// Given a shuffle vector SVI representing a vector splat, return a new 2584 /// scalar type of size equal to SVI's scalar type if the new type is more 2585 /// profitable. Returns nullptr otherwise. For example under MVE float splats 2586 /// are converted to integer to prevent the need to move from SPR to GPR 2587 /// registers. 2588 virtual Type* shouldConvertSplatType(ShuffleVectorInst* SVI) const { 2589 return nullptr; 2590 } 2591 2592 /// Given a set of interconnected phis of type 'From' that are loaded/stored 2593 /// or bitcast to type 'To', return true if the set should be converted to 2594 /// 'To'. 2595 virtual bool shouldConvertPhiType(Type *From, Type *To) const { 2596 return (From->isIntegerTy() || From->isFloatingPointTy()) && 2597 (To->isIntegerTy() || To->isFloatingPointTy()); 2598 } 2599 2600 /// Returns true if the opcode is a commutative binary operation. 2601 virtual bool isCommutativeBinOp(unsigned Opcode) const { 2602 // FIXME: This should get its info from the td file. 2603 switch (Opcode) { 2604 case ISD::ADD: 2605 case ISD::SMIN: 2606 case ISD::SMAX: 2607 case ISD::UMIN: 2608 case ISD::UMAX: 2609 case ISD::MUL: 2610 case ISD::MULHU: 2611 case ISD::MULHS: 2612 case ISD::SMUL_LOHI: 2613 case ISD::UMUL_LOHI: 2614 case ISD::FADD: 2615 case ISD::FMUL: 2616 case ISD::AND: 2617 case ISD::OR: 2618 case ISD::XOR: 2619 case ISD::SADDO: 2620 case ISD::UADDO: 2621 case ISD::ADDC: 2622 case ISD::ADDE: 2623 case ISD::SADDSAT: 2624 case ISD::UADDSAT: 2625 case ISD::FMINNUM: 2626 case ISD::FMAXNUM: 2627 case ISD::FMINNUM_IEEE: 2628 case ISD::FMAXNUM_IEEE: 2629 case ISD::FMINIMUM: 2630 case ISD::FMAXIMUM: 2631 case ISD::AVGFLOORS: 2632 case ISD::AVGFLOORU: 2633 case ISD::AVGCEILS: 2634 case ISD::AVGCEILU: 2635 return true; 2636 default: return false; 2637 } 2638 } 2639 2640 /// Return true if the node is a math/logic binary operator.
2641 virtual bool isBinOp(unsigned Opcode) const { 2642 // A commutative binop must be a binop. 2643 if (isCommutativeBinOp(Opcode)) 2644 return true; 2645 // These are non-commutative binops. 2646 switch (Opcode) { 2647 case ISD::SUB: 2648 case ISD::SHL: 2649 case ISD::SRL: 2650 case ISD::SRA: 2651 case ISD::ROTL: 2652 case ISD::ROTR: 2653 case ISD::SDIV: 2654 case ISD::UDIV: 2655 case ISD::SREM: 2656 case ISD::UREM: 2657 case ISD::SSUBSAT: 2658 case ISD::USUBSAT: 2659 case ISD::FSUB: 2660 case ISD::FDIV: 2661 case ISD::FREM: 2662 return true; 2663 default: 2664 return false; 2665 } 2666 } 2667 2668 /// Return true if it's free to truncate a value of type FromTy to type 2669 /// ToTy, e.g. on x86 it's free to truncate an i32 value in register EAX to i16 2670 /// by referencing its sub-register AX. 2671 /// Targets must return false when FromTy <= ToTy. 2672 virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const { 2673 return false; 2674 } 2675 2676 /// Return true if a truncation from FromTy to ToTy is permitted when deciding 2677 /// whether a call is in tail position. Typically this means that both results 2678 /// would be assigned to the same register or stack slot, but it could mean 2679 /// the target performs adequate checks of its own before proceeding with the 2680 /// tail call. Targets must return false when FromTy <= ToTy. 2681 virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const { 2682 return false; 2683 } 2684 2685 virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const { return false; } 2686 virtual bool isTruncateFree(LLT FromTy, LLT ToTy, const DataLayout &DL, 2687 LLVMContext &Ctx) const { 2688 return isTruncateFree(getApproximateEVTForLLT(FromTy, DL, Ctx), 2689 getApproximateEVTForLLT(ToTy, DL, Ctx)); 2690 } 2691 2692 virtual bool isProfitableToHoist(Instruction *I) const { return true; } 2693 2694 /// Return true if the extension represented by \p I is free. 2695 /// Unlike the is[Z|FP]ExtFree family, which is based on types, 2696 /// this method can use the context provided by \p I to decide 2697 /// whether or not \p I is free. 2698 /// This method extends the behavior of the is[Z|FP]ExtFree family. 2699 /// In other words, if is[Z|FP]ExtFree returns true, then this method 2700 /// returns true as well. The converse is not true. 2701 /// The target can perform the adequate checks by overriding isExtFreeImpl. 2702 /// \pre \p I must be a sign, zero, or fp extension. 2703 bool isExtFree(const Instruction *I) const { 2704 switch (I->getOpcode()) { 2705 case Instruction::FPExt: 2706 if (isFPExtFree(EVT::getEVT(I->getType()), 2707 EVT::getEVT(I->getOperand(0)->getType()))) 2708 return true; 2709 break; 2710 case Instruction::ZExt: 2711 if (isZExtFree(I->getOperand(0)->getType(), I->getType())) 2712 return true; 2713 break; 2714 case Instruction::SExt: 2715 break; 2716 default: 2717 llvm_unreachable("Instruction is not an extension"); 2718 } 2719 return isExtFreeImpl(I); 2720 } 2721 2722 /// Return true if \p Load and \p Ext can form an ExtLoad. 2723 /// For example, in AArch64 2724 /// %L = load i8, i8* %ptr 2725 /// %E = zext i8 %L to i32 2726 /// can be lowered into one load instruction 2727 /// ldrb w0, [x0] 2728 bool isExtLoad(const LoadInst *Load, const Instruction *Ext, 2729 const DataLayout &DL) const { 2730 EVT VT = getValueType(DL, Ext->getType()); 2731 EVT LoadVT = getValueType(DL, Load->getType()); 2732 2733 // If the load has other users and the truncate is not free, the ext 2734 // probably isn't free.
2735     if (!Load->hasOneUse() && (isTypeLegal(LoadVT) || !isTypeLegal(VT)) &&
2736         !isTruncateFree(Ext->getType(), Load->getType()))
2737       return false;
2738
2739     // Check whether the target supports casts folded into loads.
2740     unsigned LType;
2741     if (isa<ZExtInst>(Ext))
2742       LType = ISD::ZEXTLOAD;
2743     else {
2744       assert(isa<SExtInst>(Ext) && "Unexpected ext type!");
2745       LType = ISD::SEXTLOAD;
2746     }
2747
2748     return isLoadExtLegal(LType, VT, LoadVT);
2749   }
2750
2751   /// Return true if any actual instruction that defines a value of type FromTy
2752   /// implicitly zero-extends the value to ToTy in the result register.
2753   ///
2754   /// The function should return true when it is likely that the truncate can
2755   /// be freely folded with an instruction defining a value of FromTy. If
2756   /// the defining instruction is unknown (because you're looking at a
2757   /// function argument, PHI, etc.) then the target may require an
2758   /// explicit truncate, which is not necessarily free, but this function
2759   /// does not deal with those cases.
2760   /// Targets must return false when FromTy >= ToTy.
2761   virtual bool isZExtFree(Type *FromTy, Type *ToTy) const {
2762     return false;
2763   }
2764
2765   virtual bool isZExtFree(EVT FromTy, EVT ToTy) const { return false; }
2766   virtual bool isZExtFree(LLT FromTy, LLT ToTy, const DataLayout &DL,
2767                           LLVMContext &Ctx) const {
2768     return isZExtFree(getApproximateEVTForLLT(FromTy, DL, Ctx),
2769                       getApproximateEVTForLLT(ToTy, DL, Ctx));
2770   }
2771
2772   /// Return true if sign-extension from FromTy to ToTy is cheaper than
2773   /// zero-extension.
2774   virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const {
2775     return false;
2776   }
2777
2778   /// Return true if this constant should be sign extended when promoting to
2779   /// a larger type.
2780   virtual bool signExtendConstant(const ConstantInt *C) const { return false; }
2781
2782   /// Return true if sinking I's operands to the same basic block as I is
2783   /// profitable, e.g. because the operands can be folded into a target
2784   /// instruction during instruction selection. After calling the function
2785   /// \p Ops contains the Uses to sink ordered by dominance (dominating users
2786   /// come first).
2787   virtual bool shouldSinkOperands(Instruction *I,
2788                                   SmallVectorImpl<Use *> &Ops) const {
2789     return false;
2790   }
2791
2792   /// Return true if the target can combine two values of type LoadedType,
2793   /// loaded next to each other in memory, into a single paired load.
2794   /// RequiredAlignment gives the minimal alignment constraints that must be met
2795   /// to be able to select this paired load.
2796   ///
2797   /// This information is *not* used to generate actual paired loads, but it is
2798   /// used to generate a sequence of loads that is easier to combine into a
2799   /// paired load.
2800   /// For instance, something like this:
2801   ///   a = load i64* addr
2802   ///   b = trunc i64 a to i32
2803   ///   c = lshr i64 a, 32
2804   ///   d = trunc i64 c to i32
2805   /// will be optimized into:
2806   ///   b = load i32* addr1
2807   ///   d = load i32* addr2
2808   /// where addr1 = addr2 +/- sizeof(i32).
2809   ///
2810   /// In other words, unless the target performs post-isel load combining,
2811   /// this information should not be provided because it will generate more
2812   /// loads.
2813   virtual bool hasPairedLoad(EVT /*LoadedType*/,
2814                              Align & /*RequiredAlignment*/) const {
2815     return false;
2816   }
2817
2818   /// Return true if the target has a vector blend instruction.
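  /// (For example, x86 has BLENDVPS/PBLENDVB and AArch64 has BSL, so shuffle
  /// lowering and combines on those targets can profitably form blend
  /// patterns.)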
2819   virtual bool hasVectorBlend() const { return false; }
2820
2821   /// Get the maximum supported factor for interleaved memory accesses.
2822   /// Default to be the minimum interleave factor: 2.
2823   virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; }
2824
2825   /// Lower an interleaved load to target specific intrinsics. Return
2826   /// true on success.
2827   ///
2828   /// \p LI is the vector load instruction.
2829   /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector.
2830   /// \p Indices is the corresponding indices for each shufflevector.
2831   /// \p Factor is the interleave factor.
2832   virtual bool lowerInterleavedLoad(LoadInst *LI,
2833                                     ArrayRef<ShuffleVectorInst *> Shuffles,
2834                                     ArrayRef<unsigned> Indices,
2835                                     unsigned Factor) const {
2836     return false;
2837   }
2838
2839   /// Lower an interleaved store to target specific intrinsics. Return
2840   /// true on success.
2841   ///
2842   /// \p SI is the vector store instruction.
2843   /// \p SVI is the shufflevector to RE-interleave the stored vector.
2844   /// \p Factor is the interleave factor.
2845   virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
2846                                      unsigned Factor) const {
2847     return false;
2848   }
2849
2850   /// Return true if zero-extending the specified node Val to type VT2 is free
2851   /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
2852   /// because it's folded such as X86 zero-extending loads).
2853   virtual bool isZExtFree(SDValue Val, EVT VT2) const {
2854     return isZExtFree(Val.getValueType(), VT2);
2855   }
2856
2857   /// Return true if an fpext operation is free (for instance, because
2858   /// single-precision floating-point numbers are implicitly extended to
2859   /// double-precision).
2860   virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const {
2861     assert(SrcVT.isFloatingPoint() && DestVT.isFloatingPoint() &&
2862            "invalid fpext types");
2863     return false;
2864   }
2865
2866   /// Return true if an fpext operation input to an \p Opcode operation is free
2867   /// (for instance, because half-precision floating-point numbers are
2868   /// implicitly extended to single-precision) for an FMA instruction.
2869   virtual bool isFPExtFoldable(const MachineInstr &MI, unsigned Opcode,
2870                                LLT DestTy, LLT SrcTy) const {
2871     return false;
2872   }
2873
2874   /// Return true if an fpext operation input to an \p Opcode operation is free
2875   /// (for instance, because half-precision floating-point numbers are
2876   /// implicitly extended to single-precision) for an FMA instruction.
2877   virtual bool isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode,
2878                                EVT DestVT, EVT SrcVT) const {
2879     assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
2880            "invalid fpext types");
2881     return isFPExtFree(DestVT, SrcVT);
2882   }
2883
2884   /// Return true if folding a vector load into ExtVal (a sign, zero, or any
2885   /// extend node) is profitable.
2886   virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; }
2887
2888   /// Return true if an fneg operation is free to the point where it is never
2889   /// worthwhile to replace it with a bitwise operation.
2890   virtual bool isFNegFree(EVT VT) const {
2891     assert(VT.isFloatingPoint());
2892     return false;
2893   }
2894
2895   /// Return true if an fabs operation is free to the point where it is never
2896   /// worthwhile to replace it with a bitwise operation.
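  /// (The bitwise alternative is typically an AND that clears the sign bit,
  /// e.g. "and x, 0x7fffffff" on an f32 reinterpreted as an i32.)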
2897   virtual bool isFAbsFree(EVT VT) const {
2898     assert(VT.isFloatingPoint());
2899     return false;
2900   }
2901
2902   /// Return true if an FMA operation is faster than a pair of fmul and fadd
2903   /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
2904   /// returns true; otherwise fmuladd is expanded to fmul + fadd.
2905   ///
2906   /// NOTE: This may be called before legalization on types for which FMAs are
2907   /// not legal, but should return true if those types will eventually legalize
2908   /// to types that support FMAs. After legalization, it will only be called on
2909   /// types that support FMAs (via Legal or Custom actions).
2910   virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
2911                                           EVT) const {
2912     return false;
2913   }
2914
2915   /// Return true if an FMA operation is faster than a pair of fmul and fadd
2916   /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
2917   /// returns true; otherwise fmuladd is expanded to fmul + fadd.
2918   ///
2919   /// NOTE: This may be called before legalization on types for which FMAs are
2920   /// not legal, but should return true if those types will eventually legalize
2921   /// to types that support FMAs. After legalization, it will only be called on
2922   /// types that support FMAs (via Legal or Custom actions).
2923   virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
2924                                           LLT) const {
2925     return false;
2926   }
2927
2928   /// IR version
2929   virtual bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *) const {
2930     return false;
2931   }
2932
2933   /// Returns true if \p MI can be combined with another instruction to
2934   /// form TargetOpcode::G_FMAD. \p MI may be a TargetOpcode::G_FADD,
2935   /// TargetOpcode::G_FSUB, or a TargetOpcode::G_FMUL which will be
2936   /// distributed into an fadd/fsub.
2937   virtual bool isFMADLegal(const MachineInstr &MI, LLT Ty) const {
2938     assert((MI.getOpcode() == TargetOpcode::G_FADD ||
2939             MI.getOpcode() == TargetOpcode::G_FSUB ||
2940             MI.getOpcode() == TargetOpcode::G_FMUL) &&
2941            "unexpected node in FMAD forming combine");
2942     switch (Ty.getScalarSizeInBits()) {
2943     case 16:
2944       return isOperationLegal(TargetOpcode::G_FMAD, MVT::f16);
2945     case 32:
2946       return isOperationLegal(TargetOpcode::G_FMAD, MVT::f32);
2947     case 64:
2948       return isOperationLegal(TargetOpcode::G_FMAD, MVT::f64);
2949     default:
2950       break;
2951     }
2952
2953     return false;
2954   }
2955
2956   /// Returns true if \p N can be combined with another node to form an
2957   /// ISD::FMAD. \p N may be an ISD::FADD, ISD::FSUB, or an ISD::FMUL which
2958   /// will be distributed into an fadd/fsub.
2959   virtual bool isFMADLegal(const SelectionDAG &DAG, const SDNode *N) const {
2960     assert((N->getOpcode() == ISD::FADD || N->getOpcode() == ISD::FSUB ||
2961             N->getOpcode() == ISD::FMUL) &&
2962            "unexpected node in FMAD forming combine");
2963     return isOperationLegal(ISD::FMAD, N->getValueType(0));
2964   }
2965
2966   // Return true when the decision to generate FMAs (or FMS, FMLA, etc.) rather
2967   // than FMUL and ADD is delegated to the machine combiner.
2968   virtual bool generateFMAsInMachineCombiner(EVT VT,
2969                                              CodeGenOpt::Level OptLevel) const {
2970     return false;
2971   }
2972
2973   /// Return true if it's profitable to narrow operations of type VT1 to
2974   /// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
2975   /// i32 to i16.
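  /// (The x86 rationale is that i16 instructions need an extra operand-size
  /// prefix and can be slower; a hypothetical override encoding just that rule
  /// could be "return !(VT1 == MVT::i32 && VT2 == MVT::i16);".)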
2976   virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
2977     return false;
2978   }
2979
2980   /// Return true if pulling a binary operation into a select with an identity
2981   /// constant is profitable. This is the inverse of an IR transform.
2982   /// Example: X + (Cond ? Y : 0) --> Cond ? (X + Y) : X
2983   virtual bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode,
2984                                                     EVT VT) const {
2985     return false;
2986   }
2987
2988   /// Return true if it is beneficial to convert a load of a constant to
2989   /// just the constant itself.
2990   /// On some targets it might be more efficient to use a combination of
2991   /// arithmetic instructions to materialize the constant instead of loading it
2992   /// from a constant pool.
2993   virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
2994                                                  Type *Ty) const {
2995     return false;
2996   }
2997
2998   /// Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type
2999   /// from this source type with this index. This is needed because
3000   /// EXTRACT_SUBVECTOR usually has custom lowering that depends on the index of
3001   /// the first element, and only the target knows which lowering is cheap.
3002   virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
3003                                        unsigned Index) const {
3004     return false;
3005   }
3006
3007   /// Try to convert an extract element of a vector binary operation into an
3008   /// extract element followed by a scalar operation.
3009   virtual bool shouldScalarizeBinop(SDValue VecOp) const {
3010     return false;
3011   }
3012
3013   /// Return true if extraction of a scalar element from the given vector type
3014   /// at the given index is cheap. For example, if scalar operations occur on
3015   /// the same register file as vector operations, then an extract element may
3016   /// be a sub-register rename rather than an actual instruction.
3017   virtual bool isExtractVecEltCheap(EVT VT, unsigned Index) const {
3018     return false;
3019   }
3020
3021   /// Try to convert math with an overflow comparison into the corresponding DAG
3022   /// node operation. Targets may want to override this independently of whether
3023   /// the operation is legal/custom for the given type because it may obscure
3024   /// matching of other patterns.
3025   virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
3026                                     bool MathUsed) const {
3027     // TODO: The default logic is inherited from code in CodeGenPrepare.
3028     // The opcode should not make a difference by default?
3029     if (Opcode != ISD::UADDO)
3030       return false;
3031
3032     // Allow the transform as long as we have an integer type that is not
3033     // obviously illegal or unsupported, and only if the math result is used
3034     // besides the overflow check. On some targets (e.g. SPARC), it is
3035     // not profitable to form an overflow op if the math result has no
3036     // concrete users.
3037     if (VT.isVector())
3038       return false;
3039     return MathUsed && (VT.isSimple() || !isOperationExpand(Opcode, VT));
3040   }
3041
3042   // Return true if it is profitable to use a scalar input to a BUILD_VECTOR
3043   // even if the vector itself has multiple uses.
3044   virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
3045     return false;
3046   }
3047
3048   // Return true if CodeGenPrepare should consider splitting the large offset
3049   // of a GEP so that the GEP fits into the addressing mode and can be sunk
3050   // into the same blocks as its users.
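  // For instance, a (hypothetical) GEP whose constant offset is too large to
  // fold into the addressing mode:
  //   %addr = getelementptr i8, i8* %base, i64 100000
  // may be split into a hoistable base plus a small foldable remainder:
  //   %base2 = getelementptr i8, i8* %base, i64 98304
  //   %addr  = getelementptr i8, i8* %base2, i64 1696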
3051   virtual bool shouldConsiderGEPOffsetSplit() const { return false; }
3052
3053   /// Return true if creating a shift of the type by the given
3054   /// amount is not profitable.
3055   virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const {
3056     return false;
3057   }
3058
3059   /// Does this target require the clearing of high-order bits in a register
3060   /// passed to the fp16 to fp conversion library function?
3061   virtual bool shouldKeepZExtForFP16Conv() const { return false; }
3062
3063   /// Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT
3064   /// from min(max(fptoi)) saturation patterns?
3065   virtual bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const {
3066     return isOperationLegalOrCustom(Op, VT);
3067   }
3068
3069   //===--------------------------------------------------------------------===//
3070   // Runtime Library hooks
3071   //
3072
3073   /// Rename the default libcall routine name for the specified libcall.
3074   void setLibcallName(RTLIB::Libcall Call, const char *Name) {
3075     LibcallRoutineNames[Call] = Name;
3076   }
3077   void setLibcallName(ArrayRef<RTLIB::Libcall> Calls, const char *Name) {
3078     for (auto Call : Calls)
3079       setLibcallName(Call, Name);
3080   }
3081
3082   /// Get the libcall routine name for the specified libcall.
3083   const char *getLibcallName(RTLIB::Libcall Call) const {
3084     return LibcallRoutineNames[Call];
3085   }
3086
3087   /// Override the default CondCode to be used to test the result of the
3088   /// comparison libcall against zero.
3089   void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
3090     CmpLibcallCCs[Call] = CC;
3091   }
3092
3093   /// Get the CondCode that's to be used to test the result of the comparison
3094   /// libcall against zero.
3095   ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
3096     return CmpLibcallCCs[Call];
3097   }
3098
3099   /// Set the CallingConv that should be used for the specified libcall.
3100   void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
3101     LibcallCallingConvs[Call] = CC;
3102   }
3103
3104   /// Get the CallingConv that should be used for the specified libcall.
3105   CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
3106     return LibcallCallingConvs[Call];
3107   }
3108
3109   /// Execute target-specific actions to finalize target lowering.
3110   /// This is used to set extra flags in MachineFrameInfo and to freeze
3111   /// the set of reserved registers.
3112   /// The default implementation just freezes the set of reserved registers.
3113   virtual void finalizeLowering(MachineFunction &MF) const;
3114
3115   //===----------------------------------------------------------------------===//
3116   //  GlobalISel Hooks
3117   //===----------------------------------------------------------------------===//
3118   /// Check whether or not \p MI needs to be moved close to its uses.
3119   virtual bool shouldLocalize(const MachineInstr &MI, const TargetTransformInfo *TTI) const;
3120
3121
3122 private:
3123   const TargetMachine &TM;
3124
3125   /// Tells the code generator that the target has multiple (allocatable)
3126   /// condition registers that can be used to store the results of comparisons
3127   /// for use by selects and conditional branches. With multiple condition
3128   /// registers, the code generator will not aggressively sink comparisons into
3129   /// the blocks of their users.
3130   bool HasMultipleConditionRegisters;
3131
3132   /// Tells the code generator that the target has BitExtract instructions.
3133   /// The code generator will aggressively sink "shift"s into the blocks of
3134   /// their users if the users will generate "and" instructions which can be
3135   /// combined with "shift" to BitExtract instructions.
3136   bool HasExtractBitsInsn;
3137
3138   /// Tells the code generator to bypass slow divide or remainder
3139   /// instructions. For example, BypassSlowDivWidths[32,8] tells the code
3140   /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
3141   /// div/rem when the operands are positive and less than 256.
3142   DenseMap<unsigned int, unsigned int> BypassSlowDivWidths;
3143
3144   /// Tells the code generator that it shouldn't generate extra flow control
3145   /// instructions and should attempt to combine flow control instructions via
3146   /// predication.
3147   bool JumpIsExpensive;
3148
3149   /// Information about the contents of the high-bits in boolean values held in
3150   /// a type wider than i1. See getBooleanContents.
3151   BooleanContent BooleanContents;
3152
3153   /// Information about the contents of the high-bits in boolean values held in
3154   /// a type wider than i1. See getBooleanContents.
3155   BooleanContent BooleanFloatContents;
3156
3157   /// Information about the contents of the high-bits in boolean vector values
3158   /// when the element type is wider than i1. See getBooleanContents.
3159   BooleanContent BooleanVectorContents;
3160
3161   /// The target scheduling preference: shortest possible total cycles or lowest
3162   /// register usage.
3163   Sched::Preference SchedPreferenceInfo;
3164
3165   /// The minimum alignment that any argument on the stack needs to have.
3166   Align MinStackArgumentAlignment;
3167
3168   /// The minimum function alignment (used when optimizing for size, and to
3169   /// prevent explicitly provided alignment from leading to incorrect code).
3170   Align MinFunctionAlignment;
3171
3172   /// The preferred function alignment (used when alignment unspecified and
3173   /// optimizing for speed).
3174   Align PrefFunctionAlignment;
3175
3176   /// The preferred loop alignment (in log2, not in bytes).
3177   Align PrefLoopAlignment;
3178   /// The maximum amount of bytes permitted to be emitted for alignment.
3179   unsigned MaxBytesForAlignment;
3180
3181   /// Size in bits of the maximum atomics size the backend supports.
3182   /// Accesses larger than this will be expanded by AtomicExpandPass.
3183   unsigned MaxAtomicSizeInBitsSupported;
3184
3185   /// Size in bits of the minimum cmpxchg or ll/sc operation the
3186   /// backend supports.
3187   unsigned MinCmpXchgSizeInBits;
3188
3189   /// This indicates if the target supports unaligned atomic operations.
3190   bool SupportsUnalignedAtomics;
3191
3192   /// If set to a physical register, this specifies the register that
3193   /// llvm.stacksave/llvm.stackrestore should save and restore.
3194   Register StackPointerRegisterToSaveRestore;
3195
3196   /// This indicates the default register class to use for each ValueType the
3197   /// target supports natively.
3198   const TargetRegisterClass *RegClassForVT[MVT::VALUETYPE_SIZE];
3199   uint16_t NumRegistersForVT[MVT::VALUETYPE_SIZE];
3200   MVT RegisterTypeForVT[MVT::VALUETYPE_SIZE];
3201
3202   /// This indicates the "representative" register class to use for each
3203   /// ValueType the target supports natively. This information is used by the
3204   /// scheduler to track register pressure. By default, the representative
3205   /// register class is the largest legal super-reg register class of the
3206   /// register class of the specified type. e.g. On x86, i8, i16, and i32's
3207   /// representative class would be GR32.
3208   const TargetRegisterClass *RepRegClassForVT[MVT::VALUETYPE_SIZE];
3209
3210   /// This indicates the "cost" of the "representative" register class for each
3211   /// ValueType. The cost is used by the scheduler to approximate register
3212   /// pressure.
3213   uint8_t RepRegClassCostForVT[MVT::VALUETYPE_SIZE];
3214
3215   /// For any value types we are promoting or expanding, this contains the value
3216   /// type that we are changing to. For Expanded types, this contains one step
3217   /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
3218   /// (e.g. i64 -> i16). For types natively supported by the system, this holds
3219   /// the same type (e.g. i32 -> i32).
3220   MVT TransformToType[MVT::VALUETYPE_SIZE];
3221
3222   /// For each operation and each value type, keep a LegalizeAction that
3223   /// indicates how instruction selection should deal with the operation. Most
3224   /// operations are Legal (aka, supported natively by the target), but
3225   /// operations that are not should be described. Note that operations on
3226   /// non-legal value types are not described here.
3227   LegalizeAction OpActions[MVT::VALUETYPE_SIZE][ISD::BUILTIN_OP_END];
3228
3229   /// For each load extension type and each value type, keep a LegalizeAction
3230   /// that indicates how instruction selection should deal with a load of a
3231   /// specific value type and extension type. Uses 4 bits to store the action
3232   /// for each of the 4 load ext types.
3233   uint16_t LoadExtActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];
3234
3235   /// For each value type pair keep a LegalizeAction that indicates whether a
3236   /// truncating store of a specific value type and truncating type is legal.
3237   LegalizeAction TruncStoreActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];
3238
3239   /// For each indexed mode and each value type, keep a quad of LegalizeAction
3240   /// that indicates how instruction selection should deal with the load /
3241   /// store / maskedload / maskedstore.
3242   ///
3243   /// The first dimension is the value_type for the reference. The second
3244   /// dimension represents the various modes for load store.
3245   uint16_t IndexedModeActions[MVT::VALUETYPE_SIZE][ISD::LAST_INDEXED_MODE];
3246
3247   /// For each condition code (ISD::CondCode) keep a LegalizeAction that
3248   /// indicates how instruction selection should deal with the condition code.
3249   ///
3250   /// Because each CC action takes up 4 bits, we need to have the array size be
3251   /// large enough to fit all of the value types. This can be done by rounding
3252   /// up the MVT::VALUETYPE_SIZE value to the next multiple of 8.
3253   uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::VALUETYPE_SIZE + 7) / 8];
3254
3255   ValueTypeActionImpl ValueTypeActions;
3256
3257 private:
3258   LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;
3259
3260   /// Targets can specify ISD nodes that they would like PerformDAGCombine
3261   /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
3262   /// array.
3263   unsigned char
3264       TargetDAGCombineArray[(ISD::BUILTIN_OP_END + CHAR_BIT - 1) / CHAR_BIT];
3265
3266   /// For operations that must be promoted to a specific type, this holds the
3267   /// destination type. This map should be sparse, so don't hold it as an
3268   /// array.
3269   ///
3270   /// Targets add entries to this map with AddPromotedToType(..), clients access
3271   /// this with getTypeToPromoteTo(..).
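  ///
  /// For example, a target whose v2i32 AND must be performed as v1i64 might
  /// (hypothetically) configure:
  ///   setOperationAction(ISD::AND, MVT::v2i32, Promote);
  ///   AddPromotedToType(ISD::AND, MVT::v2i32, MVT::v1i64);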
3272   std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
3273       PromoteToType;
3274
3275   /// Stores the name of each libcall.
3276   const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL + 1];
3277
3278   /// The ISD::CondCode that should be used to test the result of each of the
3279   /// comparison libcall against zero.
3280   ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
3281
3282   /// Stores the CallingConv that should be used for each libcall.
3283   CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];
3284
3285   /// Set default libcall names and calling conventions.
3286   void InitLibcalls(const Triple &TT);
3287
3288   /// The bits of IndexedModeActions used to store the legalisation actions.
3289   /// We store the data as | ML | MS | L | S | with each field taking 4 bits.
3290   enum IndexedModeActionsBits {
3291     IMAB_Store = 0,
3292     IMAB_Load = 4,
3293     IMAB_MaskedStore = 8,
3294     IMAB_MaskedLoad = 12
3295   };
3296
3297   void setIndexedModeAction(unsigned IdxMode, MVT VT, unsigned Shift,
3298                             LegalizeAction Action) {
3299     assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
3300            (unsigned)Action < 0xf && "Table isn't big enough!");
3301     unsigned Ty = (unsigned)VT.SimpleTy;
3302     IndexedModeActions[Ty][IdxMode] &= ~(0xf << Shift);
3303     IndexedModeActions[Ty][IdxMode] |= ((uint16_t)Action) << Shift;
3304   }
3305
3306   LegalizeAction getIndexedModeAction(unsigned IdxMode, MVT VT,
3307                                       unsigned Shift) const {
3308     assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
3309            "Table isn't big enough!");
3310     unsigned Ty = (unsigned)VT.SimpleTy;
3311     return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] >> Shift) & 0xf);
3312   }
3313
3314 protected:
3315   /// Return true if the extension represented by \p I is free.
3316   /// \pre \p I is a sign, zero, or fp extension and
3317   ///      is[Z|FP]ExtFree of the related types is not true.
3318   virtual bool isExtFreeImpl(const Instruction *I) const { return false; }
3319
3320   /// Depth that GatherAllAliases should continue looking for chain
3321   /// dependencies when trying to find a more preferable chain. As an
3322   /// approximation, this should be more than the number of consecutive stores
3323   /// expected to be merged.
3324   unsigned GatherAllAliasesMaxDepth;
3325
3326   /// \brief Specify maximum number of store instructions per memset call.
3327   ///
3328   /// When lowering \@llvm.memset this field specifies the maximum number of
3329   /// store operations that may be substituted for the call to memset. Targets
3330   /// must set this value based on the cost threshold for that target. Targets
3331   /// should assume that the memset will be done using as many of the largest
3332   /// store operations first, followed by smaller ones, if necessary, per
3333   /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
3334   /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
3335   /// store. This only applies to setting a constant array of a constant size.
3336   unsigned MaxStoresPerMemset;
3337   /// Likewise for functions with the OptSize attribute.
3338   unsigned MaxStoresPerMemsetOptSize;
3339
3340   /// \brief Specify maximum number of store instructions per memcpy call.
3341   ///
3342   /// When lowering \@llvm.memcpy this field specifies the maximum number of
3343   /// store operations that may be substituted for a call to memcpy. Targets
3344   /// must set this value based on the cost threshold for that target. Targets
3345   /// should assume that the memcpy will be done using as many of the largest
3346   /// store operations first, followed by smaller ones, if necessary, per
3347   /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
3348   /// with 32-bit alignment would result in one 4-byte store, one 2-byte store
3349   /// and one 1-byte store. This only applies to copying a constant array of
3350   /// constant size.
3351   unsigned MaxStoresPerMemcpy;
3352   /// Likewise for functions with the OptSize attribute.
3353   unsigned MaxStoresPerMemcpyOptSize;
3354   /// \brief Specify max number of store instructions to glue in inlined memcpy.
3355   ///
3356   /// When memcpy is inlined based on MaxStoresPerMemcpy, specify maximum number
3357   /// of store instructions to keep together. This helps in pairing and
3358   /// vectorization later on.
3359   unsigned MaxGluedStoresPerMemcpy = 0;
3360
3361   /// \brief Specify maximum number of load instructions per memcmp call.
3362   ///
3363   /// When lowering \@llvm.memcmp this field specifies the maximum number of
3364   /// pairs of load operations that may be substituted for a call to memcmp.
3365   /// Targets must set this value based on the cost threshold for that target.
3366   /// Targets should assume that the memcmp will be done using as many of the
3367   /// largest load operations first, followed by smaller ones, if necessary, per
3368   /// alignment restrictions. For example, loading 7 bytes on a 32-bit machine
3369   /// with 32-bit alignment would result in one 4-byte load, one 2-byte load
3370   /// and one 1-byte load. This only applies to comparing a constant array of
3371   /// constant size.
3372   unsigned MaxLoadsPerMemcmp;
3373   /// Likewise for functions with the OptSize attribute.
3374   unsigned MaxLoadsPerMemcmpOptSize;
3375
3376   /// \brief Specify maximum number of store instructions per memmove call.
3377   ///
3378   /// When lowering \@llvm.memmove this field specifies the maximum number of
3379   /// store instructions that may be substituted for a call to memmove. Targets
3380   /// must set this value based on the cost threshold for that target. Targets
3381   /// should assume that the memmove will be done using as many of the largest
3382   /// store operations first, followed by smaller ones, if necessary, per
3383   /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
3384   /// with 8-bit alignment would result in nine 1-byte stores. This only
3385   /// applies to copying a constant array of constant size.
3386   unsigned MaxStoresPerMemmove;
3387   /// Likewise for functions with the OptSize attribute.
3388   unsigned MaxStoresPerMemmoveOptSize;
3389
3390   /// Tells the code generator that select is more expensive than a branch if
3391   /// the branch is usually predicted right.
3392   bool PredictableSelectIsExpensive;
3393
3394   /// \see enableExtLdPromotion.
3395   bool EnableExtLdPromotion;
3396
3397   /// Return true if the value types that can be represented by the specified
3398   /// register class are all legal.
3399   bool isLegalRC(const TargetRegisterInfo &TRI,
3400                  const TargetRegisterClass &RC) const;
3401
3402   /// Replace/modify any TargetFrameIndex operands with a target-dependent
3403   /// sequence of memory operands that is recognized by PrologEpilogInserter.
3404   MachineBasicBlock *emitPatchPoint(MachineInstr &MI,
3405                                     MachineBasicBlock *MBB) const;
3406
3407   bool IsStrictFPEnabled;
3408 };
3409
3410 /// This class defines information used to lower LLVM code to legal SelectionDAG
3411 /// operators that the target instruction selector can accept natively.
3412 ///
3413 /// This class also defines callbacks that targets must implement to lower
3414 /// target-specific constructs to SelectionDAG operators.
3415 class TargetLowering : public TargetLoweringBase {
3416 public:
3417   struct DAGCombinerInfo;
3418   struct MakeLibCallOptions;
3419
3420   TargetLowering(const TargetLowering &) = delete;
3421   TargetLowering &operator=(const TargetLowering &) = delete;
3422
3423   explicit TargetLowering(const TargetMachine &TM);
3424
3425   bool isPositionIndependent() const;
3426
3427   virtual bool isSDNodeSourceOfDivergence(const SDNode *N,
3428                                           FunctionLoweringInfo *FLI,
3429                                           LegacyDivergenceAnalysis *DA) const {
3430     return false;
3431   }
3432
3433   // Lets the target control the following reassociation of operands:
3434   // (op (op x, c1), y) -> (op (op x, y), c1), where N0 is (op x, c1) and N1 is
3435   // y. By default, consider profitable any case where N0 has a single use.
3436   // This behavior reflects the condition replaced by this target hook call in
3437   // the DAGCombiner. Any particular target can implement its own heuristic to
3438   // restrict the common combiner.
3439   virtual bool isReassocProfitable(SelectionDAG &DAG, SDValue N0,
3440                                    SDValue N1) const {
3441     return N0.hasOneUse();
3442   }
3443
3444   virtual bool isSDNodeAlwaysUniform(const SDNode *N) const {
3445     return false;
3446   }
3447
3448   /// Returns true by value, and the base pointer, offset pointer, and
3449   /// addressing mode by reference, if the node's address can be legally
3450   /// represented as a pre-indexed load / store address.
3451   virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
3452                                          SDValue &/*Offset*/,
3453                                          ISD::MemIndexedMode &/*AM*/,
3454                                          SelectionDAG &/*DAG*/) const {
3455     return false;
3456   }
3457
3458   /// Returns true by value, and the base pointer, offset pointer, and
3459   /// addressing mode by reference, if this node can be combined with a load /
3460   /// store to form a post-indexed load / store.
3461   virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
3462                                           SDValue &/*Base*/,
3463                                           SDValue &/*Offset*/,
3464                                           ISD::MemIndexedMode &/*AM*/,
3465                                           SelectionDAG &/*DAG*/) const {
3466     return false;
3467   }
3468
3469   /// Returns true if the specified base+offset is a legal indexed addressing
3470   /// mode for this target. \p MI is the load or store instruction that is being
3471   /// considered for transformation.
3472   virtual bool isIndexingLegal(MachineInstr &MI, Register Base, Register Offset,
3473                                bool IsPre, MachineRegisterInfo &MRI) const {
3474     return false;
3475   }
3476
3477   /// Return the entry encoding for a jump table in the current function. The
3478   /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
3479   virtual unsigned getJumpTableEncoding() const;
3480
3481   virtual const MCExpr *
3482   LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
3483                             const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
3484                             MCContext &/*Ctx*/) const {
3485     llvm_unreachable("Need to implement this hook if target has custom JTIs");
3486   }
3487
3488   /// Returns relocation base for the given PIC jumptable.
3489 virtual SDValue getPICJumpTableRelocBase(SDValue Table, 3490 SelectionDAG &DAG) const; 3491 3492 /// This returns the relocation base for the given PIC jumptable, the same as 3493 /// getPICJumpTableRelocBase, but as an MCExpr. 3494 virtual const MCExpr * 3495 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, 3496 unsigned JTI, MCContext &Ctx) const; 3497 3498 /// Return true if folding a constant offset with the given GlobalAddress is 3499 /// legal. It is frequently not legal in PIC relocation models. 3500 virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const; 3501 3502 bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node, 3503 SDValue &Chain) const; 3504 3505 void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, 3506 SDValue &NewRHS, ISD::CondCode &CCCode, 3507 const SDLoc &DL, const SDValue OldLHS, 3508 const SDValue OldRHS) const; 3509 3510 void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, 3511 SDValue &NewRHS, ISD::CondCode &CCCode, 3512 const SDLoc &DL, const SDValue OldLHS, 3513 const SDValue OldRHS, SDValue &Chain, 3514 bool IsSignaling = false) const; 3515 3516 /// Returns a pair of (return value, chain). 3517 /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC. 3518 std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, 3519 EVT RetVT, ArrayRef<SDValue> Ops, 3520 MakeLibCallOptions CallOptions, 3521 const SDLoc &dl, 3522 SDValue Chain = SDValue()) const; 3523 3524 /// Check whether parameters to a call that are passed in callee saved 3525 /// registers are the same as from the calling function. This needs to be 3526 /// checked for tail call eligibility. 3527 bool parametersInCSRMatch(const MachineRegisterInfo &MRI, 3528 const uint32_t *CallerPreservedMask, 3529 const SmallVectorImpl<CCValAssign> &ArgLocs, 3530 const SmallVectorImpl<SDValue> &OutVals) const; 3531 3532 //===--------------------------------------------------------------------===// 3533 // TargetLowering Optimization Methods 3534 // 3535 3536 /// A convenience struct that encapsulates a DAG, and two SDValues for 3537 /// returning information from TargetLowering to its clients that want to 3538 /// combine. 3539 struct TargetLoweringOpt { 3540 SelectionDAG &DAG; 3541 bool LegalTys; 3542 bool LegalOps; 3543 SDValue Old; 3544 SDValue New; 3545 3546 explicit TargetLoweringOpt(SelectionDAG &InDAG, 3547 bool LT, bool LO) : 3548 DAG(InDAG), LegalTys(LT), LegalOps(LO) {} 3549 3550 bool LegalTypes() const { return LegalTys; } 3551 bool LegalOperations() const { return LegalOps; } 3552 3553 bool CombineTo(SDValue O, SDValue N) { 3554 Old = O; 3555 New = N; 3556 return true; 3557 } 3558 }; 3559 3560 /// Determines the optimal series of memory ops to replace the memset / memcpy. 3561 /// Return true if the number of memory ops is below the threshold (Limit). 3562 /// Note that this is always the case when Limit is ~0. 3563 /// It returns the types of the sequence of memory ops to perform 3564 /// memset / memcpy by reference. 3565 virtual bool 3566 findOptimalMemOpLowering(std::vector<EVT> &MemOps, unsigned Limit, 3567 const MemOp &Op, unsigned DstAS, unsigned SrcAS, 3568 const AttributeList &FuncAttributes) const; 3569 3570 /// Check to see if the specified operand of the specified instruction is a 3571 /// constant integer. If so, check to see if there are any bits set in the 3572 /// constant that are not demanded. If so, shrink the constant and return 3573 /// true. 
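  /// For example, if only the low byte of the result of an OR against the
  /// constant 0x1F0F is demanded, the 0x1F00 bits can be cleared and the
  /// constant shrunk to 0x0F, which may be cheaper to materialize.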
3574   bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
3575                               const APInt &DemandedElts,
3576                               TargetLoweringOpt &TLO) const;
3577
3578   /// Helper wrapper around ShrinkDemandedConstant, demanding all elements.
3579   bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
3580                               TargetLoweringOpt &TLO) const;
3581
3582   // Target hook to do target-specific const optimization, which is called by
3583   // ShrinkDemandedConstant. This function should return true if the target
3584   // doesn't want ShrinkDemandedConstant to further optimize the constant.
3585   virtual bool targetShrinkDemandedConstant(SDValue Op,
3586                                             const APInt &DemandedBits,
3587                                             const APInt &DemandedElts,
3588                                             TargetLoweringOpt &TLO) const {
3589     return false;
3590   }
3591
3592   /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free. This
3593   /// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
3594   /// generalized for targets with other types of implicit widening casts.
3595   bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
3596                         TargetLoweringOpt &TLO) const;
3597
3598   /// Look at Op. At this point, we know that only the DemandedBits bits of the
3599   /// result of Op are ever used downstream. If we can use this information to
3600   /// simplify Op, create a new simplified DAG node and return true, returning
3601   /// the original and new nodes in Old and New. Otherwise, analyze the
3602   /// expression and return a mask of KnownOne and KnownZero bits for the
3603   /// expression (used to simplify the caller). The KnownZero/One bits may only
3604   /// be accurate for those bits in the Demanded masks.
3605   /// \p AssumeSingleUse When this parameter is true, this function will
3606   /// attempt to simplify \p Op even if there are multiple uses.
3607   /// Callers are responsible for correctly updating the DAG based on the
3608   /// results of this function, because simply replacing TLO.Old
3609   /// with TLO.New will be incorrect when this parameter is true and TLO.Old
3610   /// has multiple uses.
3611   bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
3612                             const APInt &DemandedElts, KnownBits &Known,
3613                             TargetLoweringOpt &TLO, unsigned Depth = 0,
3614                             bool AssumeSingleUse = false) const;
3615
3616   /// Helper wrapper around SimplifyDemandedBits, demanding all elements.
3617   /// Adds Op back to the worklist upon success.
3618   bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
3619                             KnownBits &Known, TargetLoweringOpt &TLO,
3620                             unsigned Depth = 0,
3621                             bool AssumeSingleUse = false) const;
3622
3623   /// Helper wrapper around SimplifyDemandedBits.
3624   /// Adds Op back to the worklist upon success.
3625   bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
3626                             DAGCombinerInfo &DCI) const;
3627
3628   /// Helper wrapper around SimplifyDemandedBits.
3629   /// Adds Op back to the worklist upon success.
3630   bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
3631                             const APInt &DemandedElts,
3632                             DAGCombinerInfo &DCI) const;
3633
3634   /// More limited version of SimplifyDemandedBits that can be used to "look
3635   /// through" ops that don't contribute to the DemandedBits/DemandedElts -
3636   /// bitwise ops etc.
3637   SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits,
3638                                           const APInt &DemandedElts,
3639                                           SelectionDAG &DAG,
3640                                           unsigned Depth = 0) const;
3641
3642   /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all
3643   /// elements.
3644   SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits,
3645                                           SelectionDAG &DAG,
3646                                           unsigned Depth = 0) const;
3647
3648   /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all
3649   /// bits from only some vector elements.
3650   SDValue SimplifyMultipleUseDemandedVectorElts(SDValue Op,
3651                                                 const APInt &DemandedElts,
3652                                                 SelectionDAG &DAG,
3653                                                 unsigned Depth = 0) const;
3654
3655   /// Look at Vector Op. At this point, we know that only the DemandedElts
3656   /// elements of the result of Op are ever used downstream. If we can use
3657   /// this information to simplify Op, create a new simplified DAG node and
3658   /// return true, storing the original and new nodes in TLO.
3659   /// Otherwise, analyze the expression and return a mask of KnownUndef and
3660   /// KnownZero elements for the expression (used to simplify the caller).
3661   /// The KnownUndef/Zero elements may only be accurate for those bits
3662   /// in the DemandedMask.
3663   /// \p AssumeSingleUse When this parameter is true, this function will
3664   /// attempt to simplify \p Op even if there are multiple uses.
3665   /// Callers are responsible for correctly updating the DAG based on the
3666   /// results of this function, because simply replacing TLO.Old
3667   /// with TLO.New will be incorrect when this parameter is true and TLO.Old
3668   /// has multiple uses.
3669   bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask,
3670                                   APInt &KnownUndef, APInt &KnownZero,
3671                                   TargetLoweringOpt &TLO, unsigned Depth = 0,
3672                                   bool AssumeSingleUse = false) const;
3673
3674   /// Helper wrapper around SimplifyDemandedVectorElts.
3675   /// Adds Op back to the worklist upon success.
3676   bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts,
3677                                   DAGCombinerInfo &DCI) const;
3678
3679   /// Return true if the target supports simplifying demanded vector elements by
3680   /// converting them to undefs.
3681   virtual bool
3682   shouldSimplifyDemandedVectorElts(SDValue Op,
3683                                    const TargetLoweringOpt &TLO) const {
3684     return true;
3685   }
3686
3687   /// Determine which of the bits specified in Mask are known to be either zero
3688   /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
3689   /// argument allows us to only collect the known bits that are shared by the
3690   /// requested vector elements.
3691   virtual void computeKnownBitsForTargetNode(const SDValue Op,
3692                                              KnownBits &Known,
3693                                              const APInt &DemandedElts,
3694                                              const SelectionDAG &DAG,
3695                                              unsigned Depth = 0) const;
3696
3697   /// Determine which of the bits specified in Mask are known to be either zero
3698   /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
3699   /// argument allows us to only collect the known bits that are shared by the
3700   /// requested vector elements. This is for GISel.
3701   virtual void computeKnownBitsForTargetInstr(GISelKnownBits &Analysis,
3702                                               Register R, KnownBits &Known,
3703                                               const APInt &DemandedElts,
3704                                               const MachineRegisterInfo &MRI,
3705                                               unsigned Depth = 0) const;
3706
3707   /// Determine the known alignment for the pointer value \p R. This can
3708   /// typically be inferred from the number of low known 0 bits. However, for a
3709   /// pointer with a non-integral address space, the alignment value may be
3710   /// independent from the known low bits.
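  /// (In a non-integral address space the pointer's bit pattern is not
  /// meaningful, so an alignment known from, e.g., an ABI guarantee cannot be
  /// derived from KnownBits and has to be reported here instead.)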
3711 virtual Align computeKnownAlignForTargetInstr(GISelKnownBits &Analysis, 3712 Register R, 3713 const MachineRegisterInfo &MRI, 3714 unsigned Depth = 0) const; 3715 3716 /// Determine which of the bits of FrameIndex \p FIOp are known to be 0. 3717 /// Default implementation computes low bits based on alignment 3718 /// information. This should preserve known bits passed into it. 3719 virtual void computeKnownBitsForFrameIndex(int FIOp, 3720 KnownBits &Known, 3721 const MachineFunction &MF) const; 3722 3723 /// This method can be implemented by targets that want to expose additional 3724 /// information about sign bits to the DAG Combiner. The DemandedElts 3725 /// argument allows us to only collect the minimum sign bits that are shared 3726 /// by the requested vector elements. 3727 virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op, 3728 const APInt &DemandedElts, 3729 const SelectionDAG &DAG, 3730 unsigned Depth = 0) const; 3731 3732 /// This method can be implemented by targets that want to expose additional 3733 /// information about sign bits to GlobalISel combiners. The DemandedElts 3734 /// argument allows us to only collect the minimum sign bits that are shared 3735 /// by the requested vector elements. 3736 virtual unsigned computeNumSignBitsForTargetInstr(GISelKnownBits &Analysis, 3737 Register R, 3738 const APInt &DemandedElts, 3739 const MachineRegisterInfo &MRI, 3740 unsigned Depth = 0) const; 3741 3742 /// Attempt to simplify any target nodes based on the demanded vector 3743 /// elements, returning true on success. Otherwise, analyze the expression and 3744 /// return a mask of KnownUndef and KnownZero elements for the expression 3745 /// (used to simplify the caller). The KnownUndef/Zero elements may only be 3746 /// accurate for those bits in the DemandedMask. 3747 virtual bool SimplifyDemandedVectorEltsForTargetNode( 3748 SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, 3749 APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth = 0) const; 3750 3751 /// Attempt to simplify any target nodes based on the demanded bits/elts, 3752 /// returning true on success. Otherwise, analyze the 3753 /// expression and return a mask of KnownOne and KnownZero bits for the 3754 /// expression (used to simplify the caller). The KnownZero/One bits may only 3755 /// be accurate for those bits in the Demanded masks. 3756 virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op, 3757 const APInt &DemandedBits, 3758 const APInt &DemandedElts, 3759 KnownBits &Known, 3760 TargetLoweringOpt &TLO, 3761 unsigned Depth = 0) const; 3762 3763 /// More limited version of SimplifyDemandedBits that can be used to "look 3764 /// through" ops that don't contribute to the DemandedBits/DemandedElts - 3765 /// bitwise ops etc. 3766 virtual SDValue SimplifyMultipleUseDemandedBitsForTargetNode( 3767 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, 3768 SelectionDAG &DAG, unsigned Depth) const; 3769 3770 /// Return true if this function can prove that \p Op is never poison 3771 /// and, if \p PoisonOnly is false, does not have undef bits. The DemandedElts 3772 /// argument limits the check to the requested vector elements. 3773 virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode( 3774 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, 3775 bool PoisonOnly, unsigned Depth) const; 3776 3777 /// Tries to build a legal vector shuffle using the provided parameters 3778 /// or equivalent variations. 
/// The Mask argument may be modified as the
3779   /// function tries different variations.
3780   /// Returns an empty SDValue if the operation fails.
3781   SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0,
3782                                   SDValue N1, MutableArrayRef<int> Mask,
3783                                   SelectionDAG &DAG) const;
3784
3785   /// This method returns the constant pool value that will be loaded by LD.
3786   /// NOTE: You must check for implicit extensions of the constant by LD.
3787   virtual const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const;
3788
3789   /// If \p SNaN is false, \returns true if \p Op is known to never be any
3790   /// NaN. If \p SNaN is true, returns true if \p Op is known to never be a
3791   /// signaling NaN.
3792   virtual bool isKnownNeverNaNForTargetNode(SDValue Op,
3793                                             const SelectionDAG &DAG,
3794                                             bool SNaN = false,
3795                                             unsigned Depth = 0) const;
3796
3797   /// Return true if vector \p Op has the same value across all \p DemandedElts,
3798   /// indicating any elements which may be undef in the output \p UndefElts.
3799   virtual bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts,
3800                                          APInt &UndefElts,
3801                                          unsigned Depth = 0) const;
3802
3803   /// Returns true if the given Op is considered a canonical constant for the
3804   /// target, which should not be transformed back into a BUILD_VECTOR.
3805   virtual bool isTargetCanonicalConstantNode(SDValue Op) const {
3806     return Op.getOpcode() == ISD::SPLAT_VECTOR;
3807   }
3808
3809   struct DAGCombinerInfo {
3810     void *DC;  // The DAG Combiner object.
3811     CombineLevel Level;
3812     bool CalledByLegalizer;
3813
3814   public:
3815     SelectionDAG &DAG;
3816
3817     DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
3818         : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
3819
3820     bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
3821     bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
3822     bool isAfterLegalizeDAG() const { return Level >= AfterLegalizeDAG; }
3823     CombineLevel getDAGCombineLevel() { return Level; }
3824     bool isCalledByLegalizer() const { return CalledByLegalizer; }
3825
3826     void AddToWorklist(SDNode *N);
3827     SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo = true);
3828     SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
3829     SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
3830
3831     bool recursivelyDeleteUnusedNodes(SDNode *N);
3832
3833     void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
3834   };
3835
3836   /// Return true if N is a constant or constant vector equal to the true value
3837   /// from getBooleanContents().
3838   bool isConstTrueVal(SDValue N) const;
3839
3840   /// Return true if N is a constant or constant vector equal to the false value
3841   /// from getBooleanContents().
3842   bool isConstFalseVal(SDValue N) const;
3843
3844   /// Return if \p N is a True value when extended to \p VT.
3845   bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const;
3846
3847   /// Try to simplify a setcc built with the specified operands and cc. If it is
3848   /// unable to simplify it, return a null SDValue.
3849   SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
3850                         bool foldBooleans, DAGCombinerInfo &DCI,
3851                         const SDLoc &dl) const;
3852
3853   // For targets which wrap an address, unwrap for analysis.
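  // (e.g. a target-specific wrapper node around a TargetGlobalAddress; the
  // default implementation returns N unchanged.)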
3854   virtual SDValue unwrapAddress(SDValue N) const { return N; }
3855
3856   /// Returns true (and the GlobalValue and the offset) if the node is a
3857   /// GlobalAddress + offset.
3858   virtual bool
3859   isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const;
3860
3861   /// This method will be invoked for all target nodes and for any
3862   /// target-independent nodes that the target has registered to be invoked
3863   /// for via setTargetDAGCombine().
3864   ///
3865   /// The semantics are as follows:
3866   /// Return Value:
3867   ///   SDValue.Val == 0   - No change was made
3868   ///   SDValue.Val == N   - N was replaced, is dead, and is already handled.
3869   ///   otherwise          - N should be replaced by the returned Operand.
3870   ///
3871   /// In addition, methods provided by DAGCombinerInfo may be used to perform
3872   /// more complex transformations.
3873   ///
3874   virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
3875
3876   /// Return true if it is profitable to move this shift by a constant amount
3877   /// through its operand, adjusting any immediate operands as necessary to
3878   /// preserve semantics. This transformation may not be desirable if it
3879   /// disrupts a particularly auspicious target-specific tree (e.g. bitfield
3880   /// extraction in AArch64). By default, it returns true.
3881   ///
3882   /// @param N the shift node
3883   /// @param Level the current DAGCombine legalization level.
3884   virtual bool isDesirableToCommuteWithShift(const SDNode *N,
3885                                              CombineLevel Level) const {
3886     return true;
3887   }
3888
3889   /// Return true if it is profitable to combine an XOR of a logical shift
3890   /// to create a logical shift of NOT. This transformation may not be desirable
3891   /// if it disrupts a particularly auspicious target-specific tree (e.g.
3892   /// BIC on ARM/AArch64). By default, it returns true.
3893   virtual bool isDesirableToCommuteXorWithShift(const SDNode *N) const {
3894     return true;
3895   }
3896
3897   /// Return true if the target has native support for the specified value type
3898   /// and it is 'desirable' to use the type for the given node type. e.g. On x86
3899   /// i16 is legal, but undesirable since i16 instruction encodings are longer
3900   /// and some i16 instructions are slow.
3901   virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
3902     // By default, assume all legal types are desirable.
3903     return isTypeLegal(VT);
3904   }
3905
3906   /// Return true if it is profitable for the DAG combiner to transform a
3907   /// floating-point op of the specified opcode to an equivalent op of an
3908   /// integer type. e.g. f32 load -> i32 load can be profitable on ARM.
3909   virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
3910                                                  EVT /*VT*/) const {
3911     return false;
3912   }
3913
3914   /// This method queries the target whether it is beneficial for the DAG
3915   /// combiner to promote the specified node. If true, it should return the
3916   /// desired promotion type by reference.
3917   virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
3918     return false;
3919   }
3920
3921   /// Return true if the target supports the swifterror attribute. It optimizes
3922   /// loads and stores to reading and writing a specific register.
3923   virtual bool supportSwiftError() const {
3924     return false;
3925   }
3926
3927   /// Return true if the target supports that a subset of CSRs for the given
3928   /// machine function is handled explicitly via copies.
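  /// A target would typically restrict this to a particular calling
  /// convention; a minimal (hypothetical) override:
  ///   return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
  ///          MF->getFunction().hasFnAttribute(Attribute::NoUnwind);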
3929   virtual bool supportSplitCSR(MachineFunction *MF) const {
3930     return false;
3931   }
3932
3933   /// Perform necessary initialization to handle a subset of CSRs explicitly
3934   /// via copies. This function is called at the beginning of instruction
3935   /// selection.
3936   virtual void initializeSplitCSR(MachineBasicBlock *Entry) const {
3937     llvm_unreachable("Not Implemented");
3938   }
3939
3940   /// Insert explicit copies in entry and exit blocks. We copy a subset of
3941   /// CSRs to virtual registers in the entry block, and copy them back to
3942   /// physical registers in the exit blocks. This function is called at the end
3943   /// of instruction selection.
3944   virtual void insertCopiesSplitCSR(
3945       MachineBasicBlock *Entry,
3946       const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
3947     llvm_unreachable("Not Implemented");
3948   }
3949
3950   /// Return the newly negated expression if the cost is not expensive, and
3951   /// set \p Cost to indicate whether it is cheaper or neutral to do the
3952   /// negation.
3953   virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG,
3954                                        bool LegalOps, bool OptForSize,
3955                                        NegatibleCost &Cost,
3956                                        unsigned Depth = 0) const;
3957
3958   /// This is the helper function to return the newly negated expression only
3959   /// when the cost is cheaper.
3960   SDValue getCheaperNegatedExpression(SDValue Op, SelectionDAG &DAG,
3961                                       bool LegalOps, bool OptForSize,
3962                                       unsigned Depth = 0) const {
3963     NegatibleCost Cost = NegatibleCost::Expensive;
3964     SDValue Neg =
3965         getNegatedExpression(Op, DAG, LegalOps, OptForSize, Cost, Depth);
3966     if (Neg && Cost == NegatibleCost::Cheaper)
3967       return Neg;
3968     // Remove the newly created node to avoid side effects on the DAG.
3969     if (Neg && Neg->use_empty())
3970       DAG.RemoveDeadNode(Neg.getNode());
3971     return SDValue();
3972   }
3973
3974   /// This is the helper function to return the newly negated expression if
3975   /// the cost is not expensive.
3976   SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps,
3977                                bool OptForSize, unsigned Depth = 0) const {
3978     NegatibleCost Cost = NegatibleCost::Expensive;
3979     return getNegatedExpression(Op, DAG, LegalOps, OptForSize, Cost, Depth);
3980   }
3981
3982   //===--------------------------------------------------------------------===//
3983   // Lowering methods - These methods must be implemented by targets so that
3984   // the SelectionDAGBuilder code knows how to lower these.
3985   //
3986
3987   /// Target-specific splitting of values into parts that fit a register
3988   /// storing a legal type.
3989   virtual bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL,
3990                                            SDValue Val, SDValue *Parts,
3991                                            unsigned NumParts, MVT PartVT,
3992                                            Optional<CallingConv::ID> CC) const {
3993     return false;
3994   }
3995
3996   /// Target-specific combining of register parts into their original value.
3997   virtual SDValue
3998   joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL,
3999                              const SDValue *Parts, unsigned NumParts,
4000                              MVT PartVT, EVT ValueVT,
4001                              Optional<CallingConv::ID> CC) const {
4002     return SDValue();
4003   }
4004
4005   /// This hook must be implemented to lower the incoming (formal) arguments,
4006   /// described by the Ins array, into the specified DAG. The implementation
4007   /// should fill in the InVals array with legal-type argument values, and
4008   /// return the resulting token chain value.
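  /// A minimal (hypothetical) implementation for a target that passes all
  /// arguments on the stack would create one frame-index load per member of
  /// Ins, append each result to InVals, and return Chain unchanged.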
4009 virtual SDValue LowerFormalArguments( 4010 SDValue /*Chain*/, CallingConv::ID /*CallConv*/, bool /*isVarArg*/, 4011 const SmallVectorImpl<ISD::InputArg> & /*Ins*/, const SDLoc & /*dl*/, 4012 SelectionDAG & /*DAG*/, SmallVectorImpl<SDValue> & /*InVals*/) const { 4013 llvm_unreachable("Not Implemented"); 4014 } 4015 4016 /// This structure contains all information that is necessary for lowering 4017 /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder 4018 /// needs to lower a call, and targets will see this struct in their LowerCall 4019 /// implementation. 4020 struct CallLoweringInfo { 4021 SDValue Chain; 4022 Type *RetTy = nullptr; 4023 bool RetSExt : 1; 4024 bool RetZExt : 1; 4025 bool IsVarArg : 1; 4026 bool IsInReg : 1; 4027 bool DoesNotReturn : 1; 4028 bool IsReturnValueUsed : 1; 4029 bool IsConvergent : 1; 4030 bool IsPatchPoint : 1; 4031 bool IsPreallocated : 1; 4032 bool NoMerge : 1; 4033 4034 // IsTailCall should be modified by implementations of 4035 // TargetLowering::LowerCall that perform tail call conversions. 4036 bool IsTailCall = false; 4037 4038 // Is Call lowering done post SelectionDAG type legalization. 4039 bool IsPostTypeLegalization = false; 4040 4041 unsigned NumFixedArgs = -1; 4042 CallingConv::ID CallConv = CallingConv::C; 4043 SDValue Callee; 4044 ArgListTy Args; 4045 SelectionDAG &DAG; 4046 SDLoc DL; 4047 const CallBase *CB = nullptr; 4048 SmallVector<ISD::OutputArg, 32> Outs; 4049 SmallVector<SDValue, 32> OutVals; 4050 SmallVector<ISD::InputArg, 32> Ins; 4051 SmallVector<SDValue, 4> InVals; 4052 4053 CallLoweringInfo(SelectionDAG &DAG) 4054 : RetSExt(false), RetZExt(false), IsVarArg(false), IsInReg(false), 4055 DoesNotReturn(false), IsReturnValueUsed(true), IsConvergent(false), 4056 IsPatchPoint(false), IsPreallocated(false), NoMerge(false), 4057 DAG(DAG) {} 4058 4059 CallLoweringInfo &setDebugLoc(const SDLoc &dl) { 4060 DL = dl; 4061 return *this; 4062 } 4063 4064 CallLoweringInfo &setChain(SDValue InChain) { 4065 Chain = InChain; 4066 return *this; 4067 } 4068 4069 // setCallee with target/module-specific attributes 4070 CallLoweringInfo &setLibCallee(CallingConv::ID CC, Type *ResultType, 4071 SDValue Target, ArgListTy &&ArgsList) { 4072 RetTy = ResultType; 4073 Callee = Target; 4074 CallConv = CC; 4075 NumFixedArgs = ArgsList.size(); 4076 Args = std::move(ArgsList); 4077 4078 DAG.getTargetLoweringInfo().markLibCallAttributes( 4079 &(DAG.getMachineFunction()), CC, Args); 4080 return *this; 4081 } 4082 4083 CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType, 4084 SDValue Target, ArgListTy &&ArgsList) { 4085 RetTy = ResultType; 4086 Callee = Target; 4087 CallConv = CC; 4088 NumFixedArgs = ArgsList.size(); 4089 Args = std::move(ArgsList); 4090 return *this; 4091 } 4092 4093 CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy, 4094 SDValue Target, ArgListTy &&ArgsList, 4095 const CallBase &Call) { 4096 RetTy = ResultType; 4097 4098 IsInReg = Call.hasRetAttr(Attribute::InReg); 4099 DoesNotReturn = 4100 Call.doesNotReturn() || 4101 (!isa<InvokeInst>(Call) && isa<UnreachableInst>(Call.getNextNode())); 4102 IsVarArg = FTy->isVarArg(); 4103 IsReturnValueUsed = !Call.use_empty(); 4104 RetSExt = Call.hasRetAttr(Attribute::SExt); 4105 RetZExt = Call.hasRetAttr(Attribute::ZExt); 4106 NoMerge = Call.hasFnAttr(Attribute::NoMerge); 4107 4108 Callee = Target; 4109 4110 CallConv = Call.getCallingConv(); 4111 NumFixedArgs = FTy->getNumParams(); 4112 Args = std::move(ArgsList); 4113 4114 CB = &Call; 4115 4116 return *this; 
4117 } 4118 4119 CallLoweringInfo &setInRegister(bool Value = true) { 4120 IsInReg = Value; 4121 return *this; 4122 } 4123 4124 CallLoweringInfo &setNoReturn(bool Value = true) { 4125 DoesNotReturn = Value; 4126 return *this; 4127 } 4128 4129 CallLoweringInfo &setVarArg(bool Value = true) { 4130 IsVarArg = Value; 4131 return *this; 4132 } 4133 4134 CallLoweringInfo &setTailCall(bool Value = true) { 4135 IsTailCall = Value; 4136 return *this; 4137 } 4138 4139 CallLoweringInfo &setDiscardResult(bool Value = true) { 4140 IsReturnValueUsed = !Value; 4141 return *this; 4142 } 4143 4144 CallLoweringInfo &setConvergent(bool Value = true) { 4145 IsConvergent = Value; 4146 return *this; 4147 } 4148 4149 CallLoweringInfo &setSExtResult(bool Value = true) { 4150 RetSExt = Value; 4151 return *this; 4152 } 4153 4154 CallLoweringInfo &setZExtResult(bool Value = true) { 4155 RetZExt = Value; 4156 return *this; 4157 } 4158 4159 CallLoweringInfo &setIsPatchPoint(bool Value = true) { 4160 IsPatchPoint = Value; 4161 return *this; 4162 } 4163 4164 CallLoweringInfo &setIsPreallocated(bool Value = true) { 4165 IsPreallocated = Value; 4166 return *this; 4167 } 4168 4169 CallLoweringInfo &setIsPostTypeLegalization(bool Value = true) { 4170 IsPostTypeLegalization = Value; 4171 return *this; 4172 } 4173 4174 ArgListTy &getArgs() { 4175 return Args; 4176 } 4177 }; 4178 4179 /// This structure is used to pass arguments to the makeLibCall function. 4180 struct MakeLibCallOptions { 4181 // By passing the list of types before softening to makeLibCall, the target 4182 // hook shouldExtendTypeInLibCall can query the original types before softening. 4183 ArrayRef<EVT> OpsVTBeforeSoften; 4184 EVT RetVTBeforeSoften; 4185 bool IsSExt : 1; 4186 bool DoesNotReturn : 1; 4187 bool IsReturnValueUsed : 1; 4188 bool IsPostTypeLegalization : 1; 4189 bool IsSoften : 1; 4190 4191 MakeLibCallOptions() 4192 : IsSExt(false), DoesNotReturn(false), IsReturnValueUsed(true), 4193 IsPostTypeLegalization(false), IsSoften(false) {} 4194 4195 MakeLibCallOptions &setSExt(bool Value = true) { 4196 IsSExt = Value; 4197 return *this; 4198 } 4199 4200 MakeLibCallOptions &setNoReturn(bool Value = true) { 4201 DoesNotReturn = Value; 4202 return *this; 4203 } 4204 4205 MakeLibCallOptions &setDiscardResult(bool Value = true) { 4206 IsReturnValueUsed = !Value; 4207 return *this; 4208 } 4209 4210 MakeLibCallOptions &setIsPostTypeLegalization(bool Value = true) { 4211 IsPostTypeLegalization = Value; 4212 return *this; 4213 } 4214 4215 MakeLibCallOptions &setTypeListBeforeSoften(ArrayRef<EVT> OpsVT, EVT RetVT, 4216 bool Value = true) { 4217 OpsVTBeforeSoften = OpsVT; 4218 RetVTBeforeSoften = RetVT; 4219 IsSoften = Value; 4220 return *this; 4221 } 4222 }; 4223 4224 /// This function lowers an abstract call to a function into an actual call. 4225 /// This returns a pair of operands. The first element is the return value 4226 /// for the function (if RetTy is not VoidTy). The second element is the 4227 /// outgoing token chain. It calls LowerCall to do the actual lowering. 4228 std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const; 4229 4230 /// This hook must be implemented to lower calls into the specified 4231 /// DAG. The outgoing arguments to the call are described by the Outs array, 4232 /// and the values to be returned by the call are described by the Ins 4233 /// array. The implementation should fill in the InVals array with legal-type 4234 /// return values from the call, and return the resulting token chain value.
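/// Callers reach this hook through LowerCallTo. As a usage sketch, this is
/// roughly how SelectionDAG code builds a CallLoweringInfo for a libcall
/// (dl, Chain, Callee, RetTy, Args, SignExtend and TLI are assumed to be in
/// scope):
/// \code
///   TargetLowering::CallLoweringInfo CLI(DAG);
///   CLI.setDebugLoc(dl)
///       .setChain(Chain)
///       .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args))
///       .setTailCall(false)
///       .setSExtResult(SignExtend);
///   // First element is the return value, second the outgoing token chain.
///   std::pair<SDValue, SDValue> CallInfo = TLI.LowerCallTo(CLI);
/// \endcode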
4235 virtual SDValue 4236 LowerCall(CallLoweringInfo &/*CLI*/, 4237 SmallVectorImpl<SDValue> &/*InVals*/) const { 4238 llvm_unreachable("Not Implemented"); 4239 } 4240 4241 /// Target-specific cleanup for formal ByVal parameters. 4242 virtual void HandleByVal(CCState *, unsigned &, Align) const {} 4243 4244 /// This hook should be implemented to check whether the return values 4245 /// described by the Outs array can fit into the return registers. If false 4246 /// is returned, an sret-demotion is performed. 4247 virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/, 4248 MachineFunction &/*MF*/, bool /*isVarArg*/, 4249 const SmallVectorImpl<ISD::OutputArg> &/*Outs*/, 4250 LLVMContext &/*Context*/) const 4251 { 4252 // Return true by default to get preexisting behavior. 4253 return true; 4254 } 4255 4256 /// This hook must be implemented to lower outgoing return values, described 4257 /// by the Outs array, into the specified DAG. The implementation should 4258 /// return the resulting token chain value. 4259 virtual SDValue LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/, 4260 bool /*isVarArg*/, 4261 const SmallVectorImpl<ISD::OutputArg> & /*Outs*/, 4262 const SmallVectorImpl<SDValue> & /*OutVals*/, 4263 const SDLoc & /*dl*/, 4264 SelectionDAG & /*DAG*/) const { 4265 llvm_unreachable("Not Implemented"); 4266 } 4267 4268 /// Return true if the result of the specified node is used by a return node 4269 /// only. It also computes and returns the input chain for the tail call. 4270 /// 4271 /// This is used to determine whether it is possible to codegen a libcall as 4272 /// tail call at legalization time. 4273 virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const { 4274 return false; 4275 } 4276 4277 /// Return true if the target may be able to emit the call instruction as a 4278 /// tail call. This is used by optimization passes to determine if it's 4279 /// profitable to duplicate return instructions to enable tailcall optimization. 4280 virtual bool mayBeEmittedAsTailCall(const CallInst *) const { 4281 return false; 4282 } 4283 4284 /// Return the builtin name for the __builtin___clear_cache intrinsic. 4285 /// The default is to invoke the clear cache library call. 4286 virtual const char * getClearCacheBuiltinName() const { 4287 return "__clear_cache"; 4288 } 4289 4290 /// Return the register ID of the name passed in. Used by named register 4291 /// global variables extension. There is no target-independent behaviour 4292 /// so the default action is to bail. 4293 virtual Register getRegisterByName(const char* RegName, LLT Ty, 4294 const MachineFunction &MF) const { 4295 report_fatal_error("Named registers not implemented for this target"); 4296 } 4297 4298 /// Return the type that should be used to zero or sign extend a 4299 /// zeroext/signext integer return value. FIXME: Some C calling conventions 4300 /// require the return type to be promoted, but this is not true all the time, 4301 /// e.g. i1/i8/i16 on x86/x86_64. It is also not necessary for non-C calling 4302 /// conventions. The frontend should handle this and include all of the 4303 /// necessary information. 4304 virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, 4305 ISD::NodeType /*ExtendKind*/) const { 4306 EVT MinVT = getRegisterType(Context, MVT::i32); 4307 return VT.bitsLT(MinVT) ?
MinVT : VT; 4308 } 4309 4310 /// For some targets, an LLVM struct type must be broken down into multiple 4311 /// simple types, but the calling convention specifies that the entire struct 4312 /// must be passed in a block of consecutive registers. 4313 virtual bool 4314 functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, 4315 bool isVarArg, 4316 const DataLayout &DL) const { 4317 return false; 4318 } 4319 4320 /// For most targets, an LLVM type must be broken down into multiple 4321 /// smaller types. Usually the halves are ordered according to the endianness, 4322 /// but on some platforms that would break. So this method defaults to 4323 /// matching the endianness and can be overridden. 4324 virtual bool 4325 shouldSplitFunctionArgumentsAsLittleEndian(const DataLayout &DL) const { 4326 return DL.isLittleEndian(); 4327 } 4328 4329 /// Returns a zero-terminated array of registers that can be safely used as 4330 /// scratch registers. 4331 virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const { 4332 return nullptr; 4333 } 4334 4335 /// This callback is used to prepare for a volatile or atomic load. 4336 /// It takes a chain node as input and returns the chain for the load itself. 4337 /// 4338 /// Having a callback like this is necessary for targets like SystemZ, 4339 /// which allows a CPU to reuse the result of a previous load indefinitely, 4340 /// even if a cache-coherent store is performed by another CPU. The default 4341 /// implementation does nothing. 4342 virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL, 4343 SelectionDAG &DAG) const { 4344 return Chain; 4345 } 4346 4347 /// Should SelectionDAG lower an atomic store of the given kind as a normal 4348 /// StoreSDNode (as opposed to an AtomicSDNode)? NOTE: The intention is to 4349 /// eventually migrate all targets to using StoreSDNodes, but porting is 4350 /// being done one target at a time. 4351 virtual bool lowerAtomicStoreAsStoreSDNode(const StoreInst &SI) const { 4352 assert(SI.isAtomic() && "violated precondition"); 4353 return false; 4354 } 4355 4356 /// Should SelectionDAG lower an atomic load of the given kind as a normal 4357 /// LoadSDNode (as opposed to an AtomicSDNode)? NOTE: The intention is to 4358 /// eventually migrate all targets to using LoadSDNodes, but porting is 4359 /// being done one target at a time. 4360 virtual bool lowerAtomicLoadAsLoadSDNode(const LoadInst &LI) const { 4361 assert(LI.isAtomic() && "violated precondition"); 4362 return false; 4363 } 4364 4365 4366 /// This callback is invoked by the type legalizer to legalize nodes with an 4367 /// illegal operand type but legal result types. It replaces the 4368 /// LowerOperation callback in the type legalizer. The reason we can not do 4369 /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to 4370 /// use this callback. 4371 /// 4372 /// TODO: Consider merging with ReplaceNodeResults. 4373 /// 4374 /// The target places new result values for the node in Results (their number 4375 /// and types must exactly match those of the original return values of 4376 /// the node), or leaves Results empty, which indicates that the node is not 4377 /// to be custom lowered after all. 4378 /// The default implementation calls LowerOperation.
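/// The default implementation forwards to LowerOperation, which for a
/// typical target is a switch over the custom-lowered opcodes; a minimal
/// sketch (the lowerXXX helpers are hypothetical):
/// \code
///   SDValue MyTargetLowering::LowerOperation(SDValue Op,
///                                            SelectionDAG &DAG) const {
///     switch (Op.getOpcode()) {
///     case ISD::GlobalAddress: return lowerGlobalAddress(Op, DAG);
///     case ISD::SELECT:        return lowerSELECT(Op, DAG);
///     default:
///       llvm_unreachable("unexpected opcode for custom lowering");
///     }
///   }
/// \endcode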
4379 virtual void LowerOperationWrapper(SDNode *N, 4380 SmallVectorImpl<SDValue> &Results, 4381 SelectionDAG &DAG) const; 4382 4383 /// This callback is invoked for operations that are unsupported by the 4384 /// target, which are registered to use 'custom' lowering, and whose defined 4385 /// values are all legal. If the target has no operations that require custom 4386 /// lowering, it need not implement this. The default implementation of this 4387 /// aborts. 4388 virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const; 4389 4390 /// This callback is invoked when a node result type is illegal for the 4391 /// target, and the operation was registered to use 'custom' lowering for that 4392 /// result type. The target places new result values for the node in Results 4393 /// (their number and types must exactly match those of the original return 4394 /// values of the node), or leaves Results empty, which indicates that the 4395 /// node is not to be custom lowered after all. 4396 /// 4397 /// If the target has no operations that require custom lowering, it need not 4398 /// implement this. The default implementation aborts. 4399 virtual void ReplaceNodeResults(SDNode * /*N*/, 4400 SmallVectorImpl<SDValue> &/*Results*/, 4401 SelectionDAG &/*DAG*/) const { 4402 llvm_unreachable("ReplaceNodeResults not implemented for this target!"); 4403 } 4404 4405 /// This method returns the name of a target specific DAG node. 4406 virtual const char *getTargetNodeName(unsigned Opcode) const; 4407 4408 /// This method returns a target specific FastISel object, or null if the 4409 /// target does not support "fast" ISel. 4410 virtual FastISel *createFastISel(FunctionLoweringInfo &, 4411 const TargetLibraryInfo *) const { 4412 return nullptr; 4413 } 4414 4415 bool verifyReturnAddressArgumentIsConstant(SDValue Op, 4416 SelectionDAG &DAG) const; 4417 4418 //===--------------------------------------------------------------------===// 4419 // Inline Asm Support hooks 4420 // 4421 4422 /// This hook allows the target to expand an inline asm call to be explicit 4423 /// llvm code if it wants to. This is useful for turning simple inline asms 4424 /// into LLVM intrinsics, which gives the compiler more information about the 4425 /// behavior of the code. 4426 virtual bool ExpandInlineAsm(CallInst *) const { 4427 return false; 4428 } 4429 4430 enum ConstraintType { 4431 C_Register, // Constraint represents specific register(s). 4432 C_RegisterClass, // Constraint represents any of register(s) in class. 4433 C_Memory, // Memory constraint. 4434 C_Address, // Address constraint. 4435 C_Immediate, // Requires an immediate. 4436 C_Other, // Something else. 4437 C_Unknown // Unsupported constraint. 4438 }; 4439 4440 enum ConstraintWeight { 4441 // Generic weights. 4442 CW_Invalid = -1, // No match. 4443 CW_Okay = 0, // Acceptable. 4444 CW_Good = 1, // Good weight. 4445 CW_Better = 2, // Better weight. 4446 CW_Best = 3, // Best weight. 4447 4448 // Well-known weights. 4449 CW_SpecificReg = CW_Okay, // Specific register operands. 4450 CW_Register = CW_Good, // Register operands. 4451 CW_Memory = CW_Better, // Memory operands. 4452 CW_Constant = CW_Best, // Constant operand. 4453 CW_Default = CW_Okay // Default or don't know type. 4454 }; 4455 4456 /// This contains information for each constraint that we are lowering. 4457 struct AsmOperandInfo : public InlineAsm::ConstraintInfo { 4458 /// This contains the actual string for the code, like "m". 
TargetLowering 4459 /// picks the 'best' code from ConstraintInfo::Codes that most closely 4460 /// matches the operand. 4461 std::string ConstraintCode; 4462 4463 /// Information about the constraint code, e.g. Register, RegisterClass, 4464 /// Memory, Other, Unknown. 4465 TargetLowering::ConstraintType ConstraintType = TargetLowering::C_Unknown; 4466 4467 /// If this is the result output operand or a clobber, this is null, 4468 /// otherwise it is the incoming operand to the CallInst. This gets 4469 /// modified as the asm is processed. 4470 Value *CallOperandVal = nullptr; 4471 4472 /// The ValueType for the operand value. 4473 MVT ConstraintVT = MVT::Other; 4474 4475 /// Copy constructor for copying from a ConstraintInfo. 4476 AsmOperandInfo(InlineAsm::ConstraintInfo Info) 4477 : InlineAsm::ConstraintInfo(std::move(Info)) {} 4478 4479 /// Return true if this is an input operand that is a matching constraint, 4480 /// like "4". 4481 bool isMatchingInputConstraint() const; 4482 4483 /// If this is an input matching constraint, this method returns the output 4484 /// operand it matches. 4485 unsigned getMatchedOperand() const; 4486 }; 4487 4488 using AsmOperandInfoVector = std::vector<AsmOperandInfo>; 4489 4490 /// Split up the constraint string from the inline assembly value into the 4491 /// specific constraints and their prefixes, and also tie in the associated 4492 /// operand values. If this returns an empty vector, and if the constraint 4493 /// string itself isn't empty, there was an error parsing. 4494 virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, 4495 const TargetRegisterInfo *TRI, 4496 const CallBase &Call) const; 4497 4498 /// Examine constraint type and operand type and determine a weight value. 4499 /// The operand object must already have been set up with the operand type. 4500 virtual ConstraintWeight getMultipleConstraintMatchWeight( 4501 AsmOperandInfo &info, int maIndex) const; 4502 4503 /// Examine constraint string and operand type and determine a weight value. 4504 /// The operand object must already have been set up with the operand type. 4505 virtual ConstraintWeight getSingleConstraintMatchWeight( 4506 AsmOperandInfo &info, const char *constraint) const; 4507 4508 /// Determines the constraint code and constraint type to use for the specific 4509 /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType. 4510 /// If the actual operand being passed in is available, it can be passed in as 4511 /// Op, otherwise an empty SDValue can be passed. 4512 virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, 4513 SDValue Op, 4514 SelectionDAG *DAG = nullptr) const; 4515 4516 /// Given a constraint, return the type of constraint it is for this target. 4517 virtual ConstraintType getConstraintType(StringRef Constraint) const; 4518 4519 /// Given a physical register constraint (e.g. {edx}), return the register 4520 /// number and the register class for the register. 4521 /// 4522 /// Given a register class constraint, like 'r', if this corresponds directly 4523 /// to an LLVM register class, return a register of 0 and the register class 4524 /// pointer. 4525 /// 4526 /// This should only be used for C_Register constraints. On error, this 4527 /// returns a register number of 0 and a null register class pointer.
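/// A sketch of a typical override; 'r' is mapped to a hypothetical
/// MyTarget::GPRRegClass, and anything unhandled is delegated back to the
/// base class:
/// \code
///   std::pair<unsigned, const TargetRegisterClass *>
///   MyTargetLowering::getRegForInlineAsmConstraint(
///       const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
///     if (Constraint.size() == 1 && Constraint[0] == 'r')
///       return std::make_pair(0U, &MyTarget::GPRRegClass);
///     return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
///   }
/// \endcode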
4528 virtual std::pair<unsigned, const TargetRegisterClass *> 4529 getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, 4530 StringRef Constraint, MVT VT) const; 4531 4532 virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const { 4533 if (ConstraintCode == "m") 4534 return InlineAsm::Constraint_m; 4535 if (ConstraintCode == "o") 4536 return InlineAsm::Constraint_o; 4537 if (ConstraintCode == "X") 4538 return InlineAsm::Constraint_X; 4539 if (ConstraintCode == "p") 4540 return InlineAsm::Constraint_p; 4541 return InlineAsm::Constraint_Unknown; 4542 } 4543 4544 /// Try to replace an X constraint, which matches anything, with another that 4545 /// has more specific requirements based on the type of the corresponding 4546 /// operand. This returns null if there is no replacement to make. 4547 virtual const char *LowerXConstraint(EVT ConstraintVT) const; 4548 4549 /// Lower the specified operand into the Ops vector. If it is invalid, don't 4550 /// add anything to Ops. 4551 virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, 4552 std::vector<SDValue> &Ops, 4553 SelectionDAG &DAG) const; 4554 4555 // Lower custom output constraints. If invalid, return SDValue(). 4556 virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Flag, 4557 const SDLoc &DL, 4558 const AsmOperandInfo &OpInfo, 4559 SelectionDAG &DAG) const; 4560 4561 //===--------------------------------------------------------------------===// 4562 // Div utility functions 4563 // 4564 SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, 4565 SmallVectorImpl<SDNode *> &Created) const; 4566 SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, 4567 SmallVectorImpl<SDNode *> &Created) const; 4568 4569 /// Targets may override this function to provide custom SDIV lowering for 4570 /// power-of-2 denominators. If the target returns an empty SDValue, LLVM 4571 /// assumes SDIV is expensive and replaces it with a series of other integer 4572 /// operations. 4573 virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, 4574 SelectionDAG &DAG, 4575 SmallVectorImpl<SDNode *> &Created) const; 4576 4577 /// Targets may override this function to provide custom SREM lowering for 4578 /// power-of-2 denominators. If the target returns an empty SDValue, LLVM 4579 /// assumes SREM is expensive and replaces it with a series of other integer 4580 /// operations. 4581 virtual SDValue BuildSREMPow2(SDNode *N, const APInt &Divisor, 4582 SelectionDAG &DAG, 4583 SmallVectorImpl<SDNode *> &Created) const; 4584 4585 /// Indicate whether this target prefers to combine FDIVs with the same 4586 /// divisor. If the transform should never be done, return zero. If the 4587 /// transform should be done, return the minimum number of divisor uses 4588 /// that must exist. 4589 virtual unsigned combineRepeatedFPDivisors() const { 4590 return 0; 4591 } 4592 4593 /// Hooks for building estimates in place of slower divisions and square 4594 /// roots. 4595 4596 /// Return either a square root or its reciprocal estimate value for the input 4597 /// operand. 4598 /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or 4599 /// 'Enabled' as set by a potential default override attribute. 4600 /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson 4601 /// refinement iterations required to generate a sufficient (though not 4602 /// necessarily IEEE-754 compliant) estimate is returned in that parameter. 
4603 /// The boolean UseOneConstNR output is used to select a Newton-Raphson 4604 /// algorithm implementation that uses either one or two constants. 4605 /// The boolean Reciprocal is used to select whether the estimate is for the 4606 /// square root of the input operand or the reciprocal of its square root. 4607 /// A target may choose to implement its own refinement within this function. 4608 /// If that's true, then return '0' as the number of RefinementSteps to avoid 4609 /// any further refinement of the estimate. 4610 /// An empty SDValue return means no estimate sequence can be created. 4611 virtual SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, 4612 int Enabled, int &RefinementSteps, 4613 bool &UseOneConstNR, bool Reciprocal) const { 4614 return SDValue(); 4615 } 4616 4617 /// Try to convert the fminnum/fmaxnum to a compare/select sequence. This is 4618 /// required for correctness since InstCombine might have canonicalized an 4619 /// fcmp+select sequence to a FMINNUM/FMAXNUM intrinsic. If we were to fall 4620 /// through to the default expansion/softening to a libcall, we might introduce 4621 /// a link-time dependency on libm into a file that originally did not have one. 4622 SDValue createSelectForFMINNUM_FMAXNUM(SDNode *Node, SelectionDAG &DAG) const; 4623 4624 /// Return a reciprocal estimate value for the input operand. 4625 /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or 4626 /// 'Enabled' as set by a potential default override attribute. 4627 /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson 4628 /// refinement iterations required to generate a sufficient (though not 4629 /// necessarily IEEE-754 compliant) estimate is returned in that parameter. 4630 /// A target may choose to implement its own refinement within this function. 4631 /// If that's true, then return '0' as the number of RefinementSteps to avoid 4632 /// any further refinement of the estimate. 4633 /// An empty SDValue return means no estimate sequence can be created. 4634 virtual SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, 4635 int Enabled, int &RefinementSteps) const { 4636 return SDValue(); 4637 } 4638 4639 /// Return a target-dependent comparison result if the input operand is 4640 /// suitable for use with a square root estimate calculation. For example, the 4641 /// comparison may check if the operand is NAN, INF, zero, normal, etc. The 4642 /// result should be used as the condition operand for a select or branch. 4643 virtual SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG, 4644 const DenormalMode &Mode) const; 4645 4646 /// Return a target-dependent result if the input operand is not suitable for 4647 /// use with a square root estimate calculation. 4648 virtual SDValue getSqrtResultForDenormInput(SDValue Operand, 4649 SelectionDAG &DAG) const { 4650 return DAG.getConstantFP(0.0, SDLoc(Operand), Operand.getValueType()); 4651 } 4652 4653 //===--------------------------------------------------------------------===// 4654 // Legalization utility functions 4655 // 4656 4657 /// Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes, 4658 /// respectively, each computing an n/2-bit part of the result. 4659 /// \param Result A vector that will be filled with the parts of the result 4660 /// in little-endian order. 4661 /// \param LL Low bits of the LHS of the MUL. You can use this parameter 4662 /// if you want to control how low bits are extracted from the LHS.
4663 /// \param LH High bits of the LHS of the MUL. See LL for meaning. 4664 /// \param RL Low bits of the RHS of the MUL. See LL for meaning. 4665 /// \param RH High bits of the RHS of the MUL. See LL for meaning. 4666 /// \returns true if the node has been expanded, false if it has not 4667 bool expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl, SDValue LHS, 4668 SDValue RHS, SmallVectorImpl<SDValue> &Result, EVT HiLoVT, 4669 SelectionDAG &DAG, MulExpansionKind Kind, 4670 SDValue LL = SDValue(), SDValue LH = SDValue(), 4671 SDValue RL = SDValue(), SDValue RH = SDValue()) const; 4672 4673 /// Expand a MUL into two nodes. One that computes the high bits of 4674 /// the result and one that computes the low bits. 4675 /// \param HiLoVT The value type to use for the Lo and Hi nodes. 4676 /// \param LL Low bits of the LHS of the MUL. You can use this parameter 4677 /// if you want to control how low bits are extracted from the LHS. 4678 /// \param LH High bits of the LHS of the MUL. See LL for meaning. 4679 /// \param RL Low bits of the RHS of the MUL. See LL for meaning. 4680 /// \param RH High bits of the RHS of the MUL. See LL for meaning. 4681 /// \returns true if the node has been expanded, false if it has not 4682 bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT, 4683 SelectionDAG &DAG, MulExpansionKind Kind, 4684 SDValue LL = SDValue(), SDValue LH = SDValue(), 4685 SDValue RL = SDValue(), SDValue RH = SDValue()) const; 4686 4687 /// Expand funnel shift. 4688 /// \param N Node to expand 4689 /// \returns The expansion if successful, SDValue() otherwise 4690 SDValue expandFunnelShift(SDNode *N, SelectionDAG &DAG) const; 4691 4692 /// Expand rotations. 4693 /// \param N Node to expand 4694 /// \param AllowVectorOps expand vector rotates; this should only be performed 4695 /// if the legalization is happening outside of LegalizeVectorOps 4696 /// \returns The expansion if successful, SDValue() otherwise 4697 SDValue expandROT(SDNode *N, bool AllowVectorOps, SelectionDAG &DAG) const; 4698 4699 /// Expand shift-by-parts. 4700 /// \param N Node to expand 4701 /// \param Lo lower-output-part after conversion 4702 /// \param Hi upper-output-part after conversion 4703 void expandShiftParts(SDNode *N, SDValue &Lo, SDValue &Hi, 4704 SelectionDAG &DAG) const; 4705 4706 /// Expand float(f32) to SINT(i64) conversion 4707 /// \param N Node to expand 4708 /// \param Result output after conversion 4709 /// \returns true if the expansion was successful, false otherwise 4710 bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const; 4711 4712 /// Expand float to UINT conversion 4713 /// \param N Node to expand 4714 /// \param Result output after conversion 4715 /// \param Chain output chain after conversion 4716 /// \returns true if the expansion was successful, false otherwise 4717 bool expandFP_TO_UINT(SDNode *N, SDValue &Result, SDValue &Chain, 4718 SelectionDAG &DAG) const; 4719 4720 /// Expand UINT(i64) to double(f64) conversion 4721 /// \param N Node to expand 4722 /// \param Result output after conversion 4723 /// \param Chain output chain after conversion 4724 /// \returns true if the expansion was successful, false otherwise 4725 bool expandUINT_TO_FP(SDNode *N, SDValue &Result, SDValue &Chain, 4726 SelectionDAG &DAG) const; 4727 4728 /// Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted inputs.
4729 SDValue expandFMINNUM_FMAXNUM(SDNode *N, SelectionDAG &DAG) const; 4730 4731 /// Expand FP_TO_[US]INT_SAT into FP_TO_[US]INT and selects or min/max. 4732 /// \param N Node to expand 4733 /// \returns The expansion result 4734 SDValue expandFP_TO_INT_SAT(SDNode *N, SelectionDAG &DAG) const; 4735 4736 /// Expand check for floating point class. 4737 /// \param ResultVT The type of intrinsic call result. 4738 /// \param Op The tested value. 4739 /// \param Test The test to perform. 4740 /// \param Flags The optimization flags. 4741 /// \returns The expansion result or SDValue() if it fails. 4742 SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, unsigned Test, 4743 SDNodeFlags Flags, const SDLoc &DL, 4744 SelectionDAG &DAG) const; 4745 4746 /// Expand CTPOP nodes. Expands vector/scalar CTPOP nodes; 4747 /// vector nodes can only succeed if all operations are legal/custom. 4748 /// \param N Node to expand 4749 /// \returns The expansion result or SDValue() if it fails. 4750 SDValue expandCTPOP(SDNode *N, SelectionDAG &DAG) const; 4751 4752 /// Expand CTLZ/CTLZ_ZERO_UNDEF nodes. Expands vector/scalar CTLZ nodes; 4753 /// vector nodes can only succeed if all operations are legal/custom. 4754 /// \param N Node to expand 4755 /// \returns The expansion result or SDValue() if it fails. 4756 SDValue expandCTLZ(SDNode *N, SelectionDAG &DAG) const; 4757 4758 /// Expand CTTZ/CTTZ_ZERO_UNDEF nodes. Expands vector/scalar CTTZ nodes; 4759 /// vector nodes can only succeed if all operations are legal/custom. 4760 /// \param N Node to expand 4761 /// \returns The expansion result or SDValue() if it fails. 4762 SDValue expandCTTZ(SDNode *N, SelectionDAG &DAG) const; 4763 4764 /// Expand ABS nodes. Expands vector/scalar ABS nodes; 4765 /// vector nodes can only succeed if all operations are legal/custom. 4766 /// (ABS x) -> (XOR (ADD x, (SRA x, type_size)), (SRA x, type_size)) 4767 /// \param N Node to expand 4768 /// \param IsNegative indicates negated abs 4769 /// \returns The expansion result or SDValue() if it fails. 4770 SDValue expandABS(SDNode *N, SelectionDAG &DAG, 4771 bool IsNegative = false) const; 4772 4773 /// Expand BSWAP nodes. Expands scalar/vector BSWAP nodes with i16/i32/i64 4774 /// scalar types. Returns SDValue() if expansion fails. 4775 /// \param N Node to expand 4776 /// \returns The expansion result or SDValue() if it fails. 4777 SDValue expandBSWAP(SDNode *N, SelectionDAG &DAG) const; 4778 4779 /// Expand BITREVERSE nodes. Expands scalar/vector BITREVERSE nodes. 4780 /// Returns SDValue() if expansion fails. 4781 /// \param N Node to expand 4782 /// \returns The expansion result or SDValue() if it fails. 4783 SDValue expandBITREVERSE(SDNode *N, SelectionDAG &DAG) const; 4784 4785 /// Turn a load of a vector type into a load of the individual elements. 4786 /// \param LD load to expand 4787 /// \returns BUILD_VECTOR and TokenFactor nodes. 4788 std::pair<SDValue, SDValue> scalarizeVectorLoad(LoadSDNode *LD, 4789 SelectionDAG &DAG) const; 4790 4791 /// Turn a store of a vector type into stores of the individual elements. 4792 /// \param ST Store with a vector value type 4793 /// \returns TokenFactor of the individual store chains. 4794 SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const; 4795 4796 /// Expands an unaligned load to 2 half-size loads for an integer, and 4797 /// possibly more for vectors.
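/// As a usage sketch, a target's LowerOperation might call this for a load
/// whose alignment the hardware cannot handle directly (Op is assumed to
/// wrap a LoadSDNode):
/// \code
///   auto *LD = cast<LoadSDNode>(Op.getNode());
///   // First element is the loaded value, second the new chain.
///   std::pair<SDValue, SDValue> Parts = expandUnalignedLoad(LD, DAG);
///   return DAG.getMergeValues({Parts.first, Parts.second}, SDLoc(Op));
/// \endcode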
4798 std::pair<SDValue, SDValue> expandUnalignedLoad(LoadSDNode *LD, 4799 SelectionDAG &DAG) const; 4800 4801 /// Expands an unaligned store to 2 half-size stores for integer values, and 4802 /// possibly more for vectors. 4803 SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const; 4804 4805 /// Increments memory address \p Addr according to the type of the value 4806 /// \p DataVT that should be stored. If the data is stored in compressed 4807 /// form, the memory address should be incremented according to the number of 4808 /// the stored elements. This number is equal to the number of '1' bits 4809 /// in \p Mask. 4810 /// \p DataVT is a vector type. \p Mask is a vector value. 4811 /// \p DataVT and \p Mask have the same number of vector elements. 4812 SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL, 4813 EVT DataVT, SelectionDAG &DAG, 4814 bool IsCompressedMemory) const; 4815 4816 /// Get a pointer to vector element \p Idx located in memory for a vector of 4817 /// type \p VecVT starting at a base address of \p VecPtr. If \p Idx is out of 4818 /// bounds the returned pointer is unspecified, but will be within the vector 4819 /// bounds. 4820 SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, 4821 SDValue Index) const; 4822 4823 /// Get a pointer to a sub-vector of type \p SubVecVT at index \p Idx located 4824 /// in memory for a vector of type \p VecVT starting at a base address of 4825 /// \p VecPtr. If \p Idx plus the size of \p SubVecVT is out of bounds the 4826 /// returned pointer is unspecified, but the value returned will be such that 4827 /// the entire subvector would be within the vector bounds. 4828 SDValue getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, 4829 EVT SubVecVT, SDValue Index) const; 4830 4831 /// Method for building the DAG expansion of ISD::[US][MIN|MAX]. This 4832 /// method accepts integers as its arguments. 4833 SDValue expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const; 4834 4835 /// Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT. This 4836 /// method accepts integers as its arguments. 4837 SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const; 4838 4839 /// Method for building the DAG expansion of ISD::[US]SHLSAT. This 4840 /// method accepts integers as its arguments. 4841 SDValue expandShlSat(SDNode *Node, SelectionDAG &DAG) const; 4842 4843 /// Method for building the DAG expansion of ISD::[U|S]MULFIX[SAT]. This 4844 /// method accepts integers as its arguments. 4845 SDValue expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const; 4846 4847 /// Method for building the DAG expansion of ISD::[US]DIVFIX[SAT]. This 4848 /// method accepts integers as its arguments. 4849 /// Note: This method may fail if the division could not be performed 4850 /// within the type. Clients must retry with a wider type if this happens. 4851 SDValue expandFixedPointDiv(unsigned Opcode, const SDLoc &dl, 4852 SDValue LHS, SDValue RHS, 4853 unsigned Scale, SelectionDAG &DAG) const; 4854 4855 /// Method for building the DAG expansion of ISD::U(ADD|SUB)O. Expansion 4856 /// always succeeds and populates the Result and Overflow arguments. 4857 void expandUADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow, 4858 SelectionDAG &DAG) const; 4859 4860 /// Method for building the DAG expansion of ISD::S(ADD|SUB)O. Expansion 4861 /// always succeeds and populates the Result and Overflow arguments.
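/// As a usage sketch from a target's ReplaceNodeResults or LowerOperation
/// (Node and Results are assumed to be in scope; the unsigned variant above
/// is used the same way):
/// \code
///   SDValue Result, Overflow;
///   expandSADDSUBO(Node, Result, Overflow, DAG);
///   Results.push_back(Result);
///   Results.push_back(Overflow);
/// \endcode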
4862 void expandSADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow, 4863 SelectionDAG &DAG) const; 4864 4865 /// Method for building the DAG expansion of ISD::[US]MULO. Returns whether 4866 /// expansion was successful and populates the Result and Overflow arguments. 4867 bool expandMULO(SDNode *Node, SDValue &Result, SDValue &Overflow, 4868 SelectionDAG &DAG) const; 4869 4870 /// Expand a VECREDUCE_* into an explicit calculation. If Count is specified, 4871 /// only the first Count elements of the vector are used. 4872 SDValue expandVecReduce(SDNode *Node, SelectionDAG &DAG) const; 4873 4874 /// Expand a VECREDUCE_SEQ_* into an explicit ordered calculation. 4875 SDValue expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const; 4876 4877 /// Expand an SREM or UREM using SDIV/UDIV or SDIVREM/UDIVREM, if legal. 4878 /// Returns true if the expansion was successful. 4879 bool expandREM(SDNode *Node, SDValue &Result, SelectionDAG &DAG) const; 4880 4881 /// Method for building the DAG expansion of ISD::VECTOR_SPLICE. This 4882 /// method accepts vectors as its arguments. 4883 SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const; 4884 4885 /// Legalize a SETCC or VP_SETCC with given LHS and RHS and condition code CC 4886 /// on the current target. A VP_SETCC will additionally be given a Mask 4887 /// and/or EVL not equal to SDValue(). 4888 /// 4889 /// If the SETCC has been legalized using AND / OR, then the legalized node 4890 /// will be stored in LHS. RHS and CC will be set to SDValue(). NeedInvert 4891 /// will be set to false. This will also hold if the VP_SETCC has been 4892 /// legalized using VP_AND / VP_OR. 4893 /// 4894 /// If the SETCC / VP_SETCC has been legalized by using 4895 /// getSetCCSwappedOperands(), then the values of LHS and RHS will be 4896 /// swapped, CC will be set to the new condition, and NeedInvert will be set 4897 /// to false. 4898 /// 4899 /// If the SETCC / VP_SETCC has been legalized using the inverse condcode, 4900 /// then LHS and RHS will be unchanged, CC will be set to the inverted 4901 /// condcode, and NeedInvert will be set to true. The caller must invert the 4902 /// result of the SETCC with SelectionDAG::getLogicalNOT() or take equivalent 4903 /// action to swap the effect of a true/false result. 4904 /// 4905 /// \returns true if the SETCC / VP_SETCC has been legalized, false if it 4906 /// hasn't. 4907 bool LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT, SDValue &LHS, 4908 SDValue &RHS, SDValue &CC, SDValue Mask, 4909 SDValue EVL, bool &NeedInvert, const SDLoc &dl, 4910 SDValue &Chain, bool IsSignaling = false) const; 4911 4912 //===--------------------------------------------------------------------===// 4913 // Instruction Emitting Hooks 4914 // 4915 4916 /// This method should be implemented by targets that mark instructions with 4917 /// the 'usesCustomInserter' flag. These instructions are special in various 4918 /// ways, which require special support to insert. The specified MachineInstr 4919 /// is created but not inserted into any basic blocks, and this method is 4920 /// called to expand it into a sequence of instructions, potentially also 4921 /// creating new basic blocks and control flow. 4922 /// As long as the returned basic block is different (i.e., we created a new 4923 /// one), the custom inserter is free to modify the rest of \p MBB.
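/// A typical override dispatches on the pseudo-instruction opcode; a minimal
/// sketch (MyTarget::SELECT_PSEUDO and emitSelect are hypothetical):
/// \code
///   MachineBasicBlock *
///   MyTargetLowering::EmitInstrWithCustomInserter(
///       MachineInstr &MI, MachineBasicBlock *MBB) const {
///     switch (MI.getOpcode()) {
///     case MyTarget::SELECT_PSEUDO:
///       // Expands into diamond control flow plus a PHI in a new block.
///       return emitSelect(MI, MBB);
///     default:
///       llvm_unreachable("unexpected instruction for custom insertion");
///     }
///   }
/// \endcode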
4924 virtual MachineBasicBlock * 4925 EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const; 4926 4927 /// This method should be implemented by targets that mark instructions with 4928 /// the 'hasPostISelHook' flag. These instructions must be adjusted after 4929 /// instruction selection by target hooks. e.g. to fill in optional defs for 4930 /// ARM 's'-setting instructions. 4931 virtual void AdjustInstrPostInstrSelection(MachineInstr &MI, 4932 SDNode *Node) const; 4933 4934 /// If this function returns true, SelectionDAGBuilder emits a 4935 /// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector. 4936 virtual bool useLoadStackGuardNode() const { 4937 return false; 4938 } 4939 4940 virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val, 4941 const SDLoc &DL) const { 4942 llvm_unreachable("not implemented for this target"); 4943 } 4944 4945 /// Lower TLS global address SDNode for target independent emulated TLS model. 4946 virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, 4947 SelectionDAG &DAG) const; 4948 4949 /// Expands a target-specific indirect branch for the case of JumpTable 4950 /// expansion. 4951 virtual SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value, SDValue Addr, 4952 SelectionDAG &DAG) const { 4953 return DAG.getNode(ISD::BRIND, dl, MVT::Other, Value, Addr); 4954 } 4955 4956 // seteq(x, 0) -> truncate(srl(ctlz(zext(x)), log2(#bits))) 4957 // If we're comparing for equality to zero and isCtlzFast is true, expose the 4958 // fact that this can be implemented as a ctlz/srl pair, so that the dag 4959 // combiner can fold the new nodes. 4960 SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const; 4961 4962 private: 4963 SDValue foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, 4964 const SDLoc &DL, DAGCombinerInfo &DCI) const; 4965 SDValue foldSetCCWithBinOp(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, 4966 const SDLoc &DL, DAGCombinerInfo &DCI) const; 4967 4968 SDValue optimizeSetCCOfSignedTruncationCheck(EVT SCCVT, SDValue N0, 4969 SDValue N1, ISD::CondCode Cond, 4970 DAGCombinerInfo &DCI, 4971 const SDLoc &DL) const; 4972 4973 // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0 4974 SDValue optimizeSetCCByHoistingAndByConstFromLogicalShift( 4975 EVT SCCVT, SDValue N0, SDValue N1C, ISD::CondCode Cond, 4976 DAGCombinerInfo &DCI, const SDLoc &DL) const; 4977 4978 SDValue prepareUREMEqFold(EVT SETCCVT, SDValue REMNode, 4979 SDValue CompTargetNode, ISD::CondCode Cond, 4980 DAGCombinerInfo &DCI, const SDLoc &DL, 4981 SmallVectorImpl<SDNode *> &Created) const; 4982 SDValue buildUREMEqFold(EVT SETCCVT, SDValue REMNode, SDValue CompTargetNode, 4983 ISD::CondCode Cond, DAGCombinerInfo &DCI, 4984 const SDLoc &DL) const; 4985 4986 SDValue prepareSREMEqFold(EVT SETCCVT, SDValue REMNode, 4987 SDValue CompTargetNode, ISD::CondCode Cond, 4988 DAGCombinerInfo &DCI, const SDLoc &DL, 4989 SmallVectorImpl<SDNode *> &Created) const; 4990 SDValue buildSREMEqFold(EVT SETCCVT, SDValue REMNode, SDValue CompTargetNode, 4991 ISD::CondCode Cond, DAGCombinerInfo &DCI, 4992 const SDLoc &DL) const; 4993 }; 4994 4995 /// Given an LLVM IR type and return type attributes, compute the return value 4996 /// EVTs and flags, and optionally also the offsets, if the return value is 4997 /// being lowered to memory.
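/// A usage sketch mirroring how FunctionLoweringInfo decides on sret
/// demotion (Fn, MF and TLI are assumed to be in scope):
/// \code
///   SmallVector<ISD::OutputArg, 4> Outs;
///   GetReturnInfo(Fn.getCallingConv(), Fn.getReturnType(),
///                 Fn.getAttributes(), Outs, TLI, MF.getDataLayout());
///   bool CanLowerRet = TLI.CanLowerReturn(Fn.getCallingConv(), MF,
///                                         Fn.isVarArg(), Outs,
///                                         Fn.getContext());
/// \endcode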
4998 void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, 4999 SmallVectorImpl<ISD::OutputArg> &Outs, 5000 const TargetLowering &TLI, const DataLayout &DL); 5001 5002 } // end namespace llvm 5003 5004 #endif // LLVM_CODEGEN_TARGETLOWERING_H 5005