1 //===- llvm/CodeGen/TargetInstrInfo.h - Instruction Info --------*- C++ -*-===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file describes the target machine instruction set to the code generator. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #ifndef LLVM_CODEGEN_TARGETINSTRINFO_H 14 #define LLVM_CODEGEN_TARGETINSTRINFO_H 15 16 #include "llvm/ADT/ArrayRef.h" 17 #include "llvm/ADT/DenseMap.h" 18 #include "llvm/ADT/DenseMapInfo.h" 19 #include "llvm/ADT/Uniformity.h" 20 #include "llvm/CodeGen/MIRFormatter.h" 21 #include "llvm/CodeGen/MachineBasicBlock.h" 22 #include "llvm/CodeGen/MachineFunction.h" 23 #include "llvm/CodeGen/MachineInstr.h" 24 #include "llvm/CodeGen/MachineInstrBuilder.h" 25 #include "llvm/CodeGen/MachineOperand.h" 26 #include "llvm/CodeGen/MachineOutliner.h" 27 #include "llvm/CodeGen/RegisterClassInfo.h" 28 #include "llvm/CodeGen/VirtRegMap.h" 29 #include "llvm/MC/MCInstrInfo.h" 30 #include "llvm/Support/BranchProbability.h" 31 #include "llvm/Support/ErrorHandling.h" 32 #include <cassert> 33 #include <cstddef> 34 #include <cstdint> 35 #include <utility> 36 #include <vector> 37 38 namespace llvm { 39 40 class DFAPacketizer; 41 class InstrItineraryData; 42 class LiveIntervals; 43 class LiveVariables; 44 class MachineLoop; 45 class MachineMemOperand; 46 class MachineRegisterInfo; 47 class MCAsmInfo; 48 class MCInst; 49 struct MCSchedModel; 50 class Module; 51 class ScheduleDAG; 52 class ScheduleDAGMI; 53 class ScheduleHazardRecognizer; 54 class SDNode; 55 class SelectionDAG; 56 class SMSchedule; 57 class SwingSchedulerDAG; 58 class RegScavenger; 59 class TargetRegisterClass; 60 class TargetRegisterInfo; 61 class TargetSchedModel; 62 class TargetSubtargetInfo; 63 enum class MachineCombinerPattern; 64 enum class MachineTraceStrategy; 65 66 template <class T> class SmallVectorImpl; 67 68 using ParamLoadedValue = std::pair<MachineOperand, DIExpression*>; 69 70 struct DestSourcePair { 71 const MachineOperand *Destination; 72 const MachineOperand *Source; 73 74 DestSourcePair(const MachineOperand &Dest, const MachineOperand &Src) 75 : Destination(&Dest), Source(&Src) {} 76 }; 77 78 /// Used to describe a register and immediate addition. 79 struct RegImmPair { 80 Register Reg; 81 int64_t Imm; 82 83 RegImmPair(Register Reg, int64_t Imm) : Reg(Reg), Imm(Imm) {} 84 }; 85 86 /// Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare. 87 /// It holds the register values, the scale value and the displacement. 88 struct ExtAddrMode { 89 Register BaseReg; 90 Register ScaledReg; 91 int64_t Scale; 92 int64_t Displacement; 93 }; 94 95 //--------------------------------------------------------------------------- 96 /// 97 /// TargetInstrInfo - Interface to description of machine instruction set 98 /// 99 class TargetInstrInfo : public MCInstrInfo { 100 public: 101 TargetInstrInfo(unsigned CFSetupOpcode = ~0u, unsigned CFDestroyOpcode = ~0u, 102 unsigned CatchRetOpcode = ~0u, unsigned ReturnOpcode = ~0u) 103 : CallFrameSetupOpcode(CFSetupOpcode), 104 CallFrameDestroyOpcode(CFDestroyOpcode), CatchRetOpcode(CatchRetOpcode), 105 ReturnOpcode(ReturnOpcode) {} 106 TargetInstrInfo(const TargetInstrInfo &) = delete; 107 TargetInstrInfo &operator=(const TargetInstrInfo &) = delete; 108 virtual ~TargetInstrInfo(); 109 110 static bool isGenericOpcode(unsigned Opc) { 111 return Opc <= TargetOpcode::GENERIC_OP_END; 112 } 113 114 static bool isGenericAtomicRMWOpcode(unsigned Opc) { 115 return Opc >= TargetOpcode::GENERIC_ATOMICRMW_OP_START && 116 Opc <= TargetOpcode::GENERIC_ATOMICRMW_OP_END; 117 } 118 119 /// Given a machine instruction descriptor, returns the register 120 /// class constraint for OpNum, or NULL. 121 virtual 122 const TargetRegisterClass *getRegClass(const MCInstrDesc &MCID, unsigned OpNum, 123 const TargetRegisterInfo *TRI, 124 const MachineFunction &MF) const; 125 126 /// Return true if the instruction is trivially rematerializable, meaning it 127 /// has no side effects and requires no operands that aren't always available. 128 /// This means the only allowed uses are constants and unallocatable physical 129 /// registers so that the instructions result is independent of the place 130 /// in the function. 131 bool isTriviallyReMaterializable(const MachineInstr &MI) const { 132 return MI.getOpcode() == TargetOpcode::IMPLICIT_DEF || 133 (MI.getDesc().isRematerializable() && 134 (isReallyTriviallyReMaterializable(MI) || 135 isReallyTriviallyReMaterializableGeneric(MI))); 136 } 137 138 /// Given \p MO is a PhysReg use return if it can be ignored for the purpose 139 /// of instruction rematerialization or sinking. 140 virtual bool isIgnorableUse(const MachineOperand &MO) const { 141 return false; 142 } 143 144 protected: 145 /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is 146 /// set, this hook lets the target specify whether the instruction is actually 147 /// trivially rematerializable, taking into consideration its operands. This 148 /// predicate must return false if the instruction has any side effects other 149 /// than producing a value, or if it requres any address registers that are 150 /// not always available. 151 /// Requirements must be check as stated in isTriviallyReMaterializable() . 152 virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const { 153 return false; 154 } 155 156 /// This method commutes the operands of the given machine instruction MI. 157 /// The operands to be commuted are specified by their indices OpIdx1 and 158 /// OpIdx2. 159 /// 160 /// If a target has any instructions that are commutable but require 161 /// converting to different instructions or making non-trivial changes 162 /// to commute them, this method can be overloaded to do that. 163 /// The default implementation simply swaps the commutable operands. 164 /// 165 /// If NewMI is false, MI is modified in place and returned; otherwise, a 166 /// new machine instruction is created and returned. 167 /// 168 /// Do not call this method for a non-commutable instruction. 169 /// Even though the instruction is commutable, the method may still 170 /// fail to commute the operands, null pointer is returned in such cases. 171 virtual MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI, 172 unsigned OpIdx1, 173 unsigned OpIdx2) const; 174 175 /// Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable 176 /// operand indices to (ResultIdx1, ResultIdx2). 177 /// One or both input values of the pair: (ResultIdx1, ResultIdx2) may be 178 /// predefined to some indices or be undefined (designated by the special 179 /// value 'CommuteAnyOperandIndex'). 180 /// The predefined result indices cannot be re-defined. 181 /// The function returns true iff after the result pair redefinition 182 /// the fixed result pair is equal to or equivalent to the source pair of 183 /// indices: (CommutableOpIdx1, CommutableOpIdx2). It is assumed here that 184 /// the pairs (x,y) and (y,x) are equivalent. 185 static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2, 186 unsigned CommutableOpIdx1, 187 unsigned CommutableOpIdx2); 188 189 private: 190 /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is 191 /// set and the target hook isReallyTriviallyReMaterializable returns false, 192 /// this function does target-independent tests to determine if the 193 /// instruction is really trivially rematerializable. 194 bool isReallyTriviallyReMaterializableGeneric(const MachineInstr &MI) const; 195 196 public: 197 /// These methods return the opcode of the frame setup/destroy instructions 198 /// if they exist (-1 otherwise). Some targets use pseudo instructions in 199 /// order to abstract away the difference between operating with a frame 200 /// pointer and operating without, through the use of these two instructions. 201 /// 202 unsigned getCallFrameSetupOpcode() const { return CallFrameSetupOpcode; } 203 unsigned getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; } 204 205 /// Returns true if the argument is a frame pseudo instruction. 206 bool isFrameInstr(const MachineInstr &I) const { 207 return I.getOpcode() == getCallFrameSetupOpcode() || 208 I.getOpcode() == getCallFrameDestroyOpcode(); 209 } 210 211 /// Returns true if the argument is a frame setup pseudo instruction. 212 bool isFrameSetup(const MachineInstr &I) const { 213 return I.getOpcode() == getCallFrameSetupOpcode(); 214 } 215 216 /// Returns size of the frame associated with the given frame instruction. 217 /// For frame setup instruction this is frame that is set up space set up 218 /// after the instruction. For frame destroy instruction this is the frame 219 /// freed by the caller. 220 /// Note, in some cases a call frame (or a part of it) may be prepared prior 221 /// to the frame setup instruction. It occurs in the calls that involve 222 /// inalloca arguments. This function reports only the size of the frame part 223 /// that is set up between the frame setup and destroy pseudo instructions. 224 int64_t getFrameSize(const MachineInstr &I) const { 225 assert(isFrameInstr(I) && "Not a frame instruction"); 226 assert(I.getOperand(0).getImm() >= 0); 227 return I.getOperand(0).getImm(); 228 } 229 230 /// Returns the total frame size, which is made up of the space set up inside 231 /// the pair of frame start-stop instructions and the space that is set up 232 /// prior to the pair. 233 int64_t getFrameTotalSize(const MachineInstr &I) const { 234 if (isFrameSetup(I)) { 235 assert(I.getOperand(1).getImm() >= 0 && 236 "Frame size must not be negative"); 237 return getFrameSize(I) + I.getOperand(1).getImm(); 238 } 239 return getFrameSize(I); 240 } 241 242 unsigned getCatchReturnOpcode() const { return CatchRetOpcode; } 243 unsigned getReturnOpcode() const { return ReturnOpcode; } 244 245 /// Returns the actual stack pointer adjustment made by an instruction 246 /// as part of a call sequence. By default, only call frame setup/destroy 247 /// instructions adjust the stack, but targets may want to override this 248 /// to enable more fine-grained adjustment, or adjust by a different value. 249 virtual int getSPAdjust(const MachineInstr &MI) const; 250 251 /// Return true if the instruction is a "coalescable" extension instruction. 252 /// That is, it's like a copy where it's legal for the source to overlap the 253 /// destination. e.g. X86::MOVSX64rr32. If this returns true, then it's 254 /// expected the pre-extension value is available as a subreg of the result 255 /// register. This also returns the sub-register index in SubIdx. 256 virtual bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg, 257 Register &DstReg, unsigned &SubIdx) const { 258 return false; 259 } 260 261 /// If the specified machine instruction is a direct 262 /// load from a stack slot, return the virtual or physical register number of 263 /// the destination along with the FrameIndex of the loaded stack slot. If 264 /// not, return 0. This predicate must return 0 if the instruction has 265 /// any side effects other than loading from the stack slot. 266 virtual unsigned isLoadFromStackSlot(const MachineInstr &MI, 267 int &FrameIndex) const { 268 return 0; 269 } 270 271 /// Optional extension of isLoadFromStackSlot that returns the number of 272 /// bytes loaded from the stack. This must be implemented if a backend 273 /// supports partial stack slot spills/loads to further disambiguate 274 /// what the load does. 275 virtual unsigned isLoadFromStackSlot(const MachineInstr &MI, 276 int &FrameIndex, 277 unsigned &MemBytes) const { 278 MemBytes = 0; 279 return isLoadFromStackSlot(MI, FrameIndex); 280 } 281 282 /// Check for post-frame ptr elimination stack locations as well. 283 /// This uses a heuristic so it isn't reliable for correctness. 284 virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr &MI, 285 int &FrameIndex) const { 286 return 0; 287 } 288 289 /// If the specified machine instruction has a load from a stack slot, 290 /// return true along with the FrameIndices of the loaded stack slot and the 291 /// machine mem operands containing the reference. 292 /// If not, return false. Unlike isLoadFromStackSlot, this returns true for 293 /// any instructions that loads from the stack. This is just a hint, as some 294 /// cases may be missed. 295 virtual bool hasLoadFromStackSlot( 296 const MachineInstr &MI, 297 SmallVectorImpl<const MachineMemOperand *> &Accesses) const; 298 299 /// If the specified machine instruction is a direct 300 /// store to a stack slot, return the virtual or physical register number of 301 /// the source reg along with the FrameIndex of the loaded stack slot. If 302 /// not, return 0. This predicate must return 0 if the instruction has 303 /// any side effects other than storing to the stack slot. 304 virtual unsigned isStoreToStackSlot(const MachineInstr &MI, 305 int &FrameIndex) const { 306 return 0; 307 } 308 309 /// Optional extension of isStoreToStackSlot that returns the number of 310 /// bytes stored to the stack. This must be implemented if a backend 311 /// supports partial stack slot spills/loads to further disambiguate 312 /// what the store does. 313 virtual unsigned isStoreToStackSlot(const MachineInstr &MI, 314 int &FrameIndex, 315 unsigned &MemBytes) const { 316 MemBytes = 0; 317 return isStoreToStackSlot(MI, FrameIndex); 318 } 319 320 /// Check for post-frame ptr elimination stack locations as well. 321 /// This uses a heuristic, so it isn't reliable for correctness. 322 virtual unsigned isStoreToStackSlotPostFE(const MachineInstr &MI, 323 int &FrameIndex) const { 324 return 0; 325 } 326 327 /// If the specified machine instruction has a store to a stack slot, 328 /// return true along with the FrameIndices of the loaded stack slot and the 329 /// machine mem operands containing the reference. 330 /// If not, return false. Unlike isStoreToStackSlot, 331 /// this returns true for any instructions that stores to the 332 /// stack. This is just a hint, as some cases may be missed. 333 virtual bool hasStoreToStackSlot( 334 const MachineInstr &MI, 335 SmallVectorImpl<const MachineMemOperand *> &Accesses) const; 336 337 /// Return true if the specified machine instruction 338 /// is a copy of one stack slot to another and has no other effect. 339 /// Provide the identity of the two frame indices. 340 virtual bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex, 341 int &SrcFrameIndex) const { 342 return false; 343 } 344 345 /// Compute the size in bytes and offset within a stack slot of a spilled 346 /// register or subregister. 347 /// 348 /// \param [out] Size in bytes of the spilled value. 349 /// \param [out] Offset in bytes within the stack slot. 350 /// \returns true if both Size and Offset are successfully computed. 351 /// 352 /// Not all subregisters have computable spill slots. For example, 353 /// subregisters registers may not be byte-sized, and a pair of discontiguous 354 /// subregisters has no single offset. 355 /// 356 /// Targets with nontrivial bigendian implementations may need to override 357 /// this, particularly to support spilled vector registers. 358 virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx, 359 unsigned &Size, unsigned &Offset, 360 const MachineFunction &MF) const; 361 362 /// Return true if the given instruction is terminator that is unspillable, 363 /// according to isUnspillableTerminatorImpl. 364 bool isUnspillableTerminator(const MachineInstr *MI) const { 365 return MI->isTerminator() && isUnspillableTerminatorImpl(MI); 366 } 367 368 /// Returns the size in bytes of the specified MachineInstr, or ~0U 369 /// when this function is not implemented by a target. 370 virtual unsigned getInstSizeInBytes(const MachineInstr &MI) const { 371 return ~0U; 372 } 373 374 /// Return true if the instruction is as cheap as a move instruction. 375 /// 376 /// Targets for different archs need to override this, and different 377 /// micro-architectures can also be finely tuned inside. 378 virtual bool isAsCheapAsAMove(const MachineInstr &MI) const { 379 return MI.isAsCheapAsAMove(); 380 } 381 382 /// Return true if the instruction should be sunk by MachineSink. 383 /// 384 /// MachineSink determines on its own whether the instruction is safe to sink; 385 /// this gives the target a hook to override the default behavior with regards 386 /// to which instructions should be sunk. 387 virtual bool shouldSink(const MachineInstr &MI) const { return true; } 388 389 /// Return false if the instruction should not be hoisted by MachineLICM. 390 /// 391 /// MachineLICM determines on its own whether the instruction is safe to 392 /// hoist; this gives the target a hook to extend this assessment and prevent 393 /// an instruction being hoisted from a given loop for target specific 394 /// reasons. 395 virtual bool shouldHoist(const MachineInstr &MI, 396 const MachineLoop *FromLoop) const { 397 return true; 398 } 399 400 /// Re-issue the specified 'original' instruction at the 401 /// specific location targeting a new destination register. 402 /// The register in Orig->getOperand(0).getReg() will be substituted by 403 /// DestReg:SubIdx. Any existing subreg index is preserved or composed with 404 /// SubIdx. 405 virtual void reMaterialize(MachineBasicBlock &MBB, 406 MachineBasicBlock::iterator MI, Register DestReg, 407 unsigned SubIdx, const MachineInstr &Orig, 408 const TargetRegisterInfo &TRI) const; 409 410 /// Clones instruction or the whole instruction bundle \p Orig and 411 /// insert into \p MBB before \p InsertBefore. The target may update operands 412 /// that are required to be unique. 413 /// 414 /// \p Orig must not return true for MachineInstr::isNotDuplicable(). 415 virtual MachineInstr &duplicate(MachineBasicBlock &MBB, 416 MachineBasicBlock::iterator InsertBefore, 417 const MachineInstr &Orig) const; 418 419 /// This method must be implemented by targets that 420 /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target 421 /// may be able to convert a two-address instruction into one or more true 422 /// three-address instructions on demand. This allows the X86 target (for 423 /// example) to convert ADD and SHL instructions into LEA instructions if they 424 /// would require register copies due to two-addressness. 425 /// 426 /// This method returns a null pointer if the transformation cannot be 427 /// performed, otherwise it returns the last new instruction. 428 /// 429 /// If \p LIS is not nullptr, the LiveIntervals info should be updated for 430 /// replacing \p MI with new instructions, even though this function does not 431 /// remove MI. 432 virtual MachineInstr *convertToThreeAddress(MachineInstr &MI, 433 LiveVariables *LV, 434 LiveIntervals *LIS) const { 435 return nullptr; 436 } 437 438 // This constant can be used as an input value of operand index passed to 439 // the method findCommutedOpIndices() to tell the method that the 440 // corresponding operand index is not pre-defined and that the method 441 // can pick any commutable operand. 442 static const unsigned CommuteAnyOperandIndex = ~0U; 443 444 /// This method commutes the operands of the given machine instruction MI. 445 /// 446 /// The operands to be commuted are specified by their indices OpIdx1 and 447 /// OpIdx2. OpIdx1 and OpIdx2 arguments may be set to a special value 448 /// 'CommuteAnyOperandIndex', which means that the method is free to choose 449 /// any arbitrarily chosen commutable operand. If both arguments are set to 450 /// 'CommuteAnyOperandIndex' then the method looks for 2 different commutable 451 /// operands; then commutes them if such operands could be found. 452 /// 453 /// If NewMI is false, MI is modified in place and returned; otherwise, a 454 /// new machine instruction is created and returned. 455 /// 456 /// Do not call this method for a non-commutable instruction or 457 /// for non-commuable operands. 458 /// Even though the instruction is commutable, the method may still 459 /// fail to commute the operands, null pointer is returned in such cases. 460 MachineInstr * 461 commuteInstruction(MachineInstr &MI, bool NewMI = false, 462 unsigned OpIdx1 = CommuteAnyOperandIndex, 463 unsigned OpIdx2 = CommuteAnyOperandIndex) const; 464 465 /// Returns true iff the routine could find two commutable operands in the 466 /// given machine instruction. 467 /// The 'SrcOpIdx1' and 'SrcOpIdx2' are INPUT and OUTPUT arguments. 468 /// If any of the INPUT values is set to the special value 469 /// 'CommuteAnyOperandIndex' then the method arbitrarily picks a commutable 470 /// operand, then returns its index in the corresponding argument. 471 /// If both of INPUT values are set to 'CommuteAnyOperandIndex' then method 472 /// looks for 2 commutable operands. 473 /// If INPUT values refer to some operands of MI, then the method simply 474 /// returns true if the corresponding operands are commutable and returns 475 /// false otherwise. 476 /// 477 /// For example, calling this method this way: 478 /// unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex; 479 /// findCommutedOpIndices(MI, Op1, Op2); 480 /// can be interpreted as a query asking to find an operand that would be 481 /// commutable with the operand#1. 482 virtual bool findCommutedOpIndices(const MachineInstr &MI, 483 unsigned &SrcOpIdx1, 484 unsigned &SrcOpIdx2) const; 485 486 /// Returns true if the target has a preference on the operands order of 487 /// the given machine instruction. And specify if \p Commute is required to 488 /// get the desired operands order. 489 virtual bool hasCommutePreference(MachineInstr &MI, bool &Commute) const { 490 return false; 491 } 492 493 /// A pair composed of a register and a sub-register index. 494 /// Used to give some type checking when modeling Reg:SubReg. 495 struct RegSubRegPair { 496 Register Reg; 497 unsigned SubReg; 498 499 RegSubRegPair(Register Reg = Register(), unsigned SubReg = 0) 500 : Reg(Reg), SubReg(SubReg) {} 501 502 bool operator==(const RegSubRegPair& P) const { 503 return Reg == P.Reg && SubReg == P.SubReg; 504 } 505 bool operator!=(const RegSubRegPair& P) const { 506 return !(*this == P); 507 } 508 }; 509 510 /// A pair composed of a pair of a register and a sub-register index, 511 /// and another sub-register index. 512 /// Used to give some type checking when modeling Reg:SubReg1, SubReg2. 513 struct RegSubRegPairAndIdx : RegSubRegPair { 514 unsigned SubIdx; 515 516 RegSubRegPairAndIdx(Register Reg = Register(), unsigned SubReg = 0, 517 unsigned SubIdx = 0) 518 : RegSubRegPair(Reg, SubReg), SubIdx(SubIdx) {} 519 }; 520 521 /// Build the equivalent inputs of a REG_SEQUENCE for the given \p MI 522 /// and \p DefIdx. 523 /// \p [out] InputRegs of the equivalent REG_SEQUENCE. Each element of 524 /// the list is modeled as <Reg:SubReg, SubIdx>. Operands with the undef 525 /// flag are not added to this list. 526 /// E.g., REG_SEQUENCE %1:sub1, sub0, %2, sub1 would produce 527 /// two elements: 528 /// - %1:sub1, sub0 529 /// - %2<:0>, sub1 530 /// 531 /// \returns true if it is possible to build such an input sequence 532 /// with the pair \p MI, \p DefIdx. False otherwise. 533 /// 534 /// \pre MI.isRegSequence() or MI.isRegSequenceLike(). 535 /// 536 /// \note The generic implementation does not provide any support for 537 /// MI.isRegSequenceLike(). In other words, one has to override 538 /// getRegSequenceLikeInputs for target specific instructions. 539 bool 540 getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx, 541 SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const; 542 543 /// Build the equivalent inputs of a EXTRACT_SUBREG for the given \p MI 544 /// and \p DefIdx. 545 /// \p [out] InputReg of the equivalent EXTRACT_SUBREG. 546 /// E.g., EXTRACT_SUBREG %1:sub1, sub0, sub1 would produce: 547 /// - %1:sub1, sub0 548 /// 549 /// \returns true if it is possible to build such an input sequence 550 /// with the pair \p MI, \p DefIdx and the operand has no undef flag set. 551 /// False otherwise. 552 /// 553 /// \pre MI.isExtractSubreg() or MI.isExtractSubregLike(). 554 /// 555 /// \note The generic implementation does not provide any support for 556 /// MI.isExtractSubregLike(). In other words, one has to override 557 /// getExtractSubregLikeInputs for target specific instructions. 558 bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx, 559 RegSubRegPairAndIdx &InputReg) const; 560 561 /// Build the equivalent inputs of a INSERT_SUBREG for the given \p MI 562 /// and \p DefIdx. 563 /// \p [out] BaseReg and \p [out] InsertedReg contain 564 /// the equivalent inputs of INSERT_SUBREG. 565 /// E.g., INSERT_SUBREG %0:sub0, %1:sub1, sub3 would produce: 566 /// - BaseReg: %0:sub0 567 /// - InsertedReg: %1:sub1, sub3 568 /// 569 /// \returns true if it is possible to build such an input sequence 570 /// with the pair \p MI, \p DefIdx and the operand has no undef flag set. 571 /// False otherwise. 572 /// 573 /// \pre MI.isInsertSubreg() or MI.isInsertSubregLike(). 574 /// 575 /// \note The generic implementation does not provide any support for 576 /// MI.isInsertSubregLike(). In other words, one has to override 577 /// getInsertSubregLikeInputs for target specific instructions. 578 bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx, 579 RegSubRegPair &BaseReg, 580 RegSubRegPairAndIdx &InsertedReg) const; 581 582 /// Return true if two machine instructions would produce identical values. 583 /// By default, this is only true when the two instructions 584 /// are deemed identical except for defs. If this function is called when the 585 /// IR is still in SSA form, the caller can pass the MachineRegisterInfo for 586 /// aggressive checks. 587 virtual bool produceSameValue(const MachineInstr &MI0, 588 const MachineInstr &MI1, 589 const MachineRegisterInfo *MRI = nullptr) const; 590 591 /// \returns true if a branch from an instruction with opcode \p BranchOpc 592 /// bytes is capable of jumping to a position \p BrOffset bytes away. 593 virtual bool isBranchOffsetInRange(unsigned BranchOpc, 594 int64_t BrOffset) const { 595 llvm_unreachable("target did not implement"); 596 } 597 598 /// \returns The block that branch instruction \p MI jumps to. 599 virtual MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const { 600 llvm_unreachable("target did not implement"); 601 } 602 603 /// Insert an unconditional indirect branch at the end of \p MBB to \p 604 /// NewDestBB. Optionally, insert the clobbered register restoring in \p 605 /// RestoreBB. \p BrOffset indicates the offset of \p NewDestBB relative to 606 /// the offset of the position to insert the new branch. 607 virtual void insertIndirectBranch(MachineBasicBlock &MBB, 608 MachineBasicBlock &NewDestBB, 609 MachineBasicBlock &RestoreBB, 610 const DebugLoc &DL, int64_t BrOffset = 0, 611 RegScavenger *RS = nullptr) const { 612 llvm_unreachable("target did not implement"); 613 } 614 615 /// Analyze the branching code at the end of MBB, returning 616 /// true if it cannot be understood (e.g. it's a switch dispatch or isn't 617 /// implemented for a target). Upon success, this returns false and returns 618 /// with the following information in various cases: 619 /// 620 /// 1. If this block ends with no branches (it just falls through to its succ) 621 /// just return false, leaving TBB/FBB null. 622 /// 2. If this block ends with only an unconditional branch, it sets TBB to be 623 /// the destination block. 624 /// 3. If this block ends with a conditional branch and it falls through to a 625 /// successor block, it sets TBB to be the branch destination block and a 626 /// list of operands that evaluate the condition. These operands can be 627 /// passed to other TargetInstrInfo methods to create new branches. 628 /// 4. If this block ends with a conditional branch followed by an 629 /// unconditional branch, it returns the 'true' destination in TBB, the 630 /// 'false' destination in FBB, and a list of operands that evaluate the 631 /// condition. These operands can be passed to other TargetInstrInfo 632 /// methods to create new branches. 633 /// 634 /// Note that removeBranch and insertBranch must be implemented to support 635 /// cases where this method returns success. 636 /// 637 /// If AllowModify is true, then this routine is allowed to modify the basic 638 /// block (e.g. delete instructions after the unconditional branch). 639 /// 640 /// The CFG information in MBB.Predecessors and MBB.Successors must be valid 641 /// before calling this function. 642 virtual bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, 643 MachineBasicBlock *&FBB, 644 SmallVectorImpl<MachineOperand> &Cond, 645 bool AllowModify = false) const { 646 return true; 647 } 648 649 /// Represents a predicate at the MachineFunction level. The control flow a 650 /// MachineBranchPredicate represents is: 651 /// 652 /// Reg = LHS `Predicate` RHS == ConditionDef 653 /// if Reg then goto TrueDest else goto FalseDest 654 /// 655 struct MachineBranchPredicate { 656 enum ComparePredicate { 657 PRED_EQ, // True if two values are equal 658 PRED_NE, // True if two values are not equal 659 PRED_INVALID // Sentinel value 660 }; 661 662 ComparePredicate Predicate = PRED_INVALID; 663 MachineOperand LHS = MachineOperand::CreateImm(0); 664 MachineOperand RHS = MachineOperand::CreateImm(0); 665 MachineBasicBlock *TrueDest = nullptr; 666 MachineBasicBlock *FalseDest = nullptr; 667 MachineInstr *ConditionDef = nullptr; 668 669 /// SingleUseCondition is true if ConditionDef is dead except for the 670 /// branch(es) at the end of the basic block. 671 /// 672 bool SingleUseCondition = false; 673 674 explicit MachineBranchPredicate() = default; 675 }; 676 677 /// Analyze the branching code at the end of MBB and parse it into the 678 /// MachineBranchPredicate structure if possible. Returns false on success 679 /// and true on failure. 680 /// 681 /// If AllowModify is true, then this routine is allowed to modify the basic 682 /// block (e.g. delete instructions after the unconditional branch). 683 /// 684 virtual bool analyzeBranchPredicate(MachineBasicBlock &MBB, 685 MachineBranchPredicate &MBP, 686 bool AllowModify = false) const { 687 return true; 688 } 689 690 /// Remove the branching code at the end of the specific MBB. 691 /// This is only invoked in cases where analyzeBranch returns success. It 692 /// returns the number of instructions that were removed. 693 /// If \p BytesRemoved is non-null, report the change in code size from the 694 /// removed instructions. 695 virtual unsigned removeBranch(MachineBasicBlock &MBB, 696 int *BytesRemoved = nullptr) const { 697 llvm_unreachable("Target didn't implement TargetInstrInfo::removeBranch!"); 698 } 699 700 /// Insert branch code into the end of the specified MachineBasicBlock. The 701 /// operands to this method are the same as those returned by analyzeBranch. 702 /// This is only invoked in cases where analyzeBranch returns success. It 703 /// returns the number of instructions inserted. If \p BytesAdded is non-null, 704 /// report the change in code size from the added instructions. 705 /// 706 /// It is also invoked by tail merging to add unconditional branches in 707 /// cases where analyzeBranch doesn't apply because there was no original 708 /// branch to analyze. At least this much must be implemented, else tail 709 /// merging needs to be disabled. 710 /// 711 /// The CFG information in MBB.Predecessors and MBB.Successors must be valid 712 /// before calling this function. 713 virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, 714 MachineBasicBlock *FBB, 715 ArrayRef<MachineOperand> Cond, 716 const DebugLoc &DL, 717 int *BytesAdded = nullptr) const { 718 llvm_unreachable("Target didn't implement TargetInstrInfo::insertBranch!"); 719 } 720 721 unsigned insertUnconditionalBranch(MachineBasicBlock &MBB, 722 MachineBasicBlock *DestBB, 723 const DebugLoc &DL, 724 int *BytesAdded = nullptr) const { 725 return insertBranch(MBB, DestBB, nullptr, ArrayRef<MachineOperand>(), DL, 726 BytesAdded); 727 } 728 729 /// Object returned by analyzeLoopForPipelining. Allows software pipelining 730 /// implementations to query attributes of the loop being pipelined and to 731 /// apply target-specific updates to the loop once pipelining is complete. 732 class PipelinerLoopInfo { 733 public: 734 virtual ~PipelinerLoopInfo(); 735 /// Return true if the given instruction should not be pipelined and should 736 /// be ignored. An example could be a loop comparison, or induction variable 737 /// update with no users being pipelined. 738 virtual bool shouldIgnoreForPipelining(const MachineInstr *MI) const = 0; 739 740 /// Return true if the proposed schedule should used. Otherwise return 741 /// false to not pipeline the loop. This function should be used to ensure 742 /// that pipelined loops meet target-specific quality heuristics. 743 virtual bool shouldUseSchedule(SwingSchedulerDAG &SSD, SMSchedule &SMS) { 744 return true; 745 } 746 747 /// Create a condition to determine if the trip count of the loop is greater 748 /// than TC, where TC is always one more than for the previous prologue or 749 /// 0 if this is being called for the outermost prologue. 750 /// 751 /// If the trip count is statically known to be greater than TC, return 752 /// true. If the trip count is statically known to be not greater than TC, 753 /// return false. Otherwise return nullopt and fill out Cond with the test 754 /// condition. 755 /// 756 /// Note: This hook is guaranteed to be called from the innermost to the 757 /// outermost prologue of the loop being software pipelined. 758 virtual std::optional<bool> 759 createTripCountGreaterCondition(int TC, MachineBasicBlock &MBB, 760 SmallVectorImpl<MachineOperand> &Cond) = 0; 761 762 /// Modify the loop such that the trip count is 763 /// OriginalTC + TripCountAdjust. 764 virtual void adjustTripCount(int TripCountAdjust) = 0; 765 766 /// Called when the loop's preheader has been modified to NewPreheader. 767 virtual void setPreheader(MachineBasicBlock *NewPreheader) = 0; 768 769 /// Called when the loop is being removed. Any instructions in the preheader 770 /// should be removed. 771 /// 772 /// Once this function is called, no other functions on this object are 773 /// valid; the loop has been removed. 774 virtual void disposed() = 0; 775 }; 776 777 /// Analyze loop L, which must be a single-basic-block loop, and if the 778 /// conditions can be understood enough produce a PipelinerLoopInfo object. 779 virtual std::unique_ptr<PipelinerLoopInfo> 780 analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const { 781 return nullptr; 782 } 783 784 /// Analyze the loop code, return true if it cannot be understood. Upon 785 /// success, this function returns false and returns information about the 786 /// induction variable and compare instruction used at the end. 787 virtual bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst, 788 MachineInstr *&CmpInst) const { 789 return true; 790 } 791 792 /// Generate code to reduce the loop iteration by one and check if the loop 793 /// is finished. Return the value/register of the new loop count. We need 794 /// this function when peeling off one or more iterations of a loop. This 795 /// function assumes the nth iteration is peeled first. 796 virtual unsigned reduceLoopCount(MachineBasicBlock &MBB, 797 MachineBasicBlock &PreHeader, 798 MachineInstr *IndVar, MachineInstr &Cmp, 799 SmallVectorImpl<MachineOperand> &Cond, 800 SmallVectorImpl<MachineInstr *> &PrevInsts, 801 unsigned Iter, unsigned MaxIter) const { 802 llvm_unreachable("Target didn't implement ReduceLoopCount"); 803 } 804 805 /// Delete the instruction OldInst and everything after it, replacing it with 806 /// an unconditional branch to NewDest. This is used by the tail merging pass. 807 virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail, 808 MachineBasicBlock *NewDest) const; 809 810 /// Return true if it's legal to split the given basic 811 /// block at the specified instruction (i.e. instruction would be the start 812 /// of a new basic block). 813 virtual bool isLegalToSplitMBBAt(MachineBasicBlock &MBB, 814 MachineBasicBlock::iterator MBBI) const { 815 return true; 816 } 817 818 /// Return true if it's profitable to predicate 819 /// instructions with accumulated instruction latency of "NumCycles" 820 /// of the specified basic block, where the probability of the instructions 821 /// being executed is given by Probability, and Confidence is a measure 822 /// of our confidence that it will be properly predicted. 823 virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, 824 unsigned ExtraPredCycles, 825 BranchProbability Probability) const { 826 return false; 827 } 828 829 /// Second variant of isProfitableToIfCvt. This one 830 /// checks for the case where two basic blocks from true and false path 831 /// of a if-then-else (diamond) are predicated on mutually exclusive 832 /// predicates, where the probability of the true path being taken is given 833 /// by Probability, and Confidence is a measure of our confidence that it 834 /// will be properly predicted. 835 virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTCycles, 836 unsigned ExtraTCycles, 837 MachineBasicBlock &FMBB, unsigned NumFCycles, 838 unsigned ExtraFCycles, 839 BranchProbability Probability) const { 840 return false; 841 } 842 843 /// Return true if it's profitable for if-converter to duplicate instructions 844 /// of specified accumulated instruction latencies in the specified MBB to 845 /// enable if-conversion. 846 /// The probability of the instructions being executed is given by 847 /// Probability, and Confidence is a measure of our confidence that it 848 /// will be properly predicted. 849 virtual bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, 850 unsigned NumCycles, 851 BranchProbability Probability) const { 852 return false; 853 } 854 855 /// Return the increase in code size needed to predicate a contiguous run of 856 /// NumInsts instructions. 857 virtual unsigned extraSizeToPredicateInstructions(const MachineFunction &MF, 858 unsigned NumInsts) const { 859 return 0; 860 } 861 862 /// Return an estimate for the code size reduction (in bytes) which will be 863 /// caused by removing the given branch instruction during if-conversion. 864 virtual unsigned predictBranchSizeForIfCvt(MachineInstr &MI) const { 865 return getInstSizeInBytes(MI); 866 } 867 868 /// Return true if it's profitable to unpredicate 869 /// one side of a 'diamond', i.e. two sides of if-else predicated on mutually 870 /// exclusive predicates. 871 /// e.g. 872 /// subeq r0, r1, #1 873 /// addne r0, r1, #1 874 /// => 875 /// sub r0, r1, #1 876 /// addne r0, r1, #1 877 /// 878 /// This may be profitable is conditional instructions are always executed. 879 virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB, 880 MachineBasicBlock &FMBB) const { 881 return false; 882 } 883 884 /// Return true if it is possible to insert a select 885 /// instruction that chooses between TrueReg and FalseReg based on the 886 /// condition code in Cond. 887 /// 888 /// When successful, also return the latency in cycles from TrueReg, 889 /// FalseReg, and Cond to the destination register. In most cases, a select 890 /// instruction will be 1 cycle, so CondCycles = TrueCycles = FalseCycles = 1 891 /// 892 /// Some x86 implementations have 2-cycle cmov instructions. 893 /// 894 /// @param MBB Block where select instruction would be inserted. 895 /// @param Cond Condition returned by analyzeBranch. 896 /// @param DstReg Virtual dest register that the result should write to. 897 /// @param TrueReg Virtual register to select when Cond is true. 898 /// @param FalseReg Virtual register to select when Cond is false. 899 /// @param CondCycles Latency from Cond+Branch to select output. 900 /// @param TrueCycles Latency from TrueReg to select output. 901 /// @param FalseCycles Latency from FalseReg to select output. 902 virtual bool canInsertSelect(const MachineBasicBlock &MBB, 903 ArrayRef<MachineOperand> Cond, Register DstReg, 904 Register TrueReg, Register FalseReg, 905 int &CondCycles, int &TrueCycles, 906 int &FalseCycles) const { 907 return false; 908 } 909 910 /// Insert a select instruction into MBB before I that will copy TrueReg to 911 /// DstReg when Cond is true, and FalseReg to DstReg when Cond is false. 912 /// 913 /// This function can only be called after canInsertSelect() returned true. 914 /// The condition in Cond comes from analyzeBranch, and it can be assumed 915 /// that the same flags or registers required by Cond are available at the 916 /// insertion point. 917 /// 918 /// @param MBB Block where select instruction should be inserted. 919 /// @param I Insertion point. 920 /// @param DL Source location for debugging. 921 /// @param DstReg Virtual register to be defined by select instruction. 922 /// @param Cond Condition as computed by analyzeBranch. 923 /// @param TrueReg Virtual register to copy when Cond is true. 924 /// @param FalseReg Virtual register to copy when Cons is false. 925 virtual void insertSelect(MachineBasicBlock &MBB, 926 MachineBasicBlock::iterator I, const DebugLoc &DL, 927 Register DstReg, ArrayRef<MachineOperand> Cond, 928 Register TrueReg, Register FalseReg) const { 929 llvm_unreachable("Target didn't implement TargetInstrInfo::insertSelect!"); 930 } 931 932 /// Analyze the given select instruction, returning true if 933 /// it cannot be understood. It is assumed that MI->isSelect() is true. 934 /// 935 /// When successful, return the controlling condition and the operands that 936 /// determine the true and false result values. 937 /// 938 /// Result = SELECT Cond, TrueOp, FalseOp 939 /// 940 /// Some targets can optimize select instructions, for example by predicating 941 /// the instruction defining one of the operands. Such targets should set 942 /// Optimizable. 943 /// 944 /// @param MI Select instruction to analyze. 945 /// @param Cond Condition controlling the select. 946 /// @param TrueOp Operand number of the value selected when Cond is true. 947 /// @param FalseOp Operand number of the value selected when Cond is false. 948 /// @param Optimizable Returned as true if MI is optimizable. 949 /// @returns False on success. 950 virtual bool analyzeSelect(const MachineInstr &MI, 951 SmallVectorImpl<MachineOperand> &Cond, 952 unsigned &TrueOp, unsigned &FalseOp, 953 bool &Optimizable) const { 954 assert(MI.getDesc().isSelect() && "MI must be a select instruction"); 955 return true; 956 } 957 958 /// Given a select instruction that was understood by 959 /// analyzeSelect and returned Optimizable = true, attempt to optimize MI by 960 /// merging it with one of its operands. Returns NULL on failure. 961 /// 962 /// When successful, returns the new select instruction. The client is 963 /// responsible for deleting MI. 964 /// 965 /// If both sides of the select can be optimized, PreferFalse is used to pick 966 /// a side. 967 /// 968 /// @param MI Optimizable select instruction. 969 /// @param NewMIs Set that record all MIs in the basic block up to \p 970 /// MI. Has to be updated with any newly created MI or deleted ones. 971 /// @param PreferFalse Try to optimize FalseOp instead of TrueOp. 972 /// @returns Optimized instruction or NULL. 973 virtual MachineInstr *optimizeSelect(MachineInstr &MI, 974 SmallPtrSetImpl<MachineInstr *> &NewMIs, 975 bool PreferFalse = false) const { 976 // This function must be implemented if Optimizable is ever set. 977 llvm_unreachable("Target must implement TargetInstrInfo::optimizeSelect!"); 978 } 979 980 /// Emit instructions to copy a pair of physical registers. 981 /// 982 /// This function should support copies within any legal register class as 983 /// well as any cross-class copies created during instruction selection. 984 /// 985 /// The source and destination registers may overlap, which may require a 986 /// careful implementation when multiple copy instructions are required for 987 /// large registers. See for example the ARM target. 988 virtual void copyPhysReg(MachineBasicBlock &MBB, 989 MachineBasicBlock::iterator MI, const DebugLoc &DL, 990 MCRegister DestReg, MCRegister SrcReg, 991 bool KillSrc) const { 992 llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!"); 993 } 994 995 /// Allow targets to tell MachineVerifier whether a specific register 996 /// MachineOperand can be used as part of PC-relative addressing. 997 /// PC-relative addressing modes in many CISC architectures contain 998 /// (non-PC) registers as offsets or scaling values, which inherently 999 /// tags the corresponding MachineOperand with OPERAND_PCREL. 1000 /// 1001 /// @param MO The MachineOperand in question. MO.isReg() should always 1002 /// be true. 1003 /// @return Whether this operand is allowed to be used PC-relatively. 1004 virtual bool isPCRelRegisterOperandLegal(const MachineOperand &MO) const { 1005 return false; 1006 } 1007 1008 /// Return an index for MachineJumpTableInfo if \p insn is an indirect jump 1009 /// using a jump table, otherwise -1. 1010 virtual int getJumpTableIndex(const MachineInstr &MI) const { return -1; } 1011 1012 protected: 1013 /// Target-dependent implementation for IsCopyInstr. 1014 /// If the specific machine instruction is a instruction that moves/copies 1015 /// value from one register to another register return destination and source 1016 /// registers as machine operands. 1017 virtual std::optional<DestSourcePair> 1018 isCopyInstrImpl(const MachineInstr &MI) const { 1019 return std::nullopt; 1020 } 1021 1022 /// Return true if the given terminator MI is not expected to spill. This 1023 /// sets the live interval as not spillable and adjusts phi node lowering to 1024 /// not introduce copies after the terminator. Use with care, these are 1025 /// currently used for hardware loop intrinsics in very controlled situations, 1026 /// created prior to registry allocation in loops that only have single phi 1027 /// users for the terminators value. They may run out of registers if not used 1028 /// carefully. 1029 virtual bool isUnspillableTerminatorImpl(const MachineInstr *MI) const { 1030 return false; 1031 } 1032 1033 public: 1034 /// If the specific machine instruction is a instruction that moves/copies 1035 /// value from one register to another register return destination and source 1036 /// registers as machine operands. 1037 /// For COPY-instruction the method naturally returns destination and source 1038 /// registers as machine operands, for all other instructions the method calls 1039 /// target-dependent implementation. 1040 std::optional<DestSourcePair> isCopyInstr(const MachineInstr &MI) const { 1041 if (MI.isCopy()) { 1042 return DestSourcePair{MI.getOperand(0), MI.getOperand(1)}; 1043 } 1044 return isCopyInstrImpl(MI); 1045 } 1046 1047 /// If the specific machine instruction is an instruction that adds an 1048 /// immediate value and a physical register, and stores the result in 1049 /// the given physical register \c Reg, return a pair of the source 1050 /// register and the offset which has been added. 1051 virtual std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI, 1052 Register Reg) const { 1053 return std::nullopt; 1054 } 1055 1056 /// Returns true if MI is an instruction that defines Reg to have a constant 1057 /// value and the value is recorded in ImmVal. The ImmVal is a result that 1058 /// should be interpreted as modulo size of Reg. 1059 virtual bool getConstValDefinedInReg(const MachineInstr &MI, 1060 const Register Reg, 1061 int64_t &ImmVal) const { 1062 return false; 1063 } 1064 1065 /// Store the specified register of the given register class to the specified 1066 /// stack frame index. The store instruction is to be added to the given 1067 /// machine basic block before the specified machine instruction. If isKill 1068 /// is true, the register operand is the last use and must be marked kill. If 1069 /// \p SrcReg is being directly spilled as part of assigning a virtual 1070 /// register, \p VReg is the register being assigned. This additional register 1071 /// argument is needed for certain targets when invoked from RegAllocFast to 1072 /// map the spilled physical register to its virtual register. A null register 1073 /// can be passed elsewhere. 1074 virtual void storeRegToStackSlot(MachineBasicBlock &MBB, 1075 MachineBasicBlock::iterator MI, 1076 Register SrcReg, bool isKill, int FrameIndex, 1077 const TargetRegisterClass *RC, 1078 const TargetRegisterInfo *TRI, 1079 Register VReg) const { 1080 llvm_unreachable("Target didn't implement " 1081 "TargetInstrInfo::storeRegToStackSlot!"); 1082 } 1083 1084 /// Load the specified register of the given register class from the specified 1085 /// stack frame index. The load instruction is to be added to the given 1086 /// machine basic block before the specified machine instruction. If \p 1087 /// DestReg is being directly reloaded as part of assigning a virtual 1088 /// register, \p VReg is the register being assigned. This additional register 1089 /// argument is needed for certain targets when invoked from RegAllocFast to 1090 /// map the loaded physical register to its virtual register. A null register 1091 /// can be passed elsewhere. 1092 virtual void loadRegFromStackSlot(MachineBasicBlock &MBB, 1093 MachineBasicBlock::iterator MI, 1094 Register DestReg, int FrameIndex, 1095 const TargetRegisterClass *RC, 1096 const TargetRegisterInfo *TRI, 1097 Register VReg) const { 1098 llvm_unreachable("Target didn't implement " 1099 "TargetInstrInfo::loadRegFromStackSlot!"); 1100 } 1101 1102 /// This function is called for all pseudo instructions 1103 /// that remain after register allocation. Many pseudo instructions are 1104 /// created to help register allocation. This is the place to convert them 1105 /// into real instructions. The target can edit MI in place, or it can insert 1106 /// new instructions and erase MI. The function should return true if 1107 /// anything was changed. 1108 virtual bool expandPostRAPseudo(MachineInstr &MI) const { return false; } 1109 1110 /// Check whether the target can fold a load that feeds a subreg operand 1111 /// (or a subreg operand that feeds a store). 1112 /// For example, X86 may want to return true if it can fold 1113 /// movl (%esp), %eax 1114 /// subb, %al, ... 1115 /// Into: 1116 /// subb (%esp), ... 1117 /// 1118 /// Ideally, we'd like the target implementation of foldMemoryOperand() to 1119 /// reject subregs - but since this behavior used to be enforced in the 1120 /// target-independent code, moving this responsibility to the targets 1121 /// has the potential of causing nasty silent breakage in out-of-tree targets. 1122 virtual bool isSubregFoldable() const { return false; } 1123 1124 /// For a patchpoint, stackmap, or statepoint intrinsic, return the range of 1125 /// operands which can't be folded into stack references. Operands outside 1126 /// of the range are most likely foldable but it is not guaranteed. 1127 /// These instructions are unique in that stack references for some operands 1128 /// have the same execution cost (e.g. none) as the unfolded register forms. 1129 /// The ranged return is guaranteed to include all operands which can't be 1130 /// folded at zero cost. 1131 virtual std::pair<unsigned, unsigned> 1132 getPatchpointUnfoldableRange(const MachineInstr &MI) const; 1133 1134 /// Attempt to fold a load or store of the specified stack 1135 /// slot into the specified machine instruction for the specified operand(s). 1136 /// If this is possible, a new instruction is returned with the specified 1137 /// operand folded, otherwise NULL is returned. 1138 /// The new instruction is inserted before MI, and the client is responsible 1139 /// for removing the old instruction. 1140 /// If VRM is passed, the assigned physregs can be inspected by target to 1141 /// decide on using an opcode (note that those assignments can still change). 1142 MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops, 1143 int FI, 1144 LiveIntervals *LIS = nullptr, 1145 VirtRegMap *VRM = nullptr) const; 1146 1147 /// Same as the previous version except it allows folding of any load and 1148 /// store from / to any address, not just from a specific stack slot. 1149 MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops, 1150 MachineInstr &LoadMI, 1151 LiveIntervals *LIS = nullptr) const; 1152 1153 /// This function defines the logic to lower COPY instruction to 1154 /// target specific instruction(s). 1155 void lowerCopy(MachineInstr *MI, const TargetRegisterInfo *TRI) const; 1156 1157 /// Return true when there is potentially a faster code sequence 1158 /// for an instruction chain ending in \p Root. All potential patterns are 1159 /// returned in the \p Pattern vector. Pattern should be sorted in priority 1160 /// order since the pattern evaluator stops checking as soon as it finds a 1161 /// faster sequence. 1162 /// \param Root - Instruction that could be combined with one of its operands 1163 /// \param Patterns - Vector of possible combination patterns 1164 virtual bool 1165 getMachineCombinerPatterns(MachineInstr &Root, 1166 SmallVectorImpl<MachineCombinerPattern> &Patterns, 1167 bool DoRegPressureReduce) const; 1168 1169 /// Return true if target supports reassociation of instructions in machine 1170 /// combiner pass to reduce register pressure for a given BB. 1171 virtual bool 1172 shouldReduceRegisterPressure(const MachineBasicBlock *MBB, 1173 const RegisterClassInfo *RegClassInfo) const { 1174 return false; 1175 } 1176 1177 /// Fix up the placeholder we may add in genAlternativeCodeSequence(). 1178 virtual void 1179 finalizeInsInstrs(MachineInstr &Root, MachineCombinerPattern &P, 1180 SmallVectorImpl<MachineInstr *> &InsInstrs) const {} 1181 1182 /// Return true when a code sequence can improve throughput. It 1183 /// should be called only for instructions in loops. 1184 /// \param Pattern - combiner pattern 1185 virtual bool isThroughputPattern(MachineCombinerPattern Pattern) const; 1186 1187 /// Return true if the input \P Inst is part of a chain of dependent ops 1188 /// that are suitable for reassociation, otherwise return false. 1189 /// If the instruction's operands must be commuted to have a previous 1190 /// instruction of the same type define the first source operand, \P Commuted 1191 /// will be set to true. 1192 bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const; 1193 1194 /// Return true when \P Inst is both associative and commutative. If \P Invert 1195 /// is true, then the inverse of \P Inst operation must be tested. 1196 virtual bool isAssociativeAndCommutative(const MachineInstr &Inst, 1197 bool Invert = false) const { 1198 return false; 1199 } 1200 1201 /// Return the inverse operation opcode if it exists for \P Opcode (e.g. add 1202 /// for sub and vice versa). 1203 virtual std::optional<unsigned> getInverseOpcode(unsigned Opcode) const { 1204 return std::nullopt; 1205 } 1206 1207 /// Return true when \P Opcode1 or its inversion is equal to \P Opcode2. 1208 bool areOpcodesEqualOrInverse(unsigned Opcode1, unsigned Opcode2) const; 1209 1210 /// Return true when \P Inst has reassociable operands in the same \P MBB. 1211 virtual bool hasReassociableOperands(const MachineInstr &Inst, 1212 const MachineBasicBlock *MBB) const; 1213 1214 /// Return true when \P Inst has reassociable sibling. 1215 virtual bool hasReassociableSibling(const MachineInstr &Inst, 1216 bool &Commuted) const; 1217 1218 /// When getMachineCombinerPatterns() finds patterns, this function generates 1219 /// the instructions that could replace the original code sequence. The client 1220 /// has to decide whether the actual replacement is beneficial or not. 1221 /// \param Root - Instruction that could be combined with one of its operands 1222 /// \param Pattern - Combination pattern for Root 1223 /// \param InsInstrs - Vector of new instructions that implement P 1224 /// \param DelInstrs - Old instructions, including Root, that could be 1225 /// replaced by InsInstr 1226 /// \param InstIdxForVirtReg - map of virtual register to instruction in 1227 /// InsInstr that defines it 1228 virtual void genAlternativeCodeSequence( 1229 MachineInstr &Root, MachineCombinerPattern Pattern, 1230 SmallVectorImpl<MachineInstr *> &InsInstrs, 1231 SmallVectorImpl<MachineInstr *> &DelInstrs, 1232 DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const; 1233 1234 /// When calculate the latency of the root instruction, accumulate the 1235 /// latency of the sequence to the root latency. 1236 /// \param Root - Instruction that could be combined with one of its operands 1237 virtual bool accumulateInstrSeqToRootLatency(MachineInstr &Root) const { 1238 return true; 1239 } 1240 1241 /// Attempt to reassociate \P Root and \P Prev according to \P Pattern to 1242 /// reduce critical path length. 1243 void reassociateOps(MachineInstr &Root, MachineInstr &Prev, 1244 MachineCombinerPattern Pattern, 1245 SmallVectorImpl<MachineInstr *> &InsInstrs, 1246 SmallVectorImpl<MachineInstr *> &DelInstrs, 1247 DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const; 1248 1249 /// Reassociation of some instructions requires inverse operations (e.g. 1250 /// (X + A) - Y => (X - Y) + A). This method returns a pair of new opcodes 1251 /// (new root opcode, new prev opcode) that must be used to reassociate \P 1252 /// Root and \P Prev accoring to \P Pattern. 1253 std::pair<unsigned, unsigned> 1254 getReassociationOpcodes(MachineCombinerPattern Pattern, 1255 const MachineInstr &Root, 1256 const MachineInstr &Prev) const; 1257 1258 /// The limit on resource length extension we accept in MachineCombiner Pass. 1259 virtual int getExtendResourceLenLimit() const { return 0; } 1260 1261 /// This is an architecture-specific helper function of reassociateOps. 1262 /// Set special operand attributes for new instructions after reassociation. 1263 virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2, 1264 MachineInstr &NewMI1, 1265 MachineInstr &NewMI2) const {} 1266 1267 /// Return true when a target supports MachineCombiner. 1268 virtual bool useMachineCombiner() const { return false; } 1269 1270 /// Return a strategy that MachineCombiner must use when creating traces. 1271 virtual MachineTraceStrategy getMachineCombinerTraceStrategy() const; 1272 1273 /// Return true if the given SDNode can be copied during scheduling 1274 /// even if it has glue. 1275 virtual bool canCopyGluedNodeDuringSchedule(SDNode *N) const { return false; } 1276 1277 protected: 1278 /// Target-dependent implementation for foldMemoryOperand. 1279 /// Target-independent code in foldMemoryOperand will 1280 /// take care of adding a MachineMemOperand to the newly created instruction. 1281 /// The instruction and any auxiliary instructions necessary will be inserted 1282 /// at InsertPt. 1283 virtual MachineInstr * 1284 foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, 1285 ArrayRef<unsigned> Ops, 1286 MachineBasicBlock::iterator InsertPt, int FrameIndex, 1287 LiveIntervals *LIS = nullptr, 1288 VirtRegMap *VRM = nullptr) const { 1289 return nullptr; 1290 } 1291 1292 /// Target-dependent implementation for foldMemoryOperand. 1293 /// Target-independent code in foldMemoryOperand will 1294 /// take care of adding a MachineMemOperand to the newly created instruction. 1295 /// The instruction and any auxiliary instructions necessary will be inserted 1296 /// at InsertPt. 1297 virtual MachineInstr *foldMemoryOperandImpl( 1298 MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops, 1299 MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI, 1300 LiveIntervals *LIS = nullptr) const { 1301 return nullptr; 1302 } 1303 1304 /// Target-dependent implementation of getRegSequenceInputs. 1305 /// 1306 /// \returns true if it is possible to build the equivalent 1307 /// REG_SEQUENCE inputs with the pair \p MI, \p DefIdx. False otherwise. 1308 /// 1309 /// \pre MI.isRegSequenceLike(). 1310 /// 1311 /// \see TargetInstrInfo::getRegSequenceInputs. 1312 virtual bool getRegSequenceLikeInputs( 1313 const MachineInstr &MI, unsigned DefIdx, 1314 SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const { 1315 return false; 1316 } 1317 1318 /// Target-dependent implementation of getExtractSubregInputs. 1319 /// 1320 /// \returns true if it is possible to build the equivalent 1321 /// EXTRACT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise. 1322 /// 1323 /// \pre MI.isExtractSubregLike(). 1324 /// 1325 /// \see TargetInstrInfo::getExtractSubregInputs. 1326 virtual bool getExtractSubregLikeInputs(const MachineInstr &MI, 1327 unsigned DefIdx, 1328 RegSubRegPairAndIdx &InputReg) const { 1329 return false; 1330 } 1331 1332 /// Target-dependent implementation of getInsertSubregInputs. 1333 /// 1334 /// \returns true if it is possible to build the equivalent 1335 /// INSERT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise. 1336 /// 1337 /// \pre MI.isInsertSubregLike(). 1338 /// 1339 /// \see TargetInstrInfo::getInsertSubregInputs. 1340 virtual bool 1341 getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, 1342 RegSubRegPair &BaseReg, 1343 RegSubRegPairAndIdx &InsertedReg) const { 1344 return false; 1345 } 1346 1347 public: 1348 /// unfoldMemoryOperand - Separate a single instruction which folded a load or 1349 /// a store or a load and a store into two or more instruction. If this is 1350 /// possible, returns true as well as the new instructions by reference. 1351 virtual bool 1352 unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, unsigned Reg, 1353 bool UnfoldLoad, bool UnfoldStore, 1354 SmallVectorImpl<MachineInstr *> &NewMIs) const { 1355 return false; 1356 } 1357 1358 virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, 1359 SmallVectorImpl<SDNode *> &NewNodes) const { 1360 return false; 1361 } 1362 1363 /// Returns the opcode of the would be new 1364 /// instruction after load / store are unfolded from an instruction of the 1365 /// specified opcode. It returns zero if the specified unfolding is not 1366 /// possible. If LoadRegIndex is non-null, it is filled in with the operand 1367 /// index of the operand which will hold the register holding the loaded 1368 /// value. 1369 virtual unsigned 1370 getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore, 1371 unsigned *LoadRegIndex = nullptr) const { 1372 return 0; 1373 } 1374 1375 /// This is used by the pre-regalloc scheduler to determine if two loads are 1376 /// loading from the same base address. It should only return true if the base 1377 /// pointers are the same and the only differences between the two addresses 1378 /// are the offset. It also returns the offsets by reference. 1379 virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, 1380 int64_t &Offset1, 1381 int64_t &Offset2) const { 1382 return false; 1383 } 1384 1385 /// This is a used by the pre-regalloc scheduler to determine (in conjunction 1386 /// with areLoadsFromSameBasePtr) if two loads should be scheduled together. 1387 /// On some targets if two loads are loading from 1388 /// addresses in the same cache line, it's better if they are scheduled 1389 /// together. This function takes two integers that represent the load offsets 1390 /// from the common base address. It returns true if it decides it's desirable 1391 /// to schedule the two loads together. "NumLoads" is the number of loads that 1392 /// have already been scheduled after Load1. 1393 virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, 1394 int64_t Offset1, int64_t Offset2, 1395 unsigned NumLoads) const { 1396 return false; 1397 } 1398 1399 /// Get the base operand and byte offset of an instruction that reads/writes 1400 /// memory. This is a convenience function for callers that are only prepared 1401 /// to handle a single base operand. 1402 bool getMemOperandWithOffset(const MachineInstr &MI, 1403 const MachineOperand *&BaseOp, int64_t &Offset, 1404 bool &OffsetIsScalable, 1405 const TargetRegisterInfo *TRI) const; 1406 1407 /// Get zero or more base operands and the byte offset of an instruction that 1408 /// reads/writes memory. Note that there may be zero base operands if the 1409 /// instruction accesses a constant address. 1410 /// It returns false if MI does not read/write memory. 1411 /// It returns false if base operands and offset could not be determined. 1412 /// It is not guaranteed to always recognize base operands and offsets in all 1413 /// cases. 1414 virtual bool getMemOperandsWithOffsetWidth( 1415 const MachineInstr &MI, SmallVectorImpl<const MachineOperand *> &BaseOps, 1416 int64_t &Offset, bool &OffsetIsScalable, unsigned &Width, 1417 const TargetRegisterInfo *TRI) const { 1418 return false; 1419 } 1420 1421 /// Return true if the instruction contains a base register and offset. If 1422 /// true, the function also sets the operand position in the instruction 1423 /// for the base register and offset. 1424 virtual bool getBaseAndOffsetPosition(const MachineInstr &MI, 1425 unsigned &BasePos, 1426 unsigned &OffsetPos) const { 1427 return false; 1428 } 1429 1430 /// Target dependent implementation to get the values constituting the address 1431 /// MachineInstr that is accessing memory. These values are returned as a 1432 /// struct ExtAddrMode which contains all relevant information to make up the 1433 /// address. 1434 virtual std::optional<ExtAddrMode> 1435 getAddrModeFromMemoryOp(const MachineInstr &MemI, 1436 const TargetRegisterInfo *TRI) const { 1437 return std::nullopt; 1438 } 1439 1440 /// Returns true if MI's Def is NullValueReg, and the MI 1441 /// does not change the Zero value. i.e. cases such as rax = shr rax, X where 1442 /// NullValueReg = rax. Note that if the NullValueReg is non-zero, this 1443 /// function can return true even if becomes zero. Specifically cases such as 1444 /// NullValueReg = shl NullValueReg, 63. 1445 virtual bool preservesZeroValueInReg(const MachineInstr *MI, 1446 const Register NullValueReg, 1447 const TargetRegisterInfo *TRI) const { 1448 return false; 1449 } 1450 1451 /// If the instruction is an increment of a constant value, return the amount. 1452 virtual bool getIncrementValue(const MachineInstr &MI, int &Value) const { 1453 return false; 1454 } 1455 1456 /// Returns true if the two given memory operations should be scheduled 1457 /// adjacent. Note that you have to add: 1458 /// DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI)); 1459 /// or 1460 /// DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI)); 1461 /// to TargetPassConfig::createMachineScheduler() to have an effect. 1462 /// 1463 /// \p BaseOps1 and \p BaseOps2 are memory operands of two memory operations. 1464 /// \p NumLoads is the number of loads that will be in the cluster if this 1465 /// hook returns true. 1466 /// \p NumBytes is the number of bytes that will be loaded from all the 1467 /// clustered loads if this hook returns true. 1468 virtual bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1, 1469 ArrayRef<const MachineOperand *> BaseOps2, 1470 unsigned NumLoads, unsigned NumBytes) const { 1471 llvm_unreachable("target did not implement shouldClusterMemOps()"); 1472 } 1473 1474 /// Reverses the branch condition of the specified condition list, 1475 /// returning false on success and true if it cannot be reversed. 1476 virtual bool 1477 reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const { 1478 return true; 1479 } 1480 1481 /// Insert a noop into the instruction stream at the specified point. 1482 virtual void insertNoop(MachineBasicBlock &MBB, 1483 MachineBasicBlock::iterator MI) const; 1484 1485 /// Insert noops into the instruction stream at the specified point. 1486 virtual void insertNoops(MachineBasicBlock &MBB, 1487 MachineBasicBlock::iterator MI, 1488 unsigned Quantity) const; 1489 1490 /// Return the noop instruction to use for a noop. 1491 virtual MCInst getNop() const; 1492 1493 /// Return true for post-incremented instructions. 1494 virtual bool isPostIncrement(const MachineInstr &MI) const { return false; } 1495 1496 /// Returns true if the instruction is already predicated. 1497 virtual bool isPredicated(const MachineInstr &MI) const { return false; } 1498 1499 /// Assumes the instruction is already predicated and returns true if the 1500 /// instruction can be predicated again. 1501 virtual bool canPredicatePredicatedInstr(const MachineInstr &MI) const { 1502 assert(isPredicated(MI) && "Instruction is not predicated"); 1503 return false; 1504 } 1505 1506 // Returns a MIRPrinter comment for this machine operand. 1507 virtual std::string 1508 createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, 1509 unsigned OpIdx, const TargetRegisterInfo *TRI) const; 1510 1511 /// Returns true if the instruction is a 1512 /// terminator instruction that has not been predicated. 1513 bool isUnpredicatedTerminator(const MachineInstr &MI) const; 1514 1515 /// Returns true if MI is an unconditional tail call. 1516 virtual bool isUnconditionalTailCall(const MachineInstr &MI) const { 1517 return false; 1518 } 1519 1520 /// Returns true if the tail call can be made conditional on BranchCond. 1521 virtual bool canMakeTailCallConditional(SmallVectorImpl<MachineOperand> &Cond, 1522 const MachineInstr &TailCall) const { 1523 return false; 1524 } 1525 1526 /// Replace the conditional branch in MBB with a conditional tail call. 1527 virtual void replaceBranchWithTailCall(MachineBasicBlock &MBB, 1528 SmallVectorImpl<MachineOperand> &Cond, 1529 const MachineInstr &TailCall) const { 1530 llvm_unreachable("Target didn't implement replaceBranchWithTailCall!"); 1531 } 1532 1533 /// Convert the instruction into a predicated instruction. 1534 /// It returns true if the operation was successful. 1535 virtual bool PredicateInstruction(MachineInstr &MI, 1536 ArrayRef<MachineOperand> Pred) const; 1537 1538 /// Returns true if the first specified predicate 1539 /// subsumes the second, e.g. GE subsumes GT. 1540 virtual bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1, 1541 ArrayRef<MachineOperand> Pred2) const { 1542 return false; 1543 } 1544 1545 /// If the specified instruction defines any predicate 1546 /// or condition code register(s) used for predication, returns true as well 1547 /// as the definition predicate(s) by reference. 1548 /// SkipDead should be set to false at any point that dead 1549 /// predicate instructions should be considered as being defined. 1550 /// A dead predicate instruction is one that is guaranteed to be removed 1551 /// after a call to PredicateInstruction. 1552 virtual bool ClobbersPredicate(MachineInstr &MI, 1553 std::vector<MachineOperand> &Pred, 1554 bool SkipDead) const { 1555 return false; 1556 } 1557 1558 /// Return true if the specified instruction can be predicated. 1559 /// By default, this returns true for every instruction with a 1560 /// PredicateOperand. 1561 virtual bool isPredicable(const MachineInstr &MI) const { 1562 return MI.getDesc().isPredicable(); 1563 } 1564 1565 /// Return true if it's safe to move a machine 1566 /// instruction that defines the specified register class. 1567 virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const { 1568 return true; 1569 } 1570 1571 /// Test if the given instruction should be considered a scheduling boundary. 1572 /// This primarily includes labels and terminators. 1573 virtual bool isSchedulingBoundary(const MachineInstr &MI, 1574 const MachineBasicBlock *MBB, 1575 const MachineFunction &MF) const; 1576 1577 /// Measure the specified inline asm to determine an approximation of its 1578 /// length. 1579 virtual unsigned getInlineAsmLength( 1580 const char *Str, const MCAsmInfo &MAI, 1581 const TargetSubtargetInfo *STI = nullptr) const; 1582 1583 /// Allocate and return a hazard recognizer to use for this target when 1584 /// scheduling the machine instructions before register allocation. 1585 virtual ScheduleHazardRecognizer * 1586 CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, 1587 const ScheduleDAG *DAG) const; 1588 1589 /// Allocate and return a hazard recognizer to use for this target when 1590 /// scheduling the machine instructions before register allocation. 1591 virtual ScheduleHazardRecognizer * 1592 CreateTargetMIHazardRecognizer(const InstrItineraryData *, 1593 const ScheduleDAGMI *DAG) const; 1594 1595 /// Allocate and return a hazard recognizer to use for this target when 1596 /// scheduling the machine instructions after register allocation. 1597 virtual ScheduleHazardRecognizer * 1598 CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, 1599 const ScheduleDAG *DAG) const; 1600 1601 /// Allocate and return a hazard recognizer to use for by non-scheduling 1602 /// passes. 1603 virtual ScheduleHazardRecognizer * 1604 CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const { 1605 return nullptr; 1606 } 1607 1608 /// Provide a global flag for disabling the PreRA hazard recognizer that 1609 /// targets may choose to honor. 1610 bool usePreRAHazardRecognizer() const; 1611 1612 /// For a comparison instruction, return the source registers 1613 /// in SrcReg and SrcReg2 if having two register operands, and the value it 1614 /// compares against in CmpValue. Return true if the comparison instruction 1615 /// can be analyzed. 1616 virtual bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, 1617 Register &SrcReg2, int64_t &Mask, 1618 int64_t &Value) const { 1619 return false; 1620 } 1621 1622 /// See if the comparison instruction can be converted 1623 /// into something more efficient. E.g., on ARM most instructions can set the 1624 /// flags register, obviating the need for a separate CMP. 1625 virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, 1626 Register SrcReg2, int64_t Mask, 1627 int64_t Value, 1628 const MachineRegisterInfo *MRI) const { 1629 return false; 1630 } 1631 virtual bool optimizeCondBranch(MachineInstr &MI) const { return false; } 1632 1633 /// Try to remove the load by folding it to a register operand at the use. 1634 /// We fold the load instructions if and only if the 1635 /// def and use are in the same BB. We only look at one load and see 1636 /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register 1637 /// defined by the load we are trying to fold. DefMI returns the machine 1638 /// instruction that defines FoldAsLoadDefReg, and the function returns 1639 /// the machine instruction generated due to folding. 1640 virtual MachineInstr *optimizeLoadInstr(MachineInstr &MI, 1641 const MachineRegisterInfo *MRI, 1642 Register &FoldAsLoadDefReg, 1643 MachineInstr *&DefMI) const { 1644 return nullptr; 1645 } 1646 1647 /// 'Reg' is known to be defined by a move immediate instruction, 1648 /// try to fold the immediate into the use instruction. 1649 /// If MRI->hasOneNonDBGUse(Reg) is true, and this function returns true, 1650 /// then the caller may assume that DefMI has been erased from its parent 1651 /// block. The caller may assume that it will not be erased by this 1652 /// function otherwise. 1653 virtual bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, 1654 Register Reg, MachineRegisterInfo *MRI) const { 1655 return false; 1656 } 1657 1658 /// Return the number of u-operations the given machine 1659 /// instruction will be decoded to on the target cpu. The itinerary's 1660 /// IssueWidth is the number of microops that can be dispatched each 1661 /// cycle. An instruction with zero microops takes no dispatch resources. 1662 virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData, 1663 const MachineInstr &MI) const; 1664 1665 /// Return true for pseudo instructions that don't consume any 1666 /// machine resources in their current form. These are common cases that the 1667 /// scheduler should consider free, rather than conservatively handling them 1668 /// as instructions with no itinerary. 1669 bool isZeroCost(unsigned Opcode) const { 1670 return Opcode <= TargetOpcode::COPY; 1671 } 1672 1673 virtual int getOperandLatency(const InstrItineraryData *ItinData, 1674 SDNode *DefNode, unsigned DefIdx, 1675 SDNode *UseNode, unsigned UseIdx) const; 1676 1677 /// Compute and return the use operand latency of a given pair of def and use. 1678 /// In most cases, the static scheduling itinerary was enough to determine the 1679 /// operand latency. But it may not be possible for instructions with variable 1680 /// number of defs / uses. 1681 /// 1682 /// This is a raw interface to the itinerary that may be directly overridden 1683 /// by a target. Use computeOperandLatency to get the best estimate of 1684 /// latency. 1685 virtual int getOperandLatency(const InstrItineraryData *ItinData, 1686 const MachineInstr &DefMI, unsigned DefIdx, 1687 const MachineInstr &UseMI, 1688 unsigned UseIdx) const; 1689 1690 /// Compute the instruction latency of a given instruction. 1691 /// If the instruction has higher cost when predicated, it's returned via 1692 /// PredCost. 1693 virtual unsigned getInstrLatency(const InstrItineraryData *ItinData, 1694 const MachineInstr &MI, 1695 unsigned *PredCost = nullptr) const; 1696 1697 virtual unsigned getPredicationCost(const MachineInstr &MI) const; 1698 1699 virtual int getInstrLatency(const InstrItineraryData *ItinData, 1700 SDNode *Node) const; 1701 1702 /// Return the default expected latency for a def based on its opcode. 1703 unsigned defaultDefLatency(const MCSchedModel &SchedModel, 1704 const MachineInstr &DefMI) const; 1705 1706 /// Return true if this opcode has high latency to its result. 1707 virtual bool isHighLatencyDef(int opc) const { return false; } 1708 1709 /// Compute operand latency between a def of 'Reg' 1710 /// and a use in the current loop. Return true if the target considered 1711 /// it 'high'. This is used by optimization passes such as machine LICM to 1712 /// determine whether it makes sense to hoist an instruction out even in a 1713 /// high register pressure situation. 1714 virtual bool hasHighOperandLatency(const TargetSchedModel &SchedModel, 1715 const MachineRegisterInfo *MRI, 1716 const MachineInstr &DefMI, unsigned DefIdx, 1717 const MachineInstr &UseMI, 1718 unsigned UseIdx) const { 1719 return false; 1720 } 1721 1722 /// Compute operand latency of a def of 'Reg'. Return true 1723 /// if the target considered it 'low'. 1724 virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel, 1725 const MachineInstr &DefMI, 1726 unsigned DefIdx) const; 1727 1728 /// Perform target-specific instruction verification. 1729 virtual bool verifyInstruction(const MachineInstr &MI, 1730 StringRef &ErrInfo) const { 1731 return true; 1732 } 1733 1734 /// Return the current execution domain and bit mask of 1735 /// possible domains for instruction. 1736 /// 1737 /// Some micro-architectures have multiple execution domains, and multiple 1738 /// opcodes that perform the same operation in different domains. For 1739 /// example, the x86 architecture provides the por, orps, and orpd 1740 /// instructions that all do the same thing. There is a latency penalty if a 1741 /// register is written in one domain and read in another. 1742 /// 1743 /// This function returns a pair (domain, mask) containing the execution 1744 /// domain of MI, and a bit mask of possible domains. The setExecutionDomain 1745 /// function can be used to change the opcode to one of the domains in the 1746 /// bit mask. Instructions whose execution domain can't be changed should 1747 /// return a 0 mask. 1748 /// 1749 /// The execution domain numbers don't have any special meaning except domain 1750 /// 0 is used for instructions that are not associated with any interesting 1751 /// execution domain. 1752 /// 1753 virtual std::pair<uint16_t, uint16_t> 1754 getExecutionDomain(const MachineInstr &MI) const { 1755 return std::make_pair(0, 0); 1756 } 1757 1758 /// Change the opcode of MI to execute in Domain. 1759 /// 1760 /// The bit (1 << Domain) must be set in the mask returned from 1761 /// getExecutionDomain(MI). 1762 virtual void setExecutionDomain(MachineInstr &MI, unsigned Domain) const {} 1763 1764 /// Returns the preferred minimum clearance 1765 /// before an instruction with an unwanted partial register update. 1766 /// 1767 /// Some instructions only write part of a register, and implicitly need to 1768 /// read the other parts of the register. This may cause unwanted stalls 1769 /// preventing otherwise unrelated instructions from executing in parallel in 1770 /// an out-of-order CPU. 1771 /// 1772 /// For example, the x86 instruction cvtsi2ss writes its result to bits 1773 /// [31:0] of the destination xmm register. Bits [127:32] are unaffected, so 1774 /// the instruction needs to wait for the old value of the register to become 1775 /// available: 1776 /// 1777 /// addps %xmm1, %xmm0 1778 /// movaps %xmm0, (%rax) 1779 /// cvtsi2ss %rbx, %xmm0 1780 /// 1781 /// In the code above, the cvtsi2ss instruction needs to wait for the addps 1782 /// instruction before it can issue, even though the high bits of %xmm0 1783 /// probably aren't needed. 1784 /// 1785 /// This hook returns the preferred clearance before MI, measured in 1786 /// instructions. Other defs of MI's operand OpNum are avoided in the last N 1787 /// instructions before MI. It should only return a positive value for 1788 /// unwanted dependencies. If the old bits of the defined register have 1789 /// useful values, or if MI is determined to otherwise read the dependency, 1790 /// the hook should return 0. 1791 /// 1792 /// The unwanted dependency may be handled by: 1793 /// 1794 /// 1. Allocating the same register for an MI def and use. That makes the 1795 /// unwanted dependency identical to a required dependency. 1796 /// 1797 /// 2. Allocating a register for the def that has no defs in the previous N 1798 /// instructions. 1799 /// 1800 /// 3. Calling breakPartialRegDependency() with the same arguments. This 1801 /// allows the target to insert a dependency breaking instruction. 1802 /// 1803 virtual unsigned 1804 getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum, 1805 const TargetRegisterInfo *TRI) const { 1806 // The default implementation returns 0 for no partial register dependency. 1807 return 0; 1808 } 1809 1810 /// Return the minimum clearance before an instruction that reads an 1811 /// unused register. 1812 /// 1813 /// For example, AVX instructions may copy part of a register operand into 1814 /// the unused high bits of the destination register. 1815 /// 1816 /// vcvtsi2sdq %rax, undef %xmm0, %xmm14 1817 /// 1818 /// In the code above, vcvtsi2sdq copies %xmm0[127:64] into %xmm14 creating a 1819 /// false dependence on any previous write to %xmm0. 1820 /// 1821 /// This hook works similarly to getPartialRegUpdateClearance, except that it 1822 /// does not take an operand index. Instead sets \p OpNum to the index of the 1823 /// unused register. 1824 virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned OpNum, 1825 const TargetRegisterInfo *TRI) const { 1826 // The default implementation returns 0 for no undef register dependency. 1827 return 0; 1828 } 1829 1830 /// Insert a dependency-breaking instruction 1831 /// before MI to eliminate an unwanted dependency on OpNum. 1832 /// 1833 /// If it wasn't possible to avoid a def in the last N instructions before MI 1834 /// (see getPartialRegUpdateClearance), this hook will be called to break the 1835 /// unwanted dependency. 1836 /// 1837 /// On x86, an xorps instruction can be used as a dependency breaker: 1838 /// 1839 /// addps %xmm1, %xmm0 1840 /// movaps %xmm0, (%rax) 1841 /// xorps %xmm0, %xmm0 1842 /// cvtsi2ss %rbx, %xmm0 1843 /// 1844 /// An <imp-kill> operand should be added to MI if an instruction was 1845 /// inserted. This ties the instructions together in the post-ra scheduler. 1846 /// 1847 virtual void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum, 1848 const TargetRegisterInfo *TRI) const {} 1849 1850 /// Create machine specific model for scheduling. 1851 virtual DFAPacketizer * 1852 CreateTargetScheduleState(const TargetSubtargetInfo &) const { 1853 return nullptr; 1854 } 1855 1856 /// Sometimes, it is possible for the target 1857 /// to tell, even without aliasing information, that two MIs access different 1858 /// memory addresses. This function returns true if two MIs access different 1859 /// memory addresses and false otherwise. 1860 /// 1861 /// Assumes any physical registers used to compute addresses have the same 1862 /// value for both instructions. (This is the most useful assumption for 1863 /// post-RA scheduling.) 1864 /// 1865 /// See also MachineInstr::mayAlias, which is implemented on top of this 1866 /// function. 1867 virtual bool 1868 areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, 1869 const MachineInstr &MIb) const { 1870 assert(MIa.mayLoadOrStore() && 1871 "MIa must load from or modify a memory location"); 1872 assert(MIb.mayLoadOrStore() && 1873 "MIb must load from or modify a memory location"); 1874 return false; 1875 } 1876 1877 /// Return the value to use for the MachineCSE's LookAheadLimit, 1878 /// which is a heuristic used for CSE'ing phys reg defs. 1879 virtual unsigned getMachineCSELookAheadLimit() const { 1880 // The default lookahead is small to prevent unprofitable quadratic 1881 // behavior. 1882 return 5; 1883 } 1884 1885 /// Return the maximal number of alias checks on memory operands. For 1886 /// instructions with more than one memory operands, the alias check on a 1887 /// single MachineInstr pair has quadratic overhead and results in 1888 /// unacceptable performance in the worst case. The limit here is to clamp 1889 /// that maximal checks performed. Usually, that's the product of memory 1890 /// operand numbers from that pair of MachineInstr to be checked. For 1891 /// instance, with two MachineInstrs with 4 and 5 memory operands 1892 /// correspondingly, a total of 20 checks are required. With this limit set to 1893 /// 16, their alias check is skipped. We choose to limit the product instead 1894 /// of the individual instruction as targets may have special MachineInstrs 1895 /// with a considerably high number of memory operands, such as `ldm` in ARM. 1896 /// Setting this limit per MachineInstr would result in either too high 1897 /// overhead or too rigid restriction. 1898 virtual unsigned getMemOperandAACheckLimit() const { return 16; } 1899 1900 /// Return an array that contains the ids of the target indices (used for the 1901 /// TargetIndex machine operand) and their names. 1902 /// 1903 /// MIR Serialization is able to serialize only the target indices that are 1904 /// defined by this method. 1905 virtual ArrayRef<std::pair<int, const char *>> 1906 getSerializableTargetIndices() const { 1907 return std::nullopt; 1908 } 1909 1910 /// Decompose the machine operand's target flags into two values - the direct 1911 /// target flag value and any of bit flags that are applied. 1912 virtual std::pair<unsigned, unsigned> 1913 decomposeMachineOperandsTargetFlags(unsigned /*TF*/) const { 1914 return std::make_pair(0u, 0u); 1915 } 1916 1917 /// Return an array that contains the direct target flag values and their 1918 /// names. 1919 /// 1920 /// MIR Serialization is able to serialize only the target flags that are 1921 /// defined by this method. 1922 virtual ArrayRef<std::pair<unsigned, const char *>> 1923 getSerializableDirectMachineOperandTargetFlags() const { 1924 return std::nullopt; 1925 } 1926 1927 /// Return an array that contains the bitmask target flag values and their 1928 /// names. 1929 /// 1930 /// MIR Serialization is able to serialize only the target flags that are 1931 /// defined by this method. 1932 virtual ArrayRef<std::pair<unsigned, const char *>> 1933 getSerializableBitmaskMachineOperandTargetFlags() const { 1934 return std::nullopt; 1935 } 1936 1937 /// Return an array that contains the MMO target flag values and their 1938 /// names. 1939 /// 1940 /// MIR Serialization is able to serialize only the MMO target flags that are 1941 /// defined by this method. 1942 virtual ArrayRef<std::pair<MachineMemOperand::Flags, const char *>> 1943 getSerializableMachineMemOperandTargetFlags() const { 1944 return std::nullopt; 1945 } 1946 1947 /// Determines whether \p Inst is a tail call instruction. Override this 1948 /// method on targets that do not properly set MCID::Return and MCID::Call on 1949 /// tail call instructions." 1950 virtual bool isTailCall(const MachineInstr &Inst) const { 1951 return Inst.isReturn() && Inst.isCall(); 1952 } 1953 1954 /// True if the instruction is bound to the top of its basic block and no 1955 /// other instructions shall be inserted before it. This can be implemented 1956 /// to prevent register allocator to insert spills before such instructions. 1957 virtual bool isBasicBlockPrologue(const MachineInstr &MI) const { 1958 return false; 1959 } 1960 1961 /// During PHI eleimination lets target to make necessary checks and 1962 /// insert the copy to the PHI destination register in a target specific 1963 /// manner. 1964 virtual MachineInstr *createPHIDestinationCopy( 1965 MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, 1966 const DebugLoc &DL, Register Src, Register Dst) const { 1967 return BuildMI(MBB, InsPt, DL, get(TargetOpcode::COPY), Dst) 1968 .addReg(Src); 1969 } 1970 1971 /// During PHI eleimination lets target to make necessary checks and 1972 /// insert the copy to the PHI destination register in a target specific 1973 /// manner. 1974 virtual MachineInstr *createPHISourceCopy(MachineBasicBlock &MBB, 1975 MachineBasicBlock::iterator InsPt, 1976 const DebugLoc &DL, Register Src, 1977 unsigned SrcSubReg, 1978 Register Dst) const { 1979 return BuildMI(MBB, InsPt, DL, get(TargetOpcode::COPY), Dst) 1980 .addReg(Src, 0, SrcSubReg); 1981 } 1982 1983 /// Returns a \p outliner::OutlinedFunction struct containing target-specific 1984 /// information for a set of outlining candidates. Returns std::nullopt if the 1985 /// candidates are not suitable for outlining. 1986 virtual std::optional<outliner::OutlinedFunction> getOutliningCandidateInfo( 1987 std::vector<outliner::Candidate> &RepeatedSequenceLocs) const { 1988 llvm_unreachable( 1989 "Target didn't implement TargetInstrInfo::getOutliningCandidateInfo!"); 1990 } 1991 1992 /// Optional target hook to create the LLVM IR attributes for the outlined 1993 /// function. If overridden, the overriding function must call the default 1994 /// implementation. 1995 virtual void mergeOutliningCandidateAttributes( 1996 Function &F, std::vector<outliner::Candidate> &Candidates) const; 1997 1998 protected: 1999 /// Target-dependent implementation for getOutliningTypeImpl. 2000 virtual outliner::InstrType 2001 getOutliningTypeImpl(MachineBasicBlock::iterator &MIT, unsigned Flags) const { 2002 llvm_unreachable( 2003 "Target didn't implement TargetInstrInfo::getOutliningTypeImpl!"); 2004 } 2005 2006 public: 2007 /// Returns how or if \p MIT should be outlined. \p Flags is the 2008 /// target-specific information returned by isMBBSafeToOutlineFrom. 2009 outliner::InstrType 2010 getOutliningType(MachineBasicBlock::iterator &MIT, unsigned Flags) const; 2011 2012 /// Optional target hook that returns true if \p MBB is safe to outline from, 2013 /// and returns any target-specific information in \p Flags. 2014 virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, 2015 unsigned &Flags) const; 2016 2017 /// Optional target hook which partitions \p MBB into outlinable ranges for 2018 /// instruction mapping purposes. Each range is defined by two iterators: 2019 /// [start, end). 2020 /// 2021 /// Ranges are expected to be ordered top-down. That is, ranges closer to the 2022 /// top of the block should come before ranges closer to the end of the block. 2023 /// 2024 /// Ranges cannot overlap. 2025 /// 2026 /// If an entire block is mappable, then its range is [MBB.begin(), MBB.end()) 2027 /// 2028 /// All instructions not present in an outlinable range are considered 2029 /// illegal. 2030 virtual SmallVector< 2031 std::pair<MachineBasicBlock::iterator, MachineBasicBlock::iterator>> 2032 getOutlinableRanges(MachineBasicBlock &MBB, unsigned &Flags) const { 2033 return {std::make_pair(MBB.begin(), MBB.end())}; 2034 } 2035 2036 /// Insert a custom frame for outlined functions. 2037 virtual void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, 2038 const outliner::OutlinedFunction &OF) const { 2039 llvm_unreachable( 2040 "Target didn't implement TargetInstrInfo::buildOutlinedFrame!"); 2041 } 2042 2043 /// Insert a call to an outlined function into the program. 2044 /// Returns an iterator to the spot where we inserted the call. This must be 2045 /// implemented by the target. 2046 virtual MachineBasicBlock::iterator 2047 insertOutlinedCall(Module &M, MachineBasicBlock &MBB, 2048 MachineBasicBlock::iterator &It, MachineFunction &MF, 2049 outliner::Candidate &C) const { 2050 llvm_unreachable( 2051 "Target didn't implement TargetInstrInfo::insertOutlinedCall!"); 2052 } 2053 2054 /// Return true if the function can safely be outlined from. 2055 /// A function \p MF is considered safe for outlining if an outlined function 2056 /// produced from instructions in F will produce a program which produces the 2057 /// same output for any set of given inputs. 2058 virtual bool isFunctionSafeToOutlineFrom(MachineFunction &MF, 2059 bool OutlineFromLinkOnceODRs) const { 2060 llvm_unreachable("Target didn't implement " 2061 "TargetInstrInfo::isFunctionSafeToOutlineFrom!"); 2062 } 2063 2064 /// Return true if the function should be outlined from by default. 2065 virtual bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const { 2066 return false; 2067 } 2068 2069 /// Produce the expression describing the \p MI loading a value into 2070 /// the physical register \p Reg. This hook should only be used with 2071 /// \p MIs belonging to VReg-less functions. 2072 virtual std::optional<ParamLoadedValue> 2073 describeLoadedValue(const MachineInstr &MI, Register Reg) const; 2074 2075 /// Given the generic extension instruction \p ExtMI, returns true if this 2076 /// extension is a likely candidate for being folded into an another 2077 /// instruction. 2078 virtual bool isExtendLikelyToBeFolded(MachineInstr &ExtMI, 2079 MachineRegisterInfo &MRI) const { 2080 return false; 2081 } 2082 2083 /// Return MIR formatter to format/parse MIR operands. Target can override 2084 /// this virtual function and return target specific MIR formatter. 2085 virtual const MIRFormatter *getMIRFormatter() const { 2086 if (!Formatter.get()) 2087 Formatter = std::make_unique<MIRFormatter>(); 2088 return Formatter.get(); 2089 } 2090 2091 /// Returns the target-specific default value for tail duplication. 2092 /// This value will be used if the tail-dup-placement-threshold argument is 2093 /// not provided. 2094 virtual unsigned getTailDuplicateSize(CodeGenOpt::Level OptLevel) const { 2095 return OptLevel >= CodeGenOpt::Aggressive ? 4 : 2; 2096 } 2097 2098 /// Returns the callee operand from the given \p MI. 2099 virtual const MachineOperand &getCalleeOperand(const MachineInstr &MI) const { 2100 return MI.getOperand(0); 2101 } 2102 2103 /// Return the uniformity behavior of the given instruction. 2104 virtual InstructionUniformity 2105 getInstructionUniformity(const MachineInstr &MI) const { 2106 return InstructionUniformity::Default; 2107 } 2108 2109 /// Returns true if the given \p MI defines a TargetIndex operand that can be 2110 /// tracked by their offset, can have values, and can have debug info 2111 /// associated with it. If so, sets \p Index and \p Offset of the target index 2112 /// operand. 2113 virtual bool isExplicitTargetIndexDef(const MachineInstr &MI, int &Index, 2114 int64_t &Offset) const { 2115 return false; 2116 } 2117 2118 private: 2119 mutable std::unique_ptr<MIRFormatter> Formatter; 2120 unsigned CallFrameSetupOpcode, CallFrameDestroyOpcode; 2121 unsigned CatchRetOpcode; 2122 unsigned ReturnOpcode; 2123 }; 2124 2125 /// Provide DenseMapInfo for TargetInstrInfo::RegSubRegPair. 2126 template <> struct DenseMapInfo<TargetInstrInfo::RegSubRegPair> { 2127 using RegInfo = DenseMapInfo<unsigned>; 2128 2129 static inline TargetInstrInfo::RegSubRegPair getEmptyKey() { 2130 return TargetInstrInfo::RegSubRegPair(RegInfo::getEmptyKey(), 2131 RegInfo::getEmptyKey()); 2132 } 2133 2134 static inline TargetInstrInfo::RegSubRegPair getTombstoneKey() { 2135 return TargetInstrInfo::RegSubRegPair(RegInfo::getTombstoneKey(), 2136 RegInfo::getTombstoneKey()); 2137 } 2138 2139 /// Reuse getHashValue implementation from 2140 /// std::pair<unsigned, unsigned>. 2141 static unsigned getHashValue(const TargetInstrInfo::RegSubRegPair &Val) { 2142 std::pair<unsigned, unsigned> PairVal = std::make_pair(Val.Reg, Val.SubReg); 2143 return DenseMapInfo<std::pair<unsigned, unsigned>>::getHashValue(PairVal); 2144 } 2145 2146 static bool isEqual(const TargetInstrInfo::RegSubRegPair &LHS, 2147 const TargetInstrInfo::RegSubRegPair &RHS) { 2148 return RegInfo::isEqual(LHS.Reg, RHS.Reg) && 2149 RegInfo::isEqual(LHS.SubReg, RHS.SubReg); 2150 } 2151 }; 2152 2153 } // end namespace llvm 2154 2155 #endif // LLVM_CODEGEN_TARGETINSTRINFO_H 2156