//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
    "disable-sched-hazard", cl::Hidden, cl::init(false),
    cl::desc("Disable hazard detection during preRA scheduling"));

TargetInstrInfo::~TargetInstrInfo() = default;

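// For example, operands declared with a pointer-like register class in the
// instruction description have isLookupPtrRegClass() set and are resolved
// through TRI->getPointerRegClass(), while a negative RegClass value (as on
// INSERT_SUBREG) means the opcode does not fix the class and nullptr is
// returned.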
const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.operands()[OpNum].RegClass;
  if (MCID.operands()[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

/// insertNoops - Insert noops into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoops(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator MI,
                                  unsigned Quantity) const {
  for (unsigned i = 0; i < Quantity; ++i)
    insertNoop(MBB, MI);
}

static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
  return strncmp(Str, MAI.getCommentString().data(),
                 MAI.getCommentString().size()) == 0;
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden in the target code to do that.
/// We implement a special case of the .space directive which takes only a
/// single integer argument in base 10 that is the size in bytes. This is a
/// restricted form of the GAS directive in that we only interpret
/// simple--i.e. not a logical or arithmetic expression--size values without
/// the optional fill value. This is primarily used for creating arbitrarily
/// sized inline asm blocks for testing purposes.
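/// For example, with MaxInstLength == 4 the string "nop\n.space 1024\n" is
/// measured as 4 + 1024 = 1028 bytes: "nop" counts as one maximum-length
/// instruction and the .space argument is added verbatim.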
unsigned TargetInstrInfo::getInlineAsmLength(
    const char *Str, const MCAsmInfo &MAI,
    const TargetSubtargetInfo *STI) const {
  // Count the number of instructions in the asm.
  bool AtInsnStart = true;
  unsigned Length = 0;
  const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0) {
      AtInsnStart = true;
    } else if (isAsmComment(Str, MAI)) {
      // Stop counting as an instruction after a comment until the next
      // separator.
      AtInsnStart = false;
    }

    if (AtInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
      unsigned AddLength = MaxInstLength;
      if (strncmp(Str, ".space", 6) == 0) {
        char *EStr;
        int SpaceSize;
        SpaceSize = strtol(Str + 6, &EStr, 10);
        SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
        while (*EStr != '\n' && isSpace(static_cast<unsigned char>(*EStr)))
          ++EStr;
        if (*EStr == '\0' || *EStr == '\n' ||
            isAsmComment(EStr, MAI)) // Successfully parsed .space argument
          AddLength = SpaceSize;
      }
      Length += AddLength;
      AtInsnStart = false;
    }
  }

  return Length;
}

/// ReplaceTailWithBranchTo - Delete the instruction Tail and everything after
/// it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug loc before erasing the instruction.
  DebugLoc DL = Tail->getDebugLoc();

  // Update call site info and remove all the dead instructions
  // from the end of MBB.
  while (Tail != MBB->end()) {
    auto MI = Tail++;
    if (MI->shouldUpdateCallSiteInfo())
      MBB->getParent()->eraseCallSiteInfo(&*MI);
    MBB->erase(MI);
  }

  // If MBB isn't immediately before NewDest, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}

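// Illustrative (not target-specific) example of the default commute: given
//   %d = OP %a, %b
// with Idx1 = 1 and Idx2 = 2, the result is
//   %d = OP %b, %a
// with the kill/undef/internal-read/renamable flags moved along with the
// swapped operands, and the tied-def register updated if %d was tied to one
// of the sources.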
MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                      bool NewMI, unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI.getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI.getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return nullptr;

  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
         "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
  Register Reg1 = MI.getOperand(Idx1).getReg();
  Register Reg2 = MI.getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
  // Avoid calling isRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  bool Reg1IsRenamable =
      Reg1.isPhysical() ? MI.getOperand(Idx1).isRenamable() : false;
  bool Reg2IsRenamable =
      Reg2.isPhysical() ? MI.getOperand(Idx2).isRenamable() : false;
  // If the destination is tied to either of the commuted source registers,
  // then it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  MachineInstr *CommutedMI = nullptr;
  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI.getMF();
    CommutedMI = MF.CloneMachineInstr(&MI);
  } else {
    CommutedMI = &MI;
  }

  if (HasDef) {
    CommutedMI->getOperand(0).setReg(Reg0);
    CommutedMI->getOperand(0).setSubReg(SubReg0);
  }
  CommutedMI->getOperand(Idx2).setReg(Reg1);
  CommutedMI->getOperand(Idx1).setReg(Reg2);
  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  // Avoid calling setIsRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  if (Reg1.isPhysical())
    CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
  if (Reg2.isPhysical())
    CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
  return CommutedMI;
}

MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
  // any commutable operand, which is done in the findCommutedOpIndices()
  // method called below.
  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
      !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI.isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

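// Worked example: with CommutableOpIdx1/2 = (1, 2), a query of
// (ResultIdx1, ResultIdx2) = (CommuteAnyOperandIndex, 2) resolves ResultIdx1
// to 1, and (CommuteAnyOperandIndex, CommuteAnyOperandIndex) resolves to
// (1, 2) directly; fixed indices that do not name the commutable pair make
// the function return false.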
bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
                                           unsigned &ResultIdx2,
                                           unsigned CommutableOpIdx1,
                                           unsigned CommutableOpIdx2) {
  if (ResultIdx1 == CommuteAnyOperandIndex &&
      ResultIdx2 == CommuteAnyOperandIndex) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
  } else
    // Check that the result operand indices match the given commutable
    // operand indices.
    return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
           (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);

  return true;
}

bool TargetInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI.isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MCID.isCommutable())
    return false;

  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  unsigned CommutableOpIdx1 = MCID.getNumDefs();
  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                            CommutableOpIdx1, CommutableOpIdx2))
    return false;

  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}

bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
  if (!MI.isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI.isBranch() && !MI.isBarrier())
    return true;
  if (!MI.isPredicable())
    return true;
  return !isPredicated(MI);
}

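// Sketch of the default behavior below: the predicate operands declared in
// the MCInstrDesc are overwritten in place with the operands from Pred. E.g.
// on a target whose predicate is a (condition-code, condition-register) pair,
// Pred would carry those two operands and each is copied into the matching
// isPredicate() slot of MI. This operand layout is hypothetical; actual
// layouts are target-defined.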
bool TargetInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  bool MadeChange = false;

  assert(!MI.isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MI.isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (MCID.operands()[i].isPredicate()) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

bool TargetInstrInfo::hasLoadFromStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isLoad() &&
        isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::hasStoreToStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isStore() &&
        isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!SubIdx) {
    Size = TRI->getSpillSize(*RC);
    Offset = 0;
    return true;
  }
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size.
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");

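  // On big-endian targets a subregister's bytes sit at the opposite end of
  // the spill slot, so mirror the offset within the slot. E.g. a 4-byte
  // subreg at little-endian offset 0 of an 8-byte slot lives at big-endian
  // offset 8 - (0 + 4) = 4.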
  if (!MF.getDataLayout().isLittleEndian()) {
    Offset = TRI->getSpillSize(*RC) - (Offset + Size);
  }
  return true;
}

void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    Register DestReg, unsigned SubIdx,
                                    const MachineInstr &Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
                                       const MachineInstr &MI1,
                                       const MachineRegisterInfo *MRI) const {
  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr &TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
    MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const {
  assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
  MachineFunction &MF = *MBB.getParent();
  return MF.cloneMachineInstrBundle(MBB, InsertBefore, Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
                                              unsigned FoldIdx) {
  assert(MI.isCopy() && "MI must be a COPY instruction");
  if (MI.getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  Register FoldReg = FoldOp.getReg();
  Register LiveReg = LiveOp.getReg();

  assert(FoldReg.isVirtual() && "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (LiveOp.getReg().isPhysical())
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}

MCInst TargetInstrInfo::getNop() const { llvm_unreachable("Not implemented"); }

std::pair<unsigned, unsigned>
TargetInstrInfo::getPatchpointUnfoldableRange(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::STACKMAP:
    // StackMapLiveValues are foldable
    return std::make_pair(0, StackMapOpers(&MI).getVarIdx());
  case TargetOpcode::PATCHPOINT:
    // For PatchPoint, the call args are not foldable (even if reported in the
    // stackmap e.g. via anyregcc).
    return std::make_pair(0, PatchPointOpers(&MI).getVarIdx());
  case TargetOpcode::STATEPOINT:
    // For statepoints, fold deopt and gc arguments, but not call arguments.
    return std::make_pair(MI.getNumDefs(), StatepointOpers(&MI).getVarIdx());
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }
}

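// Sketch of what folding does here: an operand in the live-value area that is
// spilled to FrameIndex is replaced in the rebuilt stackmap/patchpoint by the
// quadruple
//   <StackMaps::IndirectMemRefOp, SpillSize, FrameIndex, SpillOffset>
// which StackMaps later lowers to an indirect memory location entry.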
static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  unsigned NumDefs = 0;
  // getPatchpointUnfoldableRange aborts via llvm_unreachable if MI is not a
  // stackmap/patchpoint/statepoint.
  std::tie(NumDefs, StartIdx) = TII.getPatchpointUnfoldableRange(MI);

  unsigned DefToFoldIdx = MI.getNumOperands();

  // Return nullptr if any operands requested for folding are not foldable (not
  // part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < NumDefs) {
      assert(DefToFoldIdx == MI.getNumOperands() && "Folding multiple defs");
      DefToFoldIdx = Op;
    } else if (Op < StartIdx) {
      return nullptr;
    }
    if (MI.getOperand(Op).isTied())
      return nullptr;
  }

  MachineInstr *NewMI =
      MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return value, the metadata, or the function arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    if (i != DefToFoldIdx)
      MIB.add(MI.getOperand(i));

  for (unsigned i = StartIdx, e = MI.getNumOperands(); i < e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    unsigned TiedTo = e;
    (void)MI.isRegTiedToDefOperand(i, &TiedTo);

    if (is_contained(Ops, i)) {
      assert(TiedTo == e && "Cannot fold tied operands");
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else {
      MIB.add(MO);
      if (TiedTo < e) {
        assert(TiedTo < NumDefs && "Bad tied operand");
        if (TiedTo > DefToFoldIdx)
          --TiedTo;
        NewMI->tieOperands(TiedTo, NewMI->getNumOperands() - 1);
      }
    }
  }
  return NewMI;
}

MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops, int FI,
                                                 LiveIntervals *LIS,
                                                 VirtRegMap *VRM) const {
  auto Flags = MachineMemOperand::MONone;
  for (unsigned OpIdx : Ops)
    Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
                                          : MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI.getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // If we're not folding a load into a subreg, the size of the load is the
  // size of the spill slot. But if we are, we need to figure out what the
  // actual load size is.
  int64_t MemSize = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags & MachineMemOperand::MOStore) {
    MemSize = MFI.getObjectSize(FI);
  } else {
    for (unsigned OpIdx : Ops) {
      int64_t OpSize = MFI.getObjectSize(FI);

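      // Example: loading a 32-bit subregister out of a 64-bit spill slot
      // shrinks OpSize from the slot's 8 bytes down to 4.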
      if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
        unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
        if (SubRegSize > 0 && !(SubRegSize % 8))
          OpSize = SubRegSize / 8;
      }

      MemSize = std::max(MemSize, OpSize);
    }
  }

  assert(MemSize && "Did not expect a zero-sized stack slot");

  MachineInstr *NewMI = nullptr;

  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
  }

  if (NewMI) {
    NewMI->setMemRefs(MF, MI.memoperands());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
        MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
                                Flags, MemSize, MFI.getObjectAlign(FI));
    NewMI->addMemOperand(MF, MMO);

    // The pass "x86 speculative load hardening" always attaches symbols to
    // call instructions. We need to copy them from the old instruction.
    NewMI->cloneInstrSymbols(MF, MI);

    return NewMI;
  }

  // Straight COPY may fold as load/store.
  if (!MI.isCopy() || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI,
                        Register());
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI, Register());
  return &*--Pos;
}

MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr &LoadMI,
                                                 LiveIntervals *LIS) const {
  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned OpIdx : Ops)
    assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
#endif

  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
       MI.getOpcode() == TargetOpcode::PATCHPOINT ||
       MI.getOpcode() == TargetOpcode::STATEPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = &*MBB.insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
  }

  if (!NewMI)
    return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI.memoperands_empty()) {
    NewMI->setMemRefs(MF, LoadMI.memoperands());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MF, MI.memoperands());
    for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
                                    E = LoadMI.memoperands_end();
         I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}

bool TargetInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  const MachineOperand &Op1 = Inst.getOperand(1);
  const MachineOperand &Op2 = Inst.getOperand(2);
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // We need virtual register definitions for the operands that we will
  // reassociate.
  MachineInstr *MI1 = nullptr;
  MachineInstr *MI2 = nullptr;
  if (Op1.isReg() && Op1.getReg().isVirtual())
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && Op2.getReg().isVirtual())
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  // And at least one operand must be defined in MBB.
  return MI1 && MI2 && (MI1->getParent() == MBB || MI2->getParent() == MBB);
}

bool TargetInstrInfo::areOpcodesEqualOrInverse(unsigned Opcode1,
                                               unsigned Opcode2) const {
  return Opcode1 == Opcode2 || getInverseOpcode(Opcode1) == Opcode2;
}

bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                             bool &Commuted) const {
  const MachineBasicBlock *MBB = Inst.getParent();
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
  unsigned Opcode = Inst.getOpcode();

  // If only one operand has the same or inverse opcode and it's the second
  // source operand, the operands must be commuted.
  Commuted = !areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
             areOpcodesEqualOrInverse(Opcode, MI2->getOpcode());
  if (Commuted)
    std::swap(MI1, MI2);

  // 1. The previous instruction must be the same type as Inst.
  // 2. The previous instruction must also be associative/commutative or be the
  //    inverse of such an operation (this can be different even for
  //    instructions with the same opcode if traits like fast-math-flags are
  //    included).
  // 3. The previous instruction must have virtual register definitions for its
  //    operands in the same basic block as Inst.
  // 4. The previous instruction's result must only be used by Inst.
  return areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
         (isAssociativeAndCommutative(*MI1) ||
          isAssociativeAndCommutative(*MI1, /* Invert */ true)) &&
         hasReassociableOperands(*MI1, MBB) &&
         MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}

// 1. The operation must be associative and commutative or be the inverse of
//    such an operation.
// 2. The instruction must have virtual register definitions for its
//    operands in the same basic block.
// 3. The instruction must have a reassociable sibling.
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
                                               bool &Commuted) const {
  return (isAssociativeAndCommutative(Inst) ||
          isAssociativeAndCommutative(Inst, /* Invert */ true)) &&
         hasReassociableOperands(Inst, Inst.getParent()) &&
         hasReassociableSibling(Inst, Commuted);
}

// The concept of the reassociation pass is that these operations can benefit
// from this kind of transformation:
//
// A = ? op ?
// B = A op X (Prev)
// C = B op Y (Root)
// -->
// A = ? op ?
// B = X op Y
// C = A op B
//
// breaking the dependency between A and B, allowing them to be executed in
// parallel (or back-to-back in a pipeline) instead of depending on each other.

// FIXME: This has the potential to be expensive (compile time) while not
// improving the code at all. Some ways to limit the overhead:
// 1. Track successful transforms; bail out if hit rate gets too low.
// 2. Only enable at -O3 or some other non-default optimization level.
// 3. Pre-screen pattern candidates here: if an operand of the previous
//    instruction is known to not increase the critical path, then don't match
//    that pattern.
bool TargetInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root, SmallVectorImpl<MachineCombinerPattern> &Patterns,
    bool DoRegPressureReduce) const {
  bool Commute;
  if (isReassociationCandidate(Root, Commute)) {
    // We found a sequence of instructions that may be suitable for a
    // reassociation of operands to increase ILP. Specify each commutation
    // possibility for the Prev instruction in the sequence and let the
    // machine combiner decide if changing the operands is worthwhile.
    if (Commute) {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
    } else {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
    }
    return true;
  }

  return false;
}

/// Return true when a code sequence can improve loop throughput.
bool
TargetInstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
  return false;
}

std::pair<unsigned, unsigned>
TargetInstrInfo::getReassociationOpcodes(MachineCombinerPattern Pattern,
                                         const MachineInstr &Root,
                                         const MachineInstr &Prev) const {
  bool AssocCommutRoot = isAssociativeAndCommutative(Root);
  bool AssocCommutPrev = isAssociativeAndCommutative(Prev);

  // Early exit if both opcodes are associative and commutative. It's a trivial
  // reassociation when we only change operands order. In this case opcodes are
  // not required to have inverse versions.
  if (AssocCommutRoot && AssocCommutPrev) {
    assert(Root.getOpcode() == Prev.getOpcode() && "Expected to be equal");
    return std::make_pair(Root.getOpcode(), Root.getOpcode());
  }

  // At least one instruction is not associative or commutative.
  // Since we have matched one of the reassociation patterns, we expect that the
  // instructions' opcodes are equal or one of them is the inversion of the
  // other.
  assert(areOpcodesEqualOrInverse(Root.getOpcode(), Prev.getOpcode()) &&
         "Incorrectly matched pattern");
  unsigned AssocCommutOpcode = Root.getOpcode();
  unsigned InverseOpcode = *getInverseOpcode(Root.getOpcode());
  if (!AssocCommutRoot)
    std::swap(AssocCommutOpcode, InverseOpcode);

  // The transformation rule (`+` is any associative and commutative binary
  // operation, `-` is the inverse):
  // REASSOC_AX_BY:
  //   (A + X) + Y => A + (X + Y)
  //   (A + X) - Y => A + (X - Y)
  //   (A - X) + Y => A - (X - Y)
  //   (A - X) - Y => A - (X + Y)
  // REASSOC_XA_BY:
  //   (X + A) + Y => (X + Y) + A
  //   (X + A) - Y => (X - Y) + A
  //   (X - A) + Y => (X + Y) - A
  //   (X - A) - Y => (X - Y) - A
  // REASSOC_AX_YB:
  //   Y + (A + X) => (Y + X) + A
  //   Y - (A + X) => (Y - X) - A
  //   Y + (A - X) => (Y - X) + A
  //   Y - (A - X) => (Y + X) - A
  // REASSOC_XA_YB:
  //   Y + (X + A) => (Y + X) + A
  //   Y - (X + A) => (Y - X) - A
  //   Y + (X - A) => (Y + X) - A
  //   Y - (X - A) => (Y - X) + A
  switch (Pattern) {
  default:
    llvm_unreachable("Unexpected pattern");
  case MachineCombinerPattern::REASSOC_AX_BY:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    break;
  case MachineCombinerPattern::REASSOC_XA_BY:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    break;
  case MachineCombinerPattern::REASSOC_XA_YB:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    break;
  }
  llvm_unreachable("Unhandled combination");
}

// Return a pair of boolean flags showing if the new root and new prev operands
// must be swapped. See visual example of the rule in
// TargetInstrInfo::getReassociationOpcodes.
static std::pair<bool, bool> mustSwapOperands(MachineCombinerPattern Pattern) {
  switch (Pattern) {
  default:
    llvm_unreachable("Unexpected pattern");
  case MachineCombinerPattern::REASSOC_AX_BY:
    return {false, false};
  case MachineCombinerPattern::REASSOC_XA_BY:
    return {true, false};
  case MachineCombinerPattern::REASSOC_AX_YB:
    return {true, true};
  case MachineCombinerPattern::REASSOC_XA_YB:
    return {true, true};
  }
}

/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getMF();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

  // This array encodes the operand index for each parameter because the
  // operands may be commuted. Each row corresponds to a pattern value,
  // and each column specifies the index of A, B, X, Y.
  unsigned OpIdx[4][4] = {
    { 1, 1, 2, 2 },
    { 1, 2, 2, 1 },
    { 2, 1, 1, 2 },
    { 2, 2, 1, 1 }
  };

  int Row;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
  default: llvm_unreachable("unexpected MachineCombinerPattern");
  }

  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
  MachineOperand &OpC = Root.getOperand(0);

  Register RegA = OpA.getReg();
  Register RegB = OpB.getReg();
  Register RegX = OpX.getReg();
  Register RegY = OpY.getReg();
  Register RegC = OpC.getReg();

  if (RegA.isVirtual())
    MRI.constrainRegClass(RegA, RC);
  if (RegB.isVirtual())
    MRI.constrainRegClass(RegB, RC);
  if (RegX.isVirtual())
    MRI.constrainRegClass(RegX, RC);
  if (RegY.isVirtual())
    MRI.constrainRegClass(RegY, RC);
  if (RegC.isVirtual())
    MRI.constrainRegClass(RegC, RC);

  // Create a new virtual register for the result of (X op Y) instead of
  // recycling RegB because the MachineCombiner's computation of the critical
  // path requires a new register definition rather than an existing one.
  Register NewVR = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

  auto [NewRootOpc, NewPrevOpc] = getReassociationOpcodes(Pattern, Root, Prev);
  bool KillA = OpA.isKill();
  bool KillX = OpX.isKill();
  bool KillY = OpY.isKill();
  bool KillNewVR = true;

  auto [SwapRootOperands, SwapPrevOperands] = mustSwapOperands(Pattern);

  if (SwapPrevOperands) {
    std::swap(RegX, RegY);
    std::swap(KillX, KillY);
  }

  // Create new instructions for insertion.
  MachineInstrBuilder MIB1 =
      BuildMI(*MF, MIMetadata(Prev), TII->get(NewPrevOpc), NewVR)
          .addReg(RegX, getKillRegState(KillX))
          .addReg(RegY, getKillRegState(KillY))
          .setMIFlags(Prev.getFlags());

  if (SwapRootOperands) {
    std::swap(RegA, NewVR);
    std::swap(KillA, KillNewVR);
  }

  MachineInstrBuilder MIB2 =
      BuildMI(*MF, MIMetadata(Root), TII->get(NewRootOpc), RegC)
          .addReg(RegA, getKillRegState(KillA))
          .addReg(NewVR, getKillRegState(KillNewVR))
          .setMIFlags(Root.getFlags());

  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

  // Record new instructions for insertion and old instructions for deletion.
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(&Prev);
  DelInstrs.push_back(&Root);
}

void TargetInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
  MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();

  // Select the previous instruction in the sequence based on the input pattern.
  MachineInstr *Prev = nullptr;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_XA_BY:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_YB:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
    break;
  default:
    break;
  }

  assert(Prev && "Unknown pattern for machine combiner");

  // Don't reassociate if Prev and Root are in different blocks.
  if (Prev->getParent() != Root.getParent())
    return;

  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
}

bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
    const MachineInstr &MI) const {
  const MachineFunction &MF = *MI.getMF();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
    return false;
  Register DefReg = MI.getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (DefReg.isVirtual() && MI.getOperand(0).getSubReg() &&
      MI.readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
      MI.hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI.isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad())
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg()) continue;
    Register Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (Reg.isPhysical()) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}

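// Example of the sign convention below, assuming a target whose call-frame
// pseudos are named ADJCALLSTACKDOWN/ADJCALLSTACKUP (names vary by target):
// on a stack-grows-down machine, "ADJCALLSTACKDOWN 16" yields +16 (the call
// frame grows) and the matching "ADJCALLSTACKUP 16" yields -16, after the
// frame size has been run through alignSPAdjust().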
int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getMF();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
      TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (!isFrameInstr(MI))
    return 0;

  int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));

  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
      (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI.isTerminator() || MI.isPosition())
    return true;

  // INLINEASM_BR can jump to another block
  if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::CreateTargetMIHazardRecognizer(
    const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

// Default implementation of getMemOperandWithOffset.
bool TargetInstrInfo::getMemOperandWithOffset(
    const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset,
    bool &OffsetIsScalable, const TargetRegisterInfo *TRI) const {
  SmallVector<const MachineOperand *, 4> BaseOps;
  unsigned Width;
  if (!getMemOperandsWithOffsetWidth(MI, BaseOps, Offset, OffsetIsScalable,
                                     Width, TRI) ||
      BaseOps.size() != 1)
    return false;
  BaseOp = BaseOps.front();
  return true;
}

//===----------------------------------------------------------------------===//
//  SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}

//===----------------------------------------------------------------------===//
//  MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                         const MachineInstr &MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI.getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}

/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr &DefMI) const {
  if (DefMI.isTransient())
    return 0;
  if (DefMI.mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI.getOpcode()))
    return SchedModel.HighLatency;
  return 1;
}

unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
  return 0;
}

unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                          const MachineInstr &MI,
                                          unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI.mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
}

bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx) const {
  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI.getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}
1306
std::optional<ParamLoadedValue>
TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
                                     Register Reg) const {
  const MachineFunction *MF = MI.getMF();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  DIExpression *Expr = DIExpression::get(MF->getFunction().getContext(), {});
  int64_t Offset;
  bool OffsetIsScalable;

  // To simplify the sub-register handling, verify that we only need to
  // consider physical registers.
  assert(MF->getProperties().hasProperty(
      MachineFunctionProperties::Property::NoVRegs));

  if (auto DestSrc = isCopyInstr(MI)) {
    Register DestReg = DestSrc->Destination->getReg();

    // If the copy destination is the forwarding reg, describe the forwarding
    // reg using the copy source as the backup location. Example:
    //
    //   x0 = MOV x7
    //   call callee(x0)      ; x0 described as x7
    if (Reg == DestReg)
      return ParamLoadedValue(*DestSrc->Source, Expr);

    // Cases where super- or sub-registers need to be described should be
    // handled by the target's hook implementation.
    assert(!TRI->isSuperOrSubRegisterEq(Reg, DestReg) &&
           "TargetInstrInfo::describeLoadedValue can't describe super- or "
           "sub-regs for copy instructions");
    return std::nullopt;
  } else if (auto RegImm = isAddImmediate(MI, Reg)) {
    Register SrcReg = RegImm->Reg;
    Offset = RegImm->Imm;
    Expr = DIExpression::prepend(Expr, DIExpression::ApplyOffset, Offset);
    return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
  } else if (MI.hasOneMemOperand()) {
    // Only describe memory which provably does not escape the function. As
    // described in llvm.org/PR43343, escaped memory may be clobbered by the
    // callee (or by another thread).
    const auto &TII = MF->getSubtarget().getInstrInfo();
    const MachineFrameInfo &MFI = MF->getFrameInfo();
    const MachineMemOperand *MMO = MI.memoperands()[0];
    const PseudoSourceValue *PSV = MMO->getPseudoValue();

    // If the address points to "special" memory (e.g. a spill slot), it's
    // sufficient to check that it isn't aliased by any high-level IR value.
    if (!PSV || PSV->mayAlias(&MFI))
      return std::nullopt;

    const MachineOperand *BaseOp;
    if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable,
                                      TRI))
      return std::nullopt;

    // FIXME: Scalable offsets are not yet handled in the offset code below.
    if (OffsetIsScalable)
      return std::nullopt;

    // TODO: Can currently only handle mem instructions with a single define.
    // An example from the x86 target:
    //   ...
    //   DIV64m $rsp, 1, $noreg, 24, $noreg, implicit-def dead $rax, implicit-def $rdx
    //   ...
    if (MI.getNumExplicitDefs() != 1)
      return std::nullopt;

    // TODO: In what way do we need to take Reg into consideration here?

    SmallVector<uint64_t, 8> Ops;
    DIExpression::appendOffset(Ops, Offset);
    Ops.push_back(dwarf::DW_OP_deref_size);
    Ops.push_back(MMO->getSize());
    Expr = DIExpression::prependOpcodes(Expr, Ops);
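    // For example (illustrative), Offset = 24 with an 8-byte access yields
    // the expression: DW_OP_plus_uconst 24, DW_OP_deref_size 8.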
    return ParamLoadedValue(*BaseOp, Expr);
  }

  return std::nullopt;
}

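// Worked example (illustrative, AArch64-style syntax): for
//   $x0 = ADDXri $sp, 16, 0
// a query for $x0 takes the isAddImmediate path above and describes the
// parameter as ($sp, DW_OP_plus_uconst 16), i.e. the stack pointer plus 16.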
/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx,
                                       const MachineInstr &UseMI,
                                       unsigned UseIdx) const {
  unsigned DefClass = DefMI.getDesc().getSchedClass();
  unsigned UseClass = UseMI.getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

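// Illustrative use (names hypothetical): for a load def (operand 0) feeding
// an add use (operand 1), a caller might write
//   int Lat = TII->getOperandLatency(Itins, *DefMI, 0, *UseMI, 1);
// and fall back to a default latency when the itinerary returns -1.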
bool TargetInstrInfo::getRegSequenceInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert((MI.isRegSequence() ||
          MI.isRegSequenceLike()) && "Instruction does not have the proper type");

  if (!MI.isRegSequence())
    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);

  // We are looking at:
  //   Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
       OpIdx += 2) {
    const MachineOperand &MOReg = MI.getOperand(OpIdx);
    if (MOReg.isUndef())
      continue;
    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
    assert(MOSubIdx.isImm() &&
           "One of the subindices of the reg_sequence is not an immediate");
    // Record Reg:SubReg, SubIdx.
    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
                                            (unsigned)MOSubIdx.getImm()));
  }
  return true;
}

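// Worked example (MIR, illustrative):
//   %dst = REG_SEQUENCE %v0, %subreg.sub0, %v1, %subreg.sub1
// populates InputRegs with (%v0, NoSubReg, sub0) and (%v1, NoSubReg, sub1);
// undef input operands are skipped.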
bool TargetInstrInfo::getExtractSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert((MI.isExtractSubreg() ||
          MI.isExtractSubregLike()) && "Instruction does not have the proper type");

  if (!MI.isExtractSubreg())
    return getExtractSubregLikeInputs(MI, DefIdx, InputReg);

  // We are looking at:
  //   Def = EXTRACT_SUBREG v0.sub1, sub0.
  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
  const MachineOperand &MOReg = MI.getOperand(1);
  if (MOReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(2);
  assert(MOSubIdx.isImm() &&
         "The subindex of the extract_subreg is not an immediate");

  InputReg.Reg = MOReg.getReg();
  InputReg.SubReg = MOReg.getSubReg();
  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

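// Worked example (MIR, illustrative):
//   %dst = EXTRACT_SUBREG %v0.sub1, sub0
// fills InputReg with (%v0, sub1, sub0); an undef source makes the query
// return false.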
bool TargetInstrInfo::getInsertSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
  assert((MI.isInsertSubreg() ||
          MI.isInsertSubregLike()) && "Instruction does not have the proper type");

  if (!MI.isInsertSubreg())
    return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);

  // We are looking at:
  //   Def = INSERT_SUBREG v0, v1, sub0.
  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
  const MachineOperand &MOBaseReg = MI.getOperand(1);
  const MachineOperand &MOInsertedReg = MI.getOperand(2);
  if (MOInsertedReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(3);
  assert(MOSubIdx.isImm() &&
         "The subindex of the insert_subreg is not an immediate");
  BaseReg.Reg = MOBaseReg.getReg();
  BaseReg.SubReg = MOBaseReg.getSubReg();

  InsertedReg.Reg = MOInsertedReg.getReg();
  InsertedReg.SubReg = MOInsertedReg.getSubReg();
  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

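// Worked example (MIR, illustrative):
//   %dst = INSERT_SUBREG %base, %ins, sub0
// yields BaseReg = (%base, NoSubReg) and InsertedReg = (%ins, NoSubReg, sub0);
// an undef inserted value makes the query return false.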
// Returns a MIRPrinter comment for this machine operand.
std::string TargetInstrInfo::createMIROperandComment(
    const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
    const TargetRegisterInfo *TRI) const {

  if (!MI.isInlineAsm())
    return "";

  std::string Flags;
  raw_string_ostream OS(Flags);

  if (OpIdx == InlineAsm::MIOp_ExtraInfo) {
    // Print HasSideEffects, MayLoad, MayStore, IsAlignStack.
    unsigned ExtraInfo = Op.getImm();
    bool First = true;
    for (StringRef Info : InlineAsm::getExtraInfoNames(ExtraInfo)) {
      if (!First)
        OS << " ";
      First = false;
      OS << Info;
    }

    return OS.str();
  }

  int FlagIdx = MI.findInlineAsmFlagIdx(OpIdx);
  if (FlagIdx < 0 || (unsigned)FlagIdx != OpIdx)
    return "";

  assert(Op.isImm() && "Expected flag operand to be an immediate");
  // Pretty print the inline asm operand descriptor.
  unsigned Flag = Op.getImm();
  unsigned Kind = InlineAsm::getKind(Flag);
  OS << InlineAsm::getKindName(Kind);

  unsigned RCID = 0;
  if (!InlineAsm::isImmKind(Flag) && !InlineAsm::isMemKind(Flag) &&
      InlineAsm::hasRegClassConstraint(Flag, RCID)) {
    if (TRI) {
      OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
    } else
      OS << ":RC" << RCID;
  }

  if (InlineAsm::isMemKind(Flag)) {
    unsigned MCID = InlineAsm::getMemoryConstraintID(Flag);
    OS << ":" << InlineAsm::getMemConstraintName(MCID);
  }

  unsigned TiedTo = 0;
  if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo))
    OS << " tiedto:$" << TiedTo;

  return OS.str();
}

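// Example output (illustrative, not exhaustive): for an inline asm
// register-use operand constrained to a GPR32 class and tied to the first
// def, the printed comment would resemble "reguse:GPR32 tiedto:$0"; a memory
// operand might print as "mem:m". The exact spellings come from
// InlineAsm::getKindName and InlineAsm::getMemConstraintName.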
TargetInstrInfo::PipelinerLoopInfo::~PipelinerLoopInfo() = default;

void TargetInstrInfo::mergeOutliningCandidateAttributes(
    Function &F, std::vector<outliner::Candidate> &Candidates) const {
  // Include target features from an arbitrary candidate for the outlined
  // function. This makes sure the outlined function knows what kinds of
  // instructions are going into it. This is fine, since all parent functions
  // must necessarily support the instructions that are in the outlined region.
  outliner::Candidate &FirstCand = Candidates.front();
  const Function &ParentFn = FirstCand.getMF()->getFunction();
  if (ParentFn.hasFnAttribute("target-features"))
    F.addFnAttr(ParentFn.getFnAttribute("target-features"));
  if (ParentFn.hasFnAttribute("target-cpu"))
    F.addFnAttr(ParentFn.getFnAttribute("target-cpu"));

  // Set nounwind, so we don't generate eh_frame.
  if (llvm::all_of(Candidates, [](const outliner::Candidate &C) {
        return C.getMF()->getFunction().hasFnAttribute(Attribute::NoUnwind);
      }))
    F.addFnAttr(Attribute::NoUnwind);
}

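// Illustrative effect: if every candidate's parent function carries
// "target-features"="+neon" and nounwind, the outlined function F inherits
// both, so the backend treats it consistently with its call sites and emits
// no eh_frame for it.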
bool TargetInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                             unsigned &Flags) const {
  // Some instrumentations create special TargetOpcodes at the start of the
  // function which expand to special code sequences that must remain in
  // place.
  auto First = MBB.getFirstNonDebugInstr();
  if (First != MBB.end() &&
      (First->getOpcode() == TargetOpcode::FENTRY_CALL ||
       First->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_ENTER))
    return false;

  return true;
}

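// Illustrative rejection: a block that begins with
//   FENTRY_CALL
//   ...
// is not safe to outline from, since the instrumentation sequence must stay
// at the function entry rather than move into an outlined callee.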