//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cctype>

using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
    "disable-sched-hazard", cl::Hidden, cl::init(false),
    cl::desc("Disable hazard detection during preRA scheduling"));

TargetInstrInfo::~TargetInstrInfo() {
}

const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.OpInfo[OpNum].RegClass;
  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

/// insertNoops - Insert noops into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoops(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator MI,
                                  unsigned Quantity) const {
  for (unsigned i = 0; i < Quantity; ++i)
    insertNoop(MBB, MI);
}

static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
  return strncmp(Str, MAI.getCommentString().data(),
                 MAI.getCommentString().size()) == 0;
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden in the target code to do that.
/// We implement a special case of the .space directive which takes only a
/// single integer argument in base 10 that is the size in bytes. This is a
/// restricted form of the GAS directive in that we only interpret
/// simple--i.e. not a logical or arithmetic expression--size values without
/// the optional fill value. This is primarily used for creating arbitrary
/// sized inline asm blocks for testing purposes.
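/// For example, on a target whose maximum instruction length is 4 bytes,
/// the string "nop\n nop\n .space 8" would measure 4 + 4 + 8 = 16 bytes
/// (the mnemonics here are illustrative, not checked against any target).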
unsigned TargetInstrInfo::getInlineAsmLength(
    const char *Str,
    const MCAsmInfo &MAI, const TargetSubtargetInfo *STI) const {
  // Count the number of instructions in the asm.
  bool AtInsnStart = true;
  unsigned Length = 0;
  const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0) {
      AtInsnStart = true;
    } else if (isAsmComment(Str, MAI)) {
      // Stop counting as an instruction after a comment until the next
      // separator.
      AtInsnStart = false;
    }

    if (AtInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
      unsigned AddLength = MaxInstLength;
      if (strncmp(Str, ".space", 6) == 0) {
        char *EStr;
        int SpaceSize;
        SpaceSize = strtol(Str + 6, &EStr, 10);
        SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
        while (*EStr != '\n' && isSpace(static_cast<unsigned char>(*EStr)))
          ++EStr;
        if (*EStr == '\0' || *EStr == '\n' ||
            isAsmComment(EStr, MAI)) // Successfully parsed .space argument
          AddLength = SpaceSize;
      }
      Length += AddLength;
      AtInsnStart = false;
    }
  }

  return Length;
}

/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug loc before erasing the instruction.
  DebugLoc DL = Tail->getDebugLoc();

  // Update call site info and remove all the dead instructions
  // from the end of MBB.
  while (Tail != MBB->end()) {
    auto MI = Tail++;
    if (MI->shouldUpdateCallSiteInfo())
      MBB->getParent()->eraseCallSiteInfo(&*MI);
    MBB->erase(MI);
  }

  // If MBB isn't immediately before NewDest, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}

MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                      bool NewMI, unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI.getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI.getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return nullptr;

  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
         "TargetInstrInfo::commuteInstructionImpl(): not commutable operands.");
  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
  Register Reg1 = MI.getOperand(Idx1).getReg();
  Register Reg2 = MI.getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
  // Avoid calling isRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  bool Reg1IsRenamable = Register::isPhysicalRegister(Reg1)
                             ? MI.getOperand(Idx1).isRenamable()
                             : false;
  bool Reg2IsRenamable = Register::isPhysicalRegister(Reg2)
                             ? MI.getOperand(Idx2).isRenamable()
                             : false;
  // If the destination is tied to either of the commuted source registers,
  // then it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  MachineInstr *CommutedMI = nullptr;
  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI.getMF();
    CommutedMI = MF.CloneMachineInstr(&MI);
  } else {
    CommutedMI = &MI;
  }

  if (HasDef) {
    CommutedMI->getOperand(0).setReg(Reg0);
    CommutedMI->getOperand(0).setSubReg(SubReg0);
  }
  CommutedMI->getOperand(Idx2).setReg(Reg1);
  CommutedMI->getOperand(Idx1).setReg(Reg2);
  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  // Avoid calling setIsRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  if (Register::isPhysicalRegister(Reg1))
    CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
  if (Register::isPhysicalRegister(Reg2))
    CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
  return CommutedMI;
}
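
// For illustration (hypothetical virtual registers): commuting
// "%0 = ADD %1(kill), %2" with Idx1 = 1 and Idx2 = 2 yields
// "%0 = ADD %2, %1(kill)"; the kill/undef/internal-read/renamable flags
// travel with the registers they describe.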

MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
  // any commutable operand, which is done in the findCommutedOpIndices()
  // method called below.
  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
      !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI.isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
                                           unsigned &ResultIdx2,
                                           unsigned CommutableOpIdx1,
                                           unsigned CommutableOpIdx2) {
  if (ResultIdx1 == CommuteAnyOperandIndex &&
      ResultIdx2 == CommuteAnyOperandIndex) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
  } else
    // Check that the result operand indices match the given commutable
    // operand indices.
    return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
           (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);

  return true;
}
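
// Example: with CommutableOpIdx1 = 1 and CommutableOpIdx2 = 2, a caller
// passing (ResultIdx1 = CommuteAnyOperandIndex, ResultIdx2 = 2) gets
// ResultIdx1 = 1, while (ResultIdx1 = 3, ResultIdx2 = CommuteAnyOperandIndex)
// returns false because operand 3 is not one of the commutable operands.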

bool TargetInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI.isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MCID.isCommutable())
    return false;

  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  unsigned CommutableOpIdx1 = MCID.getNumDefs();
  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                            CommutableOpIdx1, CommutableOpIdx2))
    return false;

  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}

bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
  if (!MI.isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI.isBranch() && !MI.isBarrier())
    return true;
  if (!MI.isPredicable())
    return true;
  return !isPredicated(MI);
}

bool TargetInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  bool MadeChange = false;

  assert(!MI.isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MI.isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

bool TargetInstrInfo::hasLoadFromStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isLoad() &&
        dyn_cast_or_null<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::hasStoreToStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isStore() &&
        dyn_cast_or_null<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!SubIdx) {
    Size = TRI->getSpillSize(*RC);
    Offset = 0;
    return true;
  }
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size.
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");

  if (!MF.getDataLayout().isLittleEndian()) {
    Offset = TRI->getSpillSize(*RC) - (Offset + Size);
  }
  return true;
}
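
// Example (hypothetical register class): for a 16-byte spill slot and a
// subregister index covering bits [64, 128), this computes Size = 8 and
// Offset = 8 on a little-endian target, but Offset = 0 on a big-endian
// target, where the slot's low addresses hold the most significant bytes.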

void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    Register DestReg, unsigned SubIdx,
                                    const MachineInstr &Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
                                       const MachineInstr &MI1,
                                       const MachineRegisterInfo *MRI) const {
  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr &TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
    MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const {
  assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
  MachineFunction &MF = *MBB.getParent();
  return MF.CloneMachineInstrBundle(MBB, InsertBefore, Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
                                              unsigned FoldIdx) {
  assert(MI.isCopy() && "MI must be a COPY instruction");
  if (MI.getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  Register FoldReg = FoldOp.getReg();
  Register LiveReg = LiveOp.getReg();

  assert(Register::isVirtualRegister(FoldReg) && "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (Register::isPhysicalRegister(LiveOp.getReg()))
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}
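
// For example (hypothetical operands): in "%1:gr64 = COPY $rax", folding
// operand 0 is allowed because $rax is contained in GR64, so GR64 is
// returned; a COPY between incompatible classes returns nullptr instead.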

void TargetInstrInfo::getNoop(MCInst &NopInst) const {
  llvm_unreachable("Not implemented");
}

static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  unsigned NumDefs = 0;
  switch (MI.getOpcode()) {
  case TargetOpcode::STACKMAP: {
    // StackMapLiveValues are foldable.
    StartIdx = StackMapOpers(&MI).getVarIdx();
    break;
  }
  case TargetOpcode::PATCHPOINT: {
    // For PatchPoint, the call args are not foldable (even if reported in the
    // stackmap e.g. via anyregcc).
    StartIdx = PatchPointOpers(&MI).getVarIdx();
    break;
  }
  case TargetOpcode::STATEPOINT: {
    // For statepoints, fold deopt and gc arguments, but not call arguments.
    StartIdx = StatepointOpers(&MI).getVarIdx();
    NumDefs = MI.getNumDefs();
    break;
  }
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }

  unsigned DefToFoldIdx = MI.getNumOperands();

  // Return nullptr if any operands requested for folding are not foldable
  // (not part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < NumDefs) {
      assert(DefToFoldIdx == MI.getNumOperands() && "Folding multiple defs");
      DefToFoldIdx = Op;
    } else if (Op < StartIdx) {
      return nullptr;
    }
    if (MI.getOperand(Op).isTied())
      return nullptr;
  }

  MachineInstr *NewMI =
      MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return value, the metadata, or the function arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    if (i != DefToFoldIdx)
      MIB.add(MI.getOperand(i));

  for (unsigned i = StartIdx, e = MI.getNumOperands(); i < e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    unsigned TiedTo = e;
    (void)MI.isRegTiedToDefOperand(i, &TiedTo);

    if (is_contained(Ops, i)) {
      assert(TiedTo == e && "Cannot fold tied operands");
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
          MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else {
      MIB.add(MO);
      if (TiedTo < e) {
        assert(TiedTo < NumDefs && "Bad tied operand");
        if (TiedTo > DefToFoldIdx)
          --TiedTo;
        NewMI->tieOperands(TiedTo, NewMI->getNumOperands() - 1);
      }
    }
  }
  return NewMI;
}

MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops, int FI,
                                                 LiveIntervals *LIS,
                                                 VirtRegMap *VRM) const {
  auto Flags = MachineMemOperand::MONone;
  for (unsigned OpIdx : Ops)
    Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
                                          : MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI.getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // If we're not folding a load into a subreg, the size of the load is the
  // size of the spill slot. But if we are, we need to figure out what the
  // actual load size is.
  int64_t MemSize = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags & MachineMemOperand::MOStore) {
    MemSize = MFI.getObjectSize(FI);
  } else {
    for (unsigned OpIdx : Ops) {
      int64_t OpSize = MFI.getObjectSize(FI);

      if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
        unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
        if (SubRegSize > 0 && !(SubRegSize % 8))
          OpSize = SubRegSize / 8;
      }

      MemSize = std::max(MemSize, OpSize);
    }
  }

  assert(MemSize && "Did not expect a zero-sized stack slot");

  MachineInstr *NewMI = nullptr;

  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
  }

  if (NewMI) {
    NewMI->setMemRefs(MF, MI.memoperands());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
        MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
                                Flags, MemSize, MFI.getObjectAlign(FI));
    NewMI->addMemOperand(MF, MMO);

    // The pass "x86 speculative load hardening" always attaches symbols to
    // call instructions. We need to copy them from the old instruction.
    NewMI->cloneInstrSymbols(MF, MI);

    return NewMI;
  }

  // A straight COPY may fold as a load/store.
  if (!MI.isCopy() || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return &*--Pos;
}
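
// For example (hypothetical vregs): folding operand 0 of "%0 = COPY %1"
// into frame index FI spills %1 with storeRegToStackSlot, while folding
// operand 1 of the same COPY reloads %0 with loadRegFromStackSlot.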

MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr &LoadMI,
                                                 LiveIntervals *LIS) const {
  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned OpIdx : Ops)
    assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
#endif

  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
       MI.getOpcode() == TargetOpcode::PATCHPOINT ||
       MI.getOpcode() == TargetOpcode::STATEPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = &*MBB.insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
  }

  if (!NewMI)
    return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI.memoperands_empty()) {
    NewMI->setMemRefs(MF, LoadMI.memoperands());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MF, MI.memoperands());
    for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
                                    E = LoadMI.memoperands_end();
         I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}


bool TargetInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  const MachineOperand &Op1 = Inst.getOperand(1);
  const MachineOperand &Op2 = Inst.getOperand(2);
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // We need virtual register definitions for the operands that we will
  // reassociate.
  MachineInstr *MI1 = nullptr;
  MachineInstr *MI2 = nullptr;
  if (Op1.isReg() && Register::isVirtualRegister(Op1.getReg()))
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && Register::isVirtualRegister(Op2.getReg()))
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  // And they need to be in the trace (otherwise, they won't have a depth).
  return MI1 && MI2 && MI1->getParent() == MBB && MI2->getParent() == MBB;
}

bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                             bool &Commuted) const {
  const MachineBasicBlock *MBB = Inst.getParent();
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
  unsigned AssocOpcode = Inst.getOpcode();

  // If only one operand has the same opcode and it's the second source
  // operand, the operands must be commuted.
  Commuted = MI1->getOpcode() != AssocOpcode && MI2->getOpcode() == AssocOpcode;
  if (Commuted)
    std::swap(MI1, MI2);

  // 1. The previous instruction must be the same type as Inst.
  // 2. The previous instruction must also be associative/commutative (this can
  //    be different even for instructions with the same opcode if traits like
  //    fast-math-flags are included).
  // 3. The previous instruction must have virtual register definitions for its
  //    operands in the same basic block as Inst.
  // 4. The previous instruction's result must only be used by Inst.
  return MI1->getOpcode() == AssocOpcode && isAssociativeAndCommutative(*MI1) &&
         hasReassociableOperands(*MI1, MBB) &&
         MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}

// 1. The operation must be associative and commutative.
// 2. The instruction must have virtual register definitions for its
//    operands in the same basic block.
// 3. The instruction must have a reassociable sibling.
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
                                               bool &Commuted) const {
  return isAssociativeAndCommutative(Inst) &&
         hasReassociableOperands(Inst, Inst.getParent()) &&
         hasReassociableSibling(Inst, Commuted);
}


// The concept of the reassociation pass is that these operations can benefit
// from this kind of transformation:
//
// A = ? op ?
// B = A op X (Prev)
// C = B op Y (Root)
// -->
// A = ? op ?
// B = X op Y
// C = A op B
//
// breaking the dependency between A and B, allowing them to be executed in
// parallel (or back-to-back in a pipeline) instead of depending on each other.

// FIXME: This has the potential to be expensive (compile time) while not
// improving the code at all. Some ways to limit the overhead:
// 1. Track successful transforms; bail out if hit rate gets too low.
// 2. Only enable at -O3 or some other non-default optimization level.
// 3. Pre-screen pattern candidates here: if an operand of the previous
//    instruction is known to not increase the critical path, then don't match
//    that pattern.
bool TargetInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root, SmallVectorImpl<MachineCombinerPattern> &Patterns,
    bool DoRegPressureReduce) const {
  bool Commute;
  if (isReassociationCandidate(Root, Commute)) {
    // We found a sequence of instructions that may be suitable for a
    // reassociation of operands to increase ILP. Specify each commutation
    // possibility for the Prev instruction in the sequence and let the
    // machine combiner decide if changing the operands is worthwhile.
    if (Commute) {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
    } else {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
    }
    return true;
  }

  return false;
}

/// Return true when a code sequence can improve loop throughput.
bool
TargetInstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
  return false;
}

/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev,
    MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getMF();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

  // This array encodes the operand index for each parameter because the
  // operands may be commuted. Each row corresponds to a pattern value,
  // and each column specifies the index of A, B, X, Y.
  unsigned OpIdx[4][4] = {
    { 1, 1, 2, 2 },
    { 1, 2, 2, 1 },
    { 2, 1, 1, 2 },
    { 2, 2, 1, 1 }
  };

  int Row;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
  default: llvm_unreachable("unexpected MachineCombinerPattern");
  }

  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
  MachineOperand &OpC = Root.getOperand(0);

  Register RegA = OpA.getReg();
  Register RegB = OpB.getReg();
  Register RegX = OpX.getReg();
  Register RegY = OpY.getReg();
  Register RegC = OpC.getReg();

  if (Register::isVirtualRegister(RegA))
    MRI.constrainRegClass(RegA, RC);
  if (Register::isVirtualRegister(RegB))
    MRI.constrainRegClass(RegB, RC);
  if (Register::isVirtualRegister(RegX))
    MRI.constrainRegClass(RegX, RC);
  if (Register::isVirtualRegister(RegY))
    MRI.constrainRegClass(RegY, RC);
  if (Register::isVirtualRegister(RegC))
    MRI.constrainRegClass(RegC, RC);

  // Create a new virtual register for the result of (X op Y) instead of
  // recycling RegB because the MachineCombiner's computation of the critical
  // path requires a new register definition rather than an existing one.
  Register NewVR = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

  unsigned Opcode = Root.getOpcode();
  bool KillA = OpA.isKill();
  bool KillX = OpX.isKill();
  bool KillY = OpY.isKill();

  // Create new instructions for insertion.
  MachineInstrBuilder MIB1 =
      BuildMI(*MF, Prev.getDebugLoc(), TII->get(Opcode), NewVR)
          .addReg(RegX, getKillRegState(KillX))
          .addReg(RegY, getKillRegState(KillY));
  MachineInstrBuilder MIB2 =
      BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC)
          .addReg(RegA, getKillRegState(KillA))
          .addReg(NewVR, getKillRegState(true));

  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

  // Record new instructions for insertion and old instructions for deletion.
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(&Prev);
  DelInstrs.push_back(&Root);
}
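
// For example, REASSOC_AX_BY (row 0) reads A and X from Prev's operands
// 1 and 2 and B and Y from Root's operands 1 and 2, so
// "B = A op X; C = B op Y" is rewritten as "NewVR = X op Y; C = A op NewVR".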

void TargetInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
  MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();

  // Select the previous instruction in the sequence based on the input
  // pattern.
  MachineInstr *Prev = nullptr;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_XA_BY:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_YB:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
    break;
  default:
    break;
  }

  assert(Prev && "Unknown pattern for machine combiner");

  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
}

bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
    const MachineInstr &MI, AAResults *AA) const {
  const MachineFunction &MF = *MI.getMF();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
    return false;
  Register DefReg = MI.getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (Register::isVirtualRegister(DefReg) && MI.getOperand(0).getSubReg() &&
      MI.readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
      MI.hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI.isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg()) continue;
    Register Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (Register::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}

int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getMF();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
      TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (!isFrameInstr(MI))
    return 0;

  int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));

  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
      (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}
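
// For instance, on a downward-growing stack a 16-byte call-frame setup
// returns +16 and the matching call-frame destroy returns -16, so summing
// the results over a block tracks the net SP adjustment.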

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI.isTerminator() || MI.isPosition())
    return true;

  // INLINEASM_BR can jump to another block.
  if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::CreateTargetMIHazardRecognizer(
    const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

// Default implementation of getMemOperandWithOffset.
bool TargetInstrInfo::getMemOperandWithOffset(
    const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset,
    bool &OffsetIsScalable, const TargetRegisterInfo *TRI) const {
  SmallVector<const MachineOperand *, 4> BaseOps;
  unsigned Width;
  if (!getMemOperandsWithOffsetWidth(MI, BaseOps, Offset, OffsetIsScalable,
                                     Width, TRI) ||
      BaseOps.size() != 1)
    return false;
  BaseOp = BaseOps.front();
  return true;
}

//===----------------------------------------------------------------------===//
// SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}

//===----------------------------------------------------------------------===//
// MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                         const MachineInstr &MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI.getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}

/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr &DefMI) const {
  if (DefMI.isTransient())
    return 0;
  if (DefMI.mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI.getOpcode()))
    return SchedModel.HighLatency;
  return 1;
}

unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
  return 0;
}

unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                          const MachineInstr &MI,
                                          unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI.mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
}

bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx) const {
  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI.getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}

Optional<ParamLoadedValue>
TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
                                     Register Reg) const {
  const MachineFunction *MF = MI.getMF();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  DIExpression *Expr = DIExpression::get(MF->getFunction().getContext(), {});
  int64_t Offset;
  bool OffsetIsScalable;

  // To simplify the sub-register handling, verify that we only need to
  // consider physical registers.
  assert(MF->getProperties().hasProperty(
      MachineFunctionProperties::Property::NoVRegs));

  if (auto DestSrc = isCopyInstr(MI)) {
    Register DestReg = DestSrc->Destination->getReg();

    // If the copy destination is the forwarding reg, describe the forwarding
    // reg using the copy source as the backup location. Example:
    //
    //   x0 = MOV x7
    //   call callee(x0)      ; x0 described as x7
    if (Reg == DestReg)
      return ParamLoadedValue(*DestSrc->Source, Expr);

    // Cases where super- or sub-registers need to be described should
    // be handled by the target's hook implementation.
    assert(!TRI->isSuperOrSubRegisterEq(Reg, DestReg) &&
           "TargetInstrInfo::describeLoadedValue can't describe super- or "
           "sub-regs for copy instructions");
    return None;
  } else if (auto RegImm = isAddImmediate(MI, Reg)) {
    Register SrcReg = RegImm->Reg;
    Offset = RegImm->Imm;
    Expr = DIExpression::prepend(Expr, DIExpression::ApplyOffset, Offset);
    return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
  } else if (MI.hasOneMemOperand()) {
    // Only describe memory which provably does not escape the function. As
    // described in llvm.org/PR43343, escaped memory may be clobbered by the
    // callee (or by another thread).
    const auto &TII = MF->getSubtarget().getInstrInfo();
    const MachineFrameInfo &MFI = MF->getFrameInfo();
    const MachineMemOperand *MMO = MI.memoperands()[0];
    const PseudoSourceValue *PSV = MMO->getPseudoValue();

    // If the address points to "special" memory (e.g. a spill slot), it's
    // sufficient to check that it isn't aliased by any high-level IR value.
    if (!PSV || PSV->mayAlias(&MFI))
      return None;

    const MachineOperand *BaseOp;
    if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable,
                                      TRI))
      return None;

    // FIXME: Scalable offsets are not yet handled in the offset code below.
    if (OffsetIsScalable)
      return None;

    // TODO: Can currently only handle mem instructions with a single define.
    // An example from the x86 target:
    //   ...
    //   DIV64m $rsp, 1, $noreg, 24, $noreg, implicit-def dead $rax, implicit-def $rdx
    //   ...
    //
    if (MI.getNumExplicitDefs() != 1)
      return None;

    // TODO: In what way do we need to take Reg into consideration here?

    SmallVector<uint64_t, 8> Ops;
    DIExpression::appendOffset(Ops, Offset);
    Ops.push_back(dwarf::DW_OP_deref_size);
    Ops.push_back(MMO->getSize());
    Expr = DIExpression::prependOpcodes(Expr, Ops);
    return ParamLoadedValue(*BaseOp, Expr);
  }

  return None;
}

/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx,
                                       const MachineInstr &UseMI,
                                       unsigned UseIdx) const {
  unsigned DefClass = DefMI.getDesc().getSchedClass();
  unsigned UseClass = UseMI.getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

/// If we can determine the operand latency from the def only, without
/// itinerary lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
    const InstrItineraryData *ItinData, const MachineInstr &DefMI) const {

  // Let the target hook getInstrLatency handle missing itineraries.
  if (!ItinData)
    return getInstrLatency(ItinData, DefMI);

  if (ItinData->isEmpty())
    return defaultDefLatency(ItinData->SchedModel, DefMI);

  // ...operand lookup required.
  return -1;
}

bool TargetInstrInfo::getRegSequenceInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert((MI.isRegSequence() ||
          MI.isRegSequenceLike()) && "Instruction does not have the proper type");

  if (!MI.isRegSequence())
    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);

  // We are looking at:
  //   Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
       OpIdx += 2) {
    const MachineOperand &MOReg = MI.getOperand(OpIdx);
    if (MOReg.isUndef())
      continue;
    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
    assert(MOSubIdx.isImm() &&
           "One of the subindices of the reg_sequence is not an immediate");
    // Record Reg:SubReg, SubIdx.
    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
                                            (unsigned)MOSubIdx.getImm()));
  }
  return true;
}
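
// For example (hypothetical vregs): "%0 = REG_SEQUENCE %1, %subreg.sub0,
// %2, %subreg.sub1" populates InputRegs with {%1, 0, sub0} and
// {%2, 0, sub1}.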

bool TargetInstrInfo::getExtractSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert((MI.isExtractSubreg() ||
          MI.isExtractSubregLike()) && "Instruction does not have the proper type");

  if (!MI.isExtractSubreg())
    return getExtractSubregLikeInputs(MI, DefIdx, InputReg);

  // We are looking at:
  //   Def = EXTRACT_SUBREG v0.sub1, sub0.
  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
  const MachineOperand &MOReg = MI.getOperand(1);
  if (MOReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(2);
  assert(MOSubIdx.isImm() &&
         "The subindex of the extract_subreg is not an immediate");

  InputReg.Reg = MOReg.getReg();
  InputReg.SubReg = MOReg.getSubReg();
  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}
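
// For example (hypothetical vregs): "%0 = EXTRACT_SUBREG %1.sub1, sub0"
// yields InputReg = {%1, sub1, sub0}.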

bool TargetInstrInfo::getInsertSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
  assert((MI.isInsertSubreg() ||
          MI.isInsertSubregLike()) && "Instruction does not have the proper type");

  if (!MI.isInsertSubreg())
    return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);

  // We are looking at:
  //   Def = INSERT_SUBREG v0, v1, sub0.
  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
  const MachineOperand &MOBaseReg = MI.getOperand(1);
  const MachineOperand &MOInsertedReg = MI.getOperand(2);
  if (MOInsertedReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(3);
  assert(MOSubIdx.isImm() &&
         "The subindex of the insert_subreg is not an immediate");
  BaseReg.Reg = MOBaseReg.getReg();
  BaseReg.SubReg = MOBaseReg.getSubReg();

  InsertedReg.Reg = MOInsertedReg.getReg();
  InsertedReg.SubReg = MOInsertedReg.getSubReg();
  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

// Returns a MIRPrinter comment for this machine operand.
std::string TargetInstrInfo::createMIROperandComment(
    const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
    const TargetRegisterInfo *TRI) const {

  if (!MI.isInlineAsm())
    return "";

  std::string Flags;
  raw_string_ostream OS(Flags);

  if (OpIdx == InlineAsm::MIOp_ExtraInfo) {
    // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
    unsigned ExtraInfo = Op.getImm();
    bool First = true;
    for (StringRef Info : InlineAsm::getExtraInfoNames(ExtraInfo)) {
      if (!First)
        OS << " ";
      First = false;
      OS << Info;
    }

    return OS.str();
  }

  int FlagIdx = MI.findInlineAsmFlagIdx(OpIdx);
  if (FlagIdx < 0 || (unsigned)FlagIdx != OpIdx)
    return "";

  assert(Op.isImm() && "Expected flag operand to be an immediate");
  // Pretty print the inline asm operand descriptor.
  unsigned Flag = Op.getImm();
  unsigned Kind = InlineAsm::getKind(Flag);
  OS << InlineAsm::getKindName(Kind);

  unsigned RCID = 0;
  if (!InlineAsm::isImmKind(Flag) && !InlineAsm::isMemKind(Flag) &&
      InlineAsm::hasRegClassConstraint(Flag, RCID)) {
    if (TRI) {
      OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
    } else
      OS << ":RC" << RCID;
  }

  if (InlineAsm::isMemKind(Flag)) {
    unsigned MCID = InlineAsm::getMemoryConstraintID(Flag);
    OS << ":" << InlineAsm::getMemConstraintName(MCID);
  }

  unsigned TiedTo = 0;
  if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo))
    OS << " tiedto:$" << TiedTo;

  return OS.str();
}

TargetInstrInfo::PipelinerLoopInfo::~PipelinerLoopInfo() {}