//===- MachineFunction.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Collect native machine code information for a function.  This allows
// target-specific information about the generated code to be stored with each
// function.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WasmEHFuncInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSlotTracker.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/SectionKind.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "codegen"

static cl::opt<unsigned> AlignAllFunctions(
    "align-all-functions",
    cl::desc("Force the alignment of all functions in log2 format (e.g. 4 "
             "means align on 16B boundaries)."),
    cl::init(0), cl::Hidden);

static const char *getPropertyName(MachineFunctionProperties::Property Prop) {
  using P = MachineFunctionProperties::Property;

  switch(Prop) {
  case P::FailedISel: return "FailedISel";
  case P::IsSSA: return "IsSSA";
  case P::Legalized: return "Legalized";
  case P::NoPHIs: return "NoPHIs";
  case P::NoVRegs: return "NoVRegs";
  case P::RegBankSelected: return "RegBankSelected";
  case P::Selected: return "Selected";
  case P::TracksLiveness: return "TracksLiveness";
  case P::TiedOpsRewritten: return "TiedOpsRewritten";
  }
  llvm_unreachable("Invalid machine function property");
}

// Pin the vtable to this file.
void MachineFunction::Delegate::anchor() {}

void MachineFunctionProperties::print(raw_ostream &OS) const {
  const char *Separator = "";
  for (BitVector::size_type I = 0; I < Properties.size(); ++I) {
    if (!Properties[I])
      continue;
    OS << Separator << getPropertyName(static_cast<Property>(I));
    Separator = ", ";
  }
}

//===----------------------------------------------------------------------===//
// MachineFunction implementation
//===----------------------------------------------------------------------===//

// Out-of-line virtual method.
MachineFunctionInfo::~MachineFunctionInfo() = default;

void ilist_alloc_traits<MachineBasicBlock>::deleteNode(MachineBasicBlock *MBB) {
  MBB->getParent()->DeleteMachineBasicBlock(MBB);
}

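/// Return the stack alignment (in bytes) to use for this function: the
/// explicit stackalign attribute if the function carries one, otherwise the
/// target frame lowering's default stack alignment.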
static inline unsigned getFnStackAlignment(const TargetSubtargetInfo *STI,
                                           const Function &F) {
  if (F.hasFnAttribute(Attribute::StackAlignment))
    return F.getFnStackAlignment();
  return STI->getFrameLowering()->getStackAlign().value();
}

MachineFunction::MachineFunction(Function &F, const LLVMTargetMachine &Target,
                                 const TargetSubtargetInfo &STI,
                                 unsigned FunctionNum, MachineModuleInfo &mmi)
    : F(F), Target(Target), STI(&STI), Ctx(mmi.getContext()), MMI(mmi) {
  FunctionNumber = FunctionNum;
  init();
}

void MachineFunction::handleInsertion(MachineInstr &MI) {
  if (TheDelegate)
    TheDelegate->MF_HandleInsertion(MI);
}

void MachineFunction::handleRemoval(MachineInstr &MI) {
  if (TheDelegate)
    TheDelegate->MF_HandleRemoval(MI);
}

void MachineFunction::init() {
  // Assume the function starts in SSA form with correct liveness.
  Properties.set(MachineFunctionProperties::Property::IsSSA);
  Properties.set(MachineFunctionProperties::Property::TracksLiveness);
  if (STI->getRegisterInfo())
    RegInfo = new (Allocator) MachineRegisterInfo(this);
  else
    RegInfo = nullptr;

  MFInfo = nullptr;
  // We can realign the stack if the target supports it and the user hasn't
  // explicitly asked us not to.
  bool CanRealignSP = STI->getFrameLowering()->isStackRealignable() &&
                      !F.hasFnAttribute("no-realign-stack");
  FrameInfo = new (Allocator) MachineFrameInfo(
      getFnStackAlignment(STI, F), /*StackRealignable=*/CanRealignSP,
      /*ForcedRealign=*/CanRealignSP &&
          F.hasFnAttribute(Attribute::StackAlignment));

  if (F.hasFnAttribute(Attribute::StackAlignment))
    FrameInfo->ensureMaxAlignment(*F.getFnStackAlign());

  ConstantPool = new (Allocator) MachineConstantPool(getDataLayout());
  Alignment = STI->getTargetLowering()->getMinFunctionAlignment();

  // FIXME: Shouldn't use pref alignment if explicit alignment is set on F.
  // FIXME: Use Function::hasOptSize().
  if (!F.hasFnAttribute(Attribute::OptimizeForSize))
    Alignment = std::max(Alignment,
                         STI->getTargetLowering()->getPrefFunctionAlignment());

  if (AlignAllFunctions)
    Alignment = Align(1ULL << AlignAllFunctions);

  JumpTableInfo = nullptr;

  if (isFuncletEHPersonality(classifyEHPersonality(
          F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
    WinEHInfo = new (Allocator) WinEHFuncInfo();
  }

  if (isScopedEHPersonality(classifyEHPersonality(
          F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
    WasmEHInfo = new (Allocator) WasmEHFuncInfo();
  }

  assert(Target.isCompatibleDataLayout(getDataLayout()) &&
         "Can't create a MachineFunction using a Module with a "
         "Target-incompatible DataLayout attached\n");

  PSVManager =
    std::make_unique<PseudoSourceValueManager>(*(getSubtarget().
                                                  getInstrInfo()));
}

MachineFunction::~MachineFunction() {
  clear();
}

void MachineFunction::clear() {
  Properties.reset();
  // Don't call destructors on MachineInstr and MachineOperand. All of their
  // memory comes from the BumpPtrAllocator which is about to be purged.
  //
  // Do call MachineBasicBlock destructors; they contain std::vectors.
  for (iterator I = begin(), E = end(); I != E; I = BasicBlocks.erase(I))
    I->Insts.clearAndLeakNodesUnsafely();
  MBBNumbering.clear();

  InstructionRecycler.clear(Allocator);
  OperandRecycler.clear(Allocator);
  BasicBlockRecycler.clear(Allocator);
  CodeViewAnnotations.clear();
  VariableDbgInfos.clear();
  if (RegInfo) {
    RegInfo->~MachineRegisterInfo();
    Allocator.Deallocate(RegInfo);
  }
  if (MFInfo) {
    MFInfo->~MachineFunctionInfo();
    Allocator.Deallocate(MFInfo);
  }

  FrameInfo->~MachineFrameInfo();
  Allocator.Deallocate(FrameInfo);

  ConstantPool->~MachineConstantPool();
  Allocator.Deallocate(ConstantPool);

  if (JumpTableInfo) {
    JumpTableInfo->~MachineJumpTableInfo();
    Allocator.Deallocate(JumpTableInfo);
  }

  if (WinEHInfo) {
    WinEHInfo->~WinEHFuncInfo();
    Allocator.Deallocate(WinEHInfo);
  }

  if (WasmEHInfo) {
    WasmEHInfo->~WasmEHFuncInfo();
    Allocator.Deallocate(WasmEHInfo);
  }
}

const DataLayout &MachineFunction::getDataLayout() const {
  return F.getParent()->getDataLayout();
}

/// Get the JumpTableInfo for this function.
/// If it does not already exist, allocate one.
MachineJumpTableInfo *MachineFunction::
getOrCreateJumpTableInfo(unsigned EntryKind) {
  if (JumpTableInfo) return JumpTableInfo;

  JumpTableInfo = new (Allocator)
    MachineJumpTableInfo((MachineJumpTableInfo::JTEntryKind)EntryKind);
  return JumpTableInfo;
}

DenormalMode MachineFunction::getDenormalMode(const fltSemantics &FPType) const {
  return F.getDenormalMode(FPType);
}

/// Should we be emitting segmented stack code for this function?
bool MachineFunction::shouldSplitStack() const {
  return getFunction().hasFnAttribute("split-stack");
}

LLVM_NODISCARD unsigned
MachineFunction::addFrameInst(const MCCFIInstruction &Inst) {
  FrameInstructions.push_back(Inst);
  return FrameInstructions.size() - 1;
}

/// This discards all of the MachineBasicBlock numbers and recomputes them.
/// This guarantees that the MBB numbers are sequential, dense, and match the
/// ordering of the blocks within the function.  If a specific MachineBasicBlock
/// is specified, only that block and those after it are renumbered.
void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
  if (empty()) { MBBNumbering.clear(); return; }
  MachineFunction::iterator MBBI, E = end();
  if (MBB == nullptr)
    MBBI = begin();
  else
    MBBI = MBB->getIterator();

  // Figure out the block number this should have.
  unsigned BlockNo = 0;
  if (MBBI != begin())
    BlockNo = std::prev(MBBI)->getNumber() + 1;

  for (; MBBI != E; ++MBBI, ++BlockNo) {
    if (MBBI->getNumber() != (int)BlockNo) {
      // Remove use of the old number.
      if (MBBI->getNumber() != -1) {
        assert(MBBNumbering[MBBI->getNumber()] == &*MBBI &&
               "MBB number mismatch!");
        MBBNumbering[MBBI->getNumber()] = nullptr;
      }

      // If BlockNo is already taken, set that block's number to -1.
      if (MBBNumbering[BlockNo])
        MBBNumbering[BlockNo]->setNumber(-1);

      MBBNumbering[BlockNo] = &*MBBI;
      MBBI->setNumber(BlockNo);
    }
  }

  // Okay, all the blocks are renumbered.  If we have compactified the block
  // numbering, shrink MBBNumbering now.
  assert(BlockNo <= MBBNumbering.size() && "Mismatch!");
  MBBNumbering.resize(BlockNo);
}

/// This method iterates over the basic blocks and assigns their IsBeginSection
/// and IsEndSection fields. This must be called after MBB layout is finalized
/// and the SectionIDs are assigned to MBBs.
void MachineFunction::assignBeginEndSections() {
  front().setIsBeginSection();
  auto CurrentSectionID = front().getSectionID();
  for (auto MBBI = std::next(begin()), E = end(); MBBI != E; ++MBBI) {
    if (MBBI->getSectionID() == CurrentSectionID)
      continue;
    MBBI->setIsBeginSection();
    std::prev(MBBI)->setIsEndSection();
    CurrentSectionID = MBBI->getSectionID();
  }
  back().setIsEndSection();
}

/// Allocate a new MachineInstr. Use this instead of `new MachineInstr'.
MachineInstr *MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
                                                  const DebugLoc &DL,
                                                  bool NoImplicit) {
  return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
      MachineInstr(*this, MCID, DL, NoImplicit);
}

/// Create a new MachineInstr which is a copy of the 'Orig' instruction,
/// identical in all ways except the instruction has no parent, prev, or next.
MachineInstr *
MachineFunction::CloneMachineInstr(const MachineInstr *Orig) {
  return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
             MachineInstr(*this, *Orig);
}

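/// Clone the instruction bundle starting at Orig (or just Orig itself when it
/// is not bundled), insert the clones before InsertBefore in MBB, and
/// re-bundle them. Returns the first cloned instruction.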
MachineInstr &MachineFunction::CloneMachineInstrBundle(MachineBasicBlock &MBB,
    MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) {
  MachineInstr *FirstClone = nullptr;
  MachineBasicBlock::const_instr_iterator I = Orig.getIterator();
  while (true) {
    MachineInstr *Cloned = CloneMachineInstr(&*I);
    MBB.insert(InsertBefore, Cloned);
    if (FirstClone == nullptr) {
      FirstClone = Cloned;
    } else {
      Cloned->bundleWithPred();
    }

    if (!I->isBundledWithSucc())
      break;
    ++I;
  }
  // Copy over call site info to the cloned instruction if needed. If Orig is in
  // a bundle, copyCallSiteInfo takes care of finding the call instruction in
  // the bundle.
  if (Orig.shouldUpdateCallSiteInfo())
    copyCallSiteInfo(&Orig, FirstClone);
  return *FirstClone;
}

/// Delete the given MachineInstr.
///
/// This function also serves as the MachineInstr destructor - the real
/// ~MachineInstr() destructor must be empty.
void
MachineFunction::DeleteMachineInstr(MachineInstr *MI) {
  // Verify that the call site info is in a valid state. This assertion should
  // be triggered during the implementation of support for the
  // call site info of a new architecture. If the assertion is triggered,
  // the backtrace will show where to insert a call to updateCallSiteInfo().
  assert((!MI->isCandidateForCallSiteEntry() ||
          CallSitesInfo.find(MI) == CallSitesInfo.end()) &&
         "Call site info was not updated!");
  // Strip it for parts. The operand array and the MI object itself are
  // independently recyclable.
  if (MI->Operands)
    deallocateOperandArray(MI->CapOperands, MI->Operands);
  // Don't call ~MachineInstr() which must be trivial anyway because
  // ~MachineFunction drops whole lists of MachineInstrs without calling their
  // destructors.
  InstructionRecycler.Deallocate(Allocator, MI);
}

/// Allocate a new MachineBasicBlock. Use this instead of
/// `new MachineBasicBlock'.
MachineBasicBlock *
MachineFunction::CreateMachineBasicBlock(const BasicBlock *bb) {
  return new (BasicBlockRecycler.Allocate<MachineBasicBlock>(Allocator))
             MachineBasicBlock(*this, bb);
}

/// Delete the given MachineBasicBlock.
void
MachineFunction::DeleteMachineBasicBlock(MachineBasicBlock *MBB) {
  assert(MBB->getParent() == this && "MBB parent mismatch!");
  // Clean up any references to MBB in jump tables before deleting it.
  if (JumpTableInfo)
    JumpTableInfo->RemoveMBBFromJumpTables(MBB);
  MBB->~MachineBasicBlock();
  BasicBlockRecycler.Deallocate(Allocator, MBB);
}

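/// Allocate a new MachineMemOperand with the given properties. The operand's
/// memory is owned by the function's allocator.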
MachineMemOperand *MachineFunction::getMachineMemOperand(
    MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
    Align base_alignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
    SyncScope::ID SSID, AtomicOrdering Ordering,
    AtomicOrdering FailureOrdering) {
  return new (Allocator)
      MachineMemOperand(PtrInfo, f, s, base_alignment, AAInfo, Ranges,
                        SSID, Ordering, FailureOrdering);
}

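/// Build a MachineMemOperand based on an existing one, but with a new pointer
/// info and size. AA metadata and range metadata are not carried over.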
MachineMemOperand *MachineFunction::getMachineMemOperand(
    const MachineMemOperand *MMO, MachinePointerInfo &PtrInfo, uint64_t Size) {
  return new (Allocator) MachineMemOperand(
      PtrInfo, MMO->getFlags(), Size, MMO->getBaseAlign(), AAMDNodes(), nullptr,
      MMO->getSyncScopeID(), MMO->getOrdering(), MMO->getFailureOrdering());
}

MachineMemOperand *
MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
                                      int64_t Offset, uint64_t Size) {
  const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();

  // If there is no pointer value, the offset isn't tracked so we need to adjust
  // the base alignment.
  Align Alignment = PtrInfo.V.isNull()
                        ? commonAlignment(MMO->getBaseAlign(), Offset)
                        : MMO->getBaseAlign();

  // Do not preserve ranges, since we don't necessarily know what the high bits
  // are anymore.
  return new (Allocator)
      MachineMemOperand(PtrInfo.getWithOffset(Offset), MMO->getFlags(), Size,
                        Alignment, MMO->getAAInfo(), nullptr, MMO->getSyncScopeID(),
                        MMO->getOrdering(), MMO->getFailureOrdering());
}

MachineMemOperand *
MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
                                      const AAMDNodes &AAInfo) {
  MachinePointerInfo MPI = MMO->getValue() ?
             MachinePointerInfo(MMO->getValue(), MMO->getOffset()) :
             MachinePointerInfo(MMO->getPseudoValue(), MMO->getOffset());

  return new (Allocator) MachineMemOperand(
      MPI, MMO->getFlags(), MMO->getSize(), MMO->getBaseAlign(), AAInfo,
      MMO->getRanges(), MMO->getSyncScopeID(), MMO->getOrdering(),
      MMO->getFailureOrdering());
}

MachineMemOperand *
MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
                                      MachineMemOperand::Flags Flags) {
  return new (Allocator) MachineMemOperand(
      MMO->getPointerInfo(), Flags, MMO->getSize(), MMO->getBaseAlign(),
      MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),
      MMO->getOrdering(), MMO->getFailureOrdering());
}

MachineInstr::ExtraInfo *MachineFunction::createMIExtraInfo(
    ArrayRef<MachineMemOperand *> MMOs, MCSymbol *PreInstrSymbol,
    MCSymbol *PostInstrSymbol, MDNode *HeapAllocMarker) {
  return MachineInstr::ExtraInfo::create(Allocator, MMOs, PreInstrSymbol,
                                         PostInstrSymbol, HeapAllocMarker);
}

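/// Copy Name into memory owned by the function's allocator and return a
/// NUL-terminated string that lives as long as the MachineFunction itself.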
const char *MachineFunction::createExternalSymbolName(StringRef Name) {
  char *Dest = Allocator.Allocate<char>(Name.size() + 1);
  llvm::copy(Name, Dest);
  Dest[Name.size()] = 0;
  return Dest;
}

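/// Allocate a zero-initialized register mask sized for all physical registers
/// of the target. The mask is owned by the function's allocator.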
uint32_t *MachineFunction::allocateRegMask() {
  unsigned NumRegs = getSubtarget().getRegisterInfo()->getNumRegs();
  unsigned Size = MachineOperand::getRegMaskSize(NumRegs);
  uint32_t *Mask = Allocator.Allocate<uint32_t>(Size);
  memset(Mask, 0, Size * sizeof(Mask[0]));
  return Mask;
}

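/// Copy a shuffle mask into memory owned by the function's allocator so that
/// the returned ArrayRef outlives the caller's storage.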
ArrayRef<int> MachineFunction::allocateShuffleMask(ArrayRef<int> Mask) {
  int* AllocMask = Allocator.Allocate<int>(Mask.size());
  copy(Mask, AllocMask);
  return {AllocMask, Mask.size()};
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MachineFunction::dump() const {
  print(dbgs());
}
#endif

StringRef MachineFunction::getName() const {
  return getFunction().getName();
}

void MachineFunction::print(raw_ostream &OS, const SlotIndexes *Indexes) const {
  OS << "# Machine code for function " << getName() << ": ";
  getProperties().print(OS);
  OS << '\n';

  // Print Frame Information
  FrameInfo->print(*this, OS);

  // Print JumpTable Information
  if (JumpTableInfo)
    JumpTableInfo->print(OS);

  // Print Constant Pool
  ConstantPool->print(OS);

  const TargetRegisterInfo *TRI = getSubtarget().getRegisterInfo();

  if (RegInfo && !RegInfo->livein_empty()) {
    OS << "Function Live Ins: ";
    for (MachineRegisterInfo::livein_iterator
         I = RegInfo->livein_begin(), E = RegInfo->livein_end(); I != E; ++I) {
      OS << printReg(I->first, TRI);
      if (I->second)
        OS << " in " << printReg(I->second, TRI);
      if (std::next(I) != E)
        OS << ", ";
    }
    OS << '\n';
  }

  ModuleSlotTracker MST(getFunction().getParent());
  MST.incorporateFunction(getFunction());
  for (const auto &BB : *this) {
    OS << '\n';
    // If we print the whole function, print it at its most verbose level.
    BB.print(OS, MST, Indexes, /*IsStandalone=*/true);
  }

  OS << "\n# End machine code for function " << getName() << ".\n\n";
}

/// True if this function needs frame moves for debug or exceptions.
bool MachineFunction::needsFrameMoves() const {
  return getMMI().hasDebugInfo() ||
         getTarget().Options.ForceDwarfFrameSection ||
         F.needsUnwindTableEntry();
}

namespace llvm {

  template<>
  struct DOTGraphTraits<const MachineFunction*> : public DefaultDOTGraphTraits {
    DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

    static std::string getGraphName(const MachineFunction *F) {
      return ("CFG for '" + F->getName() + "' function").str();
    }

    std::string getNodeLabel(const MachineBasicBlock *Node,
                             const MachineFunction *Graph) {
      std::string OutStr;
      {
        raw_string_ostream OSS(OutStr);

        if (isSimple()) {
          OSS << printMBBReference(*Node);
          if (const BasicBlock *BB = Node->getBasicBlock())
            OSS << ": " << BB->getName();
        } else
          Node->print(OSS);
      }

      if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());

      // Process string output to make it nicer...
      for (unsigned i = 0; i != OutStr.length(); ++i)
        if (OutStr[i] == '\n') {                            // Left justify
          OutStr[i] = '\\';
          OutStr.insert(OutStr.begin()+i+1, 'l');
        }
      return OutStr;
    }
  };

} // end namespace llvm

void MachineFunction::viewCFG() const
{
#ifndef NDEBUG
  ViewGraph(this, "mf" + getName());
#else
  errs() << "MachineFunction::viewCFG is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}

void MachineFunction::viewCFGOnly() const
{
#ifndef NDEBUG
  ViewGraph(this, "mf" + getName(), true);
#else
  errs() << "MachineFunction::viewCFGOnly is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}

/// Add the specified physical register as a live-in value and
/// create a corresponding virtual register for it.
Register MachineFunction::addLiveIn(MCRegister PReg,
                                    const TargetRegisterClass *RC) {
  MachineRegisterInfo &MRI = getRegInfo();
  Register VReg = MRI.getLiveInVirtReg(PReg);
  if (VReg) {
    const TargetRegisterClass *VRegRC = MRI.getRegClass(VReg);
    (void)VRegRC;
    // A physical register can be added several times.
    // Between two calls, the register class of the related virtual register
    // may have been constrained to match some operation constraints.
    // In that case, check that the current register class includes the
    // physical register and is a sub class of the specified RC.
    assert((VRegRC == RC || (VRegRC->contains(PReg) &&
                             RC->hasSubClassEq(VRegRC))) &&
            "Register class mismatch!");
    return VReg;
  }
  VReg = MRI.createVirtualRegister(RC);
  MRI.addLiveIn(PReg, VReg);
  return VReg;
}

/// Return the MCSymbol for the specified non-empty jump table.
/// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
/// normal 'L' label is returned.
MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
                                        bool isLinkerPrivate) const {
  const DataLayout &DL = getDataLayout();
  assert(JumpTableInfo && "No jump tables");
  assert(JTI < JumpTableInfo->getJumpTables().size() && "Invalid JTI!");

  StringRef Prefix = isLinkerPrivate ? DL.getLinkerPrivateGlobalPrefix()
                                     : DL.getPrivateGlobalPrefix();
  SmallString<60> Name;
  raw_svector_ostream(Name)
    << Prefix << "JTI" << getFunctionNumber() << '_' << JTI;
  return Ctx.getOrCreateSymbol(Name);
}

/// Return a function-local symbol to represent the PIC base.
MCSymbol *MachineFunction::getPICBaseSymbol() const {
  const DataLayout &DL = getDataLayout();
  return Ctx.getOrCreateSymbol(Twine(DL.getPrivateGlobalPrefix()) +
                               Twine(getFunctionNumber()) + "$pb");
}

/// \name Exception Handling
/// \{

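/// Find the LandingPadInfo entry for the given landing pad block, creating a
/// new entry if none exists yet.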
LandingPadInfo &
MachineFunction::getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad) {
  unsigned N = LandingPads.size();
  for (unsigned i = 0; i < N; ++i) {
    LandingPadInfo &LP = LandingPads[i];
    if (LP.LandingPadBlock == LandingPad)
      return LP;
  }

  LandingPads.push_back(LandingPadInfo(LandingPad));
  return LandingPads[N];
}

void MachineFunction::addInvoke(MachineBasicBlock *LandingPad,
                                MCSymbol *BeginLabel, MCSymbol *EndLabel) {
  LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
  LP.BeginLabels.push_back(BeginLabel);
  LP.EndLabels.push_back(EndLabel);
}

MCSymbol *MachineFunction::addLandingPad(MachineBasicBlock *LandingPad) {
  MCSymbol *LandingPadLabel = Ctx.createTempSymbol();
  LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
  LP.LandingPadLabel = LandingPadLabel;

  const Instruction *FirstI = LandingPad->getBasicBlock()->getFirstNonPHI();
  if (const auto *LPI = dyn_cast<LandingPadInst>(FirstI)) {
    if (const auto *PF =
            dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts()))
      getMMI().addPersonality(PF);

    if (LPI->isCleanup())
      addCleanup(LandingPad);

    // FIXME: New EH - Add the clauses in reverse order. This isn't 100%
    //        correct, but we need to do it this way because of how the DWARF EH
    //        emitter processes the clauses.
    for (unsigned I = LPI->getNumClauses(); I != 0; --I) {
      Value *Val = LPI->getClause(I - 1);
      if (LPI->isCatch(I - 1)) {
        addCatchTypeInfo(LandingPad,
                         dyn_cast<GlobalValue>(Val->stripPointerCasts()));
      } else {
        // Add filters in a list.
        auto *CVal = cast<Constant>(Val);
        SmallVector<const GlobalValue *, 4> FilterList;
        for (User::op_iterator II = CVal->op_begin(), IE = CVal->op_end();
             II != IE; ++II)
          FilterList.push_back(cast<GlobalValue>((*II)->stripPointerCasts()));

        addFilterTypeInfo(LandingPad, FilterList);
      }
    }

  } else if (const auto *CPI = dyn_cast<CatchPadInst>(FirstI)) {
    for (unsigned I = CPI->getNumArgOperands(); I != 0; --I) {
      Value *TypeInfo = CPI->getArgOperand(I - 1)->stripPointerCasts();
      addCatchTypeInfo(LandingPad, dyn_cast<GlobalValue>(TypeInfo));
    }

  } else {
    assert(isa<CleanupPadInst>(FirstI) && "Invalid landingpad!");
  }

  return LandingPadLabel;
}

void MachineFunction::addCatchTypeInfo(MachineBasicBlock *LandingPad,
                                       ArrayRef<const GlobalValue *> TyInfo) {
  LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
  for (unsigned N = TyInfo.size(); N; --N)
    LP.TypeIds.push_back(getTypeIDFor(TyInfo[N - 1]));
}

void MachineFunction::addFilterTypeInfo(MachineBasicBlock *LandingPad,
                                        ArrayRef<const GlobalValue *> TyInfo) {
  LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
  std::vector<unsigned> IdsInFilter(TyInfo.size());
  for (unsigned I = 0, E = TyInfo.size(); I != E; ++I)
    IdsInFilter[I] = getTypeIDFor(TyInfo[I]);
  LP.TypeIds.push_back(getFilterIDFor(IdsInFilter));
}

void MachineFunction::tidyLandingPads(DenseMap<MCSymbol *, uintptr_t> *LPMap,
                                      bool TidyIfNoBeginLabels) {
  for (unsigned i = 0; i != LandingPads.size(); ) {
    LandingPadInfo &LandingPad = LandingPads[i];
    if (LandingPad.LandingPadLabel &&
        !LandingPad.LandingPadLabel->isDefined() &&
        (!LPMap || (*LPMap)[LandingPad.LandingPadLabel] == 0))
      LandingPad.LandingPadLabel = nullptr;

    // Special case: we *should* emit LPs with null LP MBB. This indicates the
    // "nounwind" case.
    if (!LandingPad.LandingPadLabel && LandingPad.LandingPadBlock) {
      LandingPads.erase(LandingPads.begin() + i);
      continue;
    }

    if (TidyIfNoBeginLabels) {
      for (unsigned j = 0, e = LandingPads[i].BeginLabels.size(); j != e; ++j) {
        MCSymbol *BeginLabel = LandingPad.BeginLabels[j];
        MCSymbol *EndLabel = LandingPad.EndLabels[j];
        if ((BeginLabel->isDefined() || (LPMap && (*LPMap)[BeginLabel] != 0)) &&
            (EndLabel->isDefined() || (LPMap && (*LPMap)[EndLabel] != 0)))
          continue;

        LandingPad.BeginLabels.erase(LandingPad.BeginLabels.begin() + j);
        LandingPad.EndLabels.erase(LandingPad.EndLabels.begin() + j);
        --j;
        --e;
      }

      // Remove landing pads with no try-ranges.
      if (LandingPads[i].BeginLabels.empty()) {
        LandingPads.erase(LandingPads.begin() + i);
        continue;
      }
    }

    // If there is no landing pad, ensure that the list of typeids is empty.
    // If the only typeid is a cleanup, this is the same as having no typeids.
    if (!LandingPad.LandingPadBlock ||
        (LandingPad.TypeIds.size() == 1 && !LandingPad.TypeIds[0]))
      LandingPad.TypeIds.clear();
    ++i;
  }
}

void MachineFunction::addCleanup(MachineBasicBlock *LandingPad) {
  LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
  LP.TypeIds.push_back(0);
}

void MachineFunction::addSEHCatchHandler(MachineBasicBlock *LandingPad,
                                         const Function *Filter,
                                         const BlockAddress *RecoverBA) {
  LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
  SEHHandler Handler;
  Handler.FilterOrFinally = Filter;
  Handler.RecoverBA = RecoverBA;
  LP.SEHHandlers.push_back(Handler);
}

void MachineFunction::addSEHCleanupHandler(MachineBasicBlock *LandingPad,
                                           const Function *Cleanup) {
  LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
  SEHHandler Handler;
  Handler.FilterOrFinally = Cleanup;
  Handler.RecoverBA = nullptr;
  LP.SEHHandlers.push_back(Handler);
}

void MachineFunction::setCallSiteLandingPad(MCSymbol *Sym,
                                            ArrayRef<unsigned> Sites) {
  LPadToCallSiteMap[Sym].append(Sites.begin(), Sites.end());
}

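/// Return the 1-based type id for the given typeinfo, appending it to the
/// TypeInfos table if it is not already present. Id 0 is reserved for cleanups.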
unsigned MachineFunction::getTypeIDFor(const GlobalValue *TI) {
  for (unsigned i = 0, N = TypeInfos.size(); i != N; ++i)
    if (TypeInfos[i] == TI) return i + 1;

  TypeInfos.push_back(TI);
  return TypeInfos.size();
}

int MachineFunction::getFilterIDFor(std::vector<unsigned> &TyIds) {
  // If the new filter coincides with the tail of an existing filter, then
  // re-use the existing filter.  Folding filters more than this requires
  // re-ordering filters and/or their elements - probably not worth it.
  for (std::vector<unsigned>::iterator I = FilterEnds.begin(),
       E = FilterEnds.end(); I != E; ++I) {
    unsigned i = *I, j = TyIds.size();

    while (i && j)
      if (FilterIds[--i] != TyIds[--j])
        goto try_next;

    if (!j)
      // The new filter coincides with range [i, end) of the existing filter.
      return -(1 + i);

try_next:;
  }

  // Add the new filter.
  int FilterID = -(1 + FilterIds.size());
  FilterIds.reserve(FilterIds.size() + TyIds.size() + 1);
  llvm::append_range(FilterIds, TyIds);
  FilterEnds.push_back(FilterIds.size());
  FilterIds.push_back(0); // terminator
  return FilterID;
}

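/// Return an iterator to MI's entry in the call site info map, or end() when
/// call site info is not being emitted for this target or no entry exists.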
MachineFunction::CallSiteInfoMap::iterator
MachineFunction::getCallSiteInfo(const MachineInstr *MI) {
  assert(MI->isCandidateForCallSiteEntry() &&
         "Call site info refers only to call (MI) candidates");

  if (!Target.Options.EmitCallSiteInfo)
    return CallSitesInfo.end();
  return CallSitesInfo.find(MI);
}

/// Return the call machine instruction, or find the call within the bundle.
static const MachineInstr *getCallInstr(const MachineInstr *MI) {
  if (!MI->isBundle())
    return MI;

  for (auto &BMI : make_range(getBundleStart(MI->getIterator()),
                              getBundleEnd(MI->getIterator())))
    if (BMI.isCandidateForCallSiteEntry())
      return &BMI;

  llvm_unreachable("Unexpected bundle without a call site candidate");
}

void MachineFunction::eraseCallSiteInfo(const MachineInstr *MI) {
  assert(MI->shouldUpdateCallSiteInfo() &&
         "Call site info refers only to call (MI) candidates or "
         "candidates inside bundles");

  const MachineInstr *CallMI = getCallInstr(MI);
  CallSiteInfoMap::iterator CSIt = getCallSiteInfo(CallMI);
  if (CSIt == CallSitesInfo.end())
    return;
  CallSitesInfo.erase(CSIt);
}

void MachineFunction::copyCallSiteInfo(const MachineInstr *Old,
                                       const MachineInstr *New) {
  assert(Old->shouldUpdateCallSiteInfo() &&
         "Call site info refers only to call (MI) candidates or "
         "candidates inside bundles");

  if (!New->isCandidateForCallSiteEntry())
    return eraseCallSiteInfo(Old);

  const MachineInstr *OldCallMI = getCallInstr(Old);
  CallSiteInfoMap::iterator CSIt = getCallSiteInfo(OldCallMI);
  if (CSIt == CallSitesInfo.end())
    return;

  CallSiteInfo CSInfo = CSIt->second;
  CallSitesInfo[New] = CSInfo;
}

void MachineFunction::moveCallSiteInfo(const MachineInstr *Old,
                                       const MachineInstr *New) {
  assert(Old->shouldUpdateCallSiteInfo() &&
         "Call site info refers only to call (MI) candidates or "
         "candidates inside bundles");

  if (!New->isCandidateForCallSiteEntry())
    return eraseCallSiteInfo(Old);

  const MachineInstr *OldCallMI = getCallInstr(Old);
  CallSiteInfoMap::iterator CSIt = getCallSiteInfo(OldCallMI);
  if (CSIt == CallSitesInfo.end())
    return;

  CallSiteInfo CSInfo = std::move(CSIt->second);
  CallSitesInfo.erase(CSIt);
  CallSitesInfo[New] = CSInfo;
}

void MachineFunction::setDebugInstrNumberingCount(unsigned Num) {
  DebugInstrNumberingCount = Num;
}

void MachineFunction::makeDebugValueSubstitution(DebugInstrOperandPair A,
                                                 DebugInstrOperandPair B) {
  auto Result = DebugValueSubstitutions.insert(std::make_pair(A, B));
  (void)Result;
  assert(Result.second && "Substitution for an already substituted value?");
}

void MachineFunction::substituteDebugValuesForInst(const MachineInstr &Old,
                                                   MachineInstr &New,
                                                   unsigned MaxOperand) {
  // If the Old instruction wasn't tracked at all, there is no work to do.
  unsigned OldInstrNum = Old.peekDebugInstrNum();
  if (!OldInstrNum)
    return;

  // Iterate over all operands looking for defs to create substitutions for.
  // Avoid creating new instr numbers unless we create a new substitution.
  // While this has no functional effect, it risks confusing someone reading
  // MIR output.
  // Examine all the operands, or the first N specified by the caller.
  MaxOperand = std::min(MaxOperand, Old.getNumOperands());
  for (unsigned int I = 0; I < MaxOperand; ++I) {
    const auto &OldMO = Old.getOperand(I);
    auto &NewMO = New.getOperand(I);
    (void)NewMO;

    if (!OldMO.isReg() || !OldMO.isDef())
      continue;
    assert(NewMO.isDef());

    unsigned NewInstrNum = New.getDebugInstrNum();
    makeDebugValueSubstitution(std::make_pair(OldInstrNum, I),
                               std::make_pair(NewInstrNum, I));
  }
}

/// \}

//===----------------------------------------------------------------------===//
//  MachineJumpTableInfo implementation
//===----------------------------------------------------------------------===//

/// Return the size of each entry in the jump table.
unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const {
  // The size of a jump table entry is 4 bytes unless the entry is just the
  // address of a block, in which case it is the pointer size.
  switch (getEntryKind()) {
  case MachineJumpTableInfo::EK_BlockAddress:
    return TD.getPointerSize();
  case MachineJumpTableInfo::EK_GPRel64BlockAddress:
    return 8;
  case MachineJumpTableInfo::EK_GPRel32BlockAddress:
  case MachineJumpTableInfo::EK_LabelDifference32:
  case MachineJumpTableInfo::EK_Custom32:
    return 4;
  case MachineJumpTableInfo::EK_Inline:
    return 0;
  }
  llvm_unreachable("Unknown jump table encoding!");
}

/// Return the alignment of each entry in the jump table.
unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const {
  // The alignment of a jump table entry is the alignment of int32 unless the
  // entry is just the address of a block, in which case it is the pointer
  // alignment.
  switch (getEntryKind()) {
  case MachineJumpTableInfo::EK_BlockAddress:
    return TD.getPointerABIAlignment(0).value();
  case MachineJumpTableInfo::EK_GPRel64BlockAddress:
    return TD.getABIIntegerTypeAlignment(64).value();
  case MachineJumpTableInfo::EK_GPRel32BlockAddress:
  case MachineJumpTableInfo::EK_LabelDifference32:
  case MachineJumpTableInfo::EK_Custom32:
    return TD.getABIIntegerTypeAlignment(32).value();
  case MachineJumpTableInfo::EK_Inline:
    return 1;
  }
  llvm_unreachable("Unknown jump table encoding!");
}

/// Create a new jump table entry in the jump table info.
unsigned MachineJumpTableInfo::createJumpTableIndex(
                               const std::vector<MachineBasicBlock*> &DestBBs) {
  assert(!DestBBs.empty() && "Cannot create an empty jump table!");
  JumpTables.push_back(MachineJumpTableEntry(DestBBs));
  return JumpTables.size()-1;
}

/// If Old is the target of any jump tables, update the jump tables to branch
/// to New instead.
bool MachineJumpTableInfo::ReplaceMBBInJumpTables(MachineBasicBlock *Old,
                                                  MachineBasicBlock *New) {
  assert(Old != New && "Not making a change?");
  bool MadeChange = false;
  for (size_t i = 0, e = JumpTables.size(); i != e; ++i)
    MadeChange |= ReplaceMBBInJumpTable(i, Old, New);
  return MadeChange;
}

/// If MBB is present in any jump tables, remove it.
bool MachineJumpTableInfo::RemoveMBBFromJumpTables(MachineBasicBlock *MBB) {
  bool MadeChange = false;
  for (MachineJumpTableEntry &JTE : JumpTables) {
    auto removeBeginItr = std::remove(JTE.MBBs.begin(), JTE.MBBs.end(), MBB);
    MadeChange |= (removeBeginItr != JTE.MBBs.end());
    JTE.MBBs.erase(removeBeginItr, JTE.MBBs.end());
  }
  return MadeChange;
}

/// If Old is a target of the jump tables, update the jump table to branch to
/// New instead.
bool MachineJumpTableInfo::ReplaceMBBInJumpTable(unsigned Idx,
                                                 MachineBasicBlock *Old,
                                                 MachineBasicBlock *New) {
  assert(Old != New && "Not making a change?");
  bool MadeChange = false;
  MachineJumpTableEntry &JTE = JumpTables[Idx];
  for (size_t j = 0, e = JTE.MBBs.size(); j != e; ++j)
    if (JTE.MBBs[j] == Old) {
      JTE.MBBs[j] = New;
      MadeChange = true;
    }
  return MadeChange;
}

void MachineJumpTableInfo::print(raw_ostream &OS) const {
  if (JumpTables.empty()) return;

  OS << "Jump Tables:\n";

  for (unsigned i = 0, e = JumpTables.size(); i != e; ++i) {
    OS << printJumpTableEntryReference(i) << ':';
    for (unsigned j = 0, f = JumpTables[i].MBBs.size(); j != f; ++j)
      OS << ' ' << printMBBReference(*JumpTables[i].MBBs[j]);
    if (i != e)
      OS << '\n';
  }

  OS << '\n';
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MachineJumpTableInfo::dump() const { print(dbgs()); }
#endif

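/// Print a jump table index in MIR syntax, e.g. "%jump-table.0".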
Printable llvm::printJumpTableEntryReference(unsigned Idx) {
  return Printable([Idx](raw_ostream &OS) { OS << "%jump-table." << Idx; });
}

//===----------------------------------------------------------------------===//
//  MachineConstantPool implementation
//===----------------------------------------------------------------------===//

void MachineConstantPoolValue::anchor() {}

unsigned MachineConstantPoolValue::getSizeInBytes(const DataLayout &DL) const {
  return DL.getTypeAllocSize(Ty);
}

unsigned MachineConstantPoolEntry::getSizeInBytes(const DataLayout &DL) const {
  if (isMachineConstantPoolEntry())
    return Val.MachineCPVal->getSizeInBytes(DL);
  return DL.getTypeAllocSize(Val.ConstVal->getType());
}

bool MachineConstantPoolEntry::needsRelocation() const {
  if (isMachineConstantPoolEntry())
    return true;
  return Val.ConstVal->needsRelocation();
}

SectionKind
MachineConstantPoolEntry::getSectionKind(const DataLayout *DL) const {
  if (needsRelocation())
    return SectionKind::getReadOnlyWithRel();
  switch (getSizeInBytes(*DL)) {
  case 4:
    return SectionKind::getMergeableConst4();
  case 8:
    return SectionKind::getMergeableConst8();
  case 16:
    return SectionKind::getMergeableConst16();
  case 32:
    return SectionKind::getMergeableConst32();
  default:
    return SectionKind::getReadOnly();
  }
}

MachineConstantPool::~MachineConstantPool() {
  // A constant may be a member of both Constants and MachineCPVsSharingEntries,
  // so keep track of which we've deleted to avoid double deletions.
  DenseSet<MachineConstantPoolValue*> Deleted;
  for (unsigned i = 0, e = Constants.size(); i != e; ++i)
    if (Constants[i].isMachineConstantPoolEntry()) {
      Deleted.insert(Constants[i].Val.MachineCPVal);
      delete Constants[i].Val.MachineCPVal;
    }
  for (DenseSet<MachineConstantPoolValue*>::iterator I =
       MachineCPVsSharingEntries.begin(), E = MachineCPVsSharingEntries.end();
       I != E; ++I) {
    if (Deleted.count(*I) == 0)
      delete *I;
  }
}

/// Test whether the given two constants can be allocated the same constant pool
/// entry.
static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
                                      const DataLayout &DL) {
  // Handle the trivial case quickly.
  if (A == B) return true;

  // If they have the same type but weren't the same constant, quickly
  // reject them.
  if (A->getType() == B->getType()) return false;

  // We can't handle structs or arrays.
  if (isa<StructType>(A->getType()) || isa<ArrayType>(A->getType()) ||
      isa<StructType>(B->getType()) || isa<ArrayType>(B->getType()))
    return false;

  // For now, only support constants with the same size.
  uint64_t StoreSize = DL.getTypeStoreSize(A->getType());
  if (StoreSize != DL.getTypeStoreSize(B->getType()) || StoreSize > 128)
    return false;

  Type *IntTy = IntegerType::get(A->getContext(), StoreSize*8);

  // Try constant folding a bitcast of both instructions to an integer.  If we
  // get two identical ConstantInt's, then we are good to share them.  We use
  // the constant folding APIs to do this so that we get the benefit of
  // DataLayout.
  if (isa<PointerType>(A->getType()))
    A = ConstantFoldCastOperand(Instruction::PtrToInt,
                                const_cast<Constant *>(A), IntTy, DL);
  else if (A->getType() != IntTy)
    A = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(A),
                                IntTy, DL);
  if (isa<PointerType>(B->getType()))
    B = ConstantFoldCastOperand(Instruction::PtrToInt,
                                const_cast<Constant *>(B), IntTy, DL);
  else if (B->getType() != IntTy)
    B = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(B),
                                IntTy, DL);

  return A == B;
}

/// Create a new entry in the constant pool or return an existing one.
/// The caller must specify the minimum required alignment for the object.
unsigned MachineConstantPool::getConstantPoolIndex(const Constant *C,
                                                   Align Alignment) {
  if (Alignment > PoolAlignment) PoolAlignment = Alignment;

  // Check to see if we already have this constant.
  //
  // FIXME, this could be made much more efficient for large constant pools.
  for (unsigned i = 0, e = Constants.size(); i != e; ++i)
    if (!Constants[i].isMachineConstantPoolEntry() &&
        CanShareConstantPoolEntry(Constants[i].Val.ConstVal, C, DL)) {
      if (Constants[i].getAlign() < Alignment)
        Constants[i].Alignment = Alignment;
      return i;
    }

  Constants.push_back(MachineConstantPoolEntry(C, Alignment));
  return Constants.size()-1;
}

unsigned MachineConstantPool::getConstantPoolIndex(MachineConstantPoolValue *V,
                                                   Align Alignment) {
  if (Alignment > PoolAlignment) PoolAlignment = Alignment;

  // Check to see if we already have this constant.
  //
  // FIXME, this could be made much more efficient for large constant pools.
  int Idx = V->getExistingMachineCPValue(this, Alignment);
  if (Idx != -1) {
    MachineCPVsSharingEntries.insert(V);
    return (unsigned)Idx;
  }

  Constants.push_back(MachineConstantPoolEntry(V, Alignment));
  return Constants.size()-1;
}

void MachineConstantPool::print(raw_ostream &OS) const {
  if (Constants.empty()) return;

  OS << "Constant Pool:\n";
  for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
    OS << "  cp#" << i << ": ";
    if (Constants[i].isMachineConstantPoolEntry())
      Constants[i].Val.MachineCPVal->print(OS);
    else
      Constants[i].Val.ConstVal->printAsOperand(OS, /*PrintType=*/false);
    OS << ", align=" << Constants[i].getAlign().value();
    OS << "\n";
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MachineConstantPool::dump() const { print(dbgs()); }
#endif