//===- SelectionDAGBuilder.h - Selection-DAG building -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H
#define LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H

#include "StatepointLowering.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SwitchLoweringUtils.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <memory>
#include <numeric>
#include <utility>
#include <vector>

namespace llvm {

class AAResults;
class AllocaInst;
class AtomicCmpXchgInst;
class AtomicRMWInst;
class BasicBlock;
class BranchInst;
class CallBase;
class CallBrInst;
class CallInst;
class CatchPadInst;
class CatchReturnInst;
class CatchSwitchInst;
class CleanupPadInst;
class CleanupReturnInst;
class Constant;
class ConstrainedFPIntrinsic;
class DataLayout;
class DbgValueInst;
class DIExpression;
class DILocalVariable;
class DILocation;
class FenceInst;
class FreezeInst;
class FunctionLoweringInfo;
class GCFunctionInfo;
class GCRelocateInst;
class GCResultInst;
class GCStatepointInst;
class IndirectBrInst;
class InvokeInst;
class LandingPadInst;
class LLVMContext;
class LoadInst;
class MachineBasicBlock;
class MCSymbol;
class PHINode;
class ResumeInst;
class ReturnInst;
class SDDbgValue;
class SelectionDAG;
class StoreInst;
class SwiftErrorValueTracking;
class SwitchInst;
class TargetLibraryInfo;
class TargetMachine;
class Twine;
class Type;
class UnreachableInst;
class Use;
class User;
class VAArgInst;
class Value;
class VPIntrinsic;

//===----------------------------------------------------------------------===//
/// SelectionDAGBuilder - This is the common target-independent lowering
/// implementation that is parameterized by a TargetLowering object.
///
class SelectionDAGBuilder {
  /// The current instruction being visited.
  const Instruction *CurInst = nullptr;

  DenseMap<const Value*, SDValue> NodeMap;

  /// Maps argument values for unused arguments. This is used
  /// to preserve debug information for incoming arguments.
  DenseMap<const Value*, SDValue> UnusedArgNodeMap;

  /// Helper type for DanglingDebugInfoMap.
  class DanglingDebugInfo {
    const DbgValueInst* DI = nullptr;
    DebugLoc dl;
    unsigned SDNodeOrder = 0;

  public:
    DanglingDebugInfo() = default;
    DanglingDebugInfo(const DbgValueInst *di, DebugLoc DL, unsigned SDNO)
        : DI(di), dl(std::move(DL)), SDNodeOrder(SDNO) {}

    const DbgValueInst* getDI() { return DI; }
    DebugLoc getdl() { return dl; }
    unsigned getSDNodeOrder() { return SDNodeOrder; }
  };

  /// Helper type for DanglingDebugInfoMap.
  typedef std::vector<DanglingDebugInfo> DanglingDebugInfoVector;

  /// Keeps track of dbg_values for which we have not yet seen the referent.
  /// We defer handling these until we do see it.
  MapVector<const Value*, DanglingDebugInfoVector> DanglingDebugInfoMap;
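
  // A hedged sketch of the intended defer/resolve flow, using only the
  // helpers declared later in this class (the ordering shown is illustrative,
  // not normative):
  //
  // \code
  //   // While visiting a dbg.value whose operand has no SDValue yet:
  //   addDanglingDebugInfo(DI, DI->getDebugLoc(), SDNodeOrder);
  //   // Later, once the operand V is finally lowered to Val:
  //   resolveDanglingDebugInfo(V, Val); // emits the deferred debug values
  // \endcode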

public:
  /// Loads are not emitted to the program immediately.  We bunch them up and
  /// then emit token factor nodes when possible.  This allows us to get simple
  /// disambiguation between loads without worrying about alias analysis.
  SmallVector<SDValue, 8> PendingLoads;

  /// State used while lowering a statepoint sequence (gc_statepoint,
  /// gc_relocate, and gc_result).  See StatepointLowering.h/cpp for details.
  StatepointLoweringState StatepointLowering;

private:
  /// CopyToReg nodes that copy values to virtual registers for export to other
  /// blocks need to be emitted before any terminator instruction, but they have
  /// no other ordering requirements. We bunch them up and then emit a single
  /// token factor for them just before terminator instructions.
  SmallVector<SDValue, 8> PendingExports;

  /// Similar to loads, nodes corresponding to constrained FP intrinsics are
  /// bunched up and emitted when necessary.  These can be moved across each
  /// other and any (normal) memory operation (load or store), but not across
  /// calls or instructions having unspecified side effects.  As a special
  /// case, constrained FP intrinsics using fpexcept.strict may not be deleted
  /// even if otherwise unused, so they need to be chained before any
  /// terminator instruction (like PendingExports).  We track the latter
  /// set of nodes in a separate list.
  SmallVector<SDValue, 8> PendingConstrainedFP;
  SmallVector<SDValue, 8> PendingConstrainedFPStrict;
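
  // A minimal sketch of how a pending list is folded into the DAG root (the
  // real merging logic lives in updateRoot in the .cpp file; this is only an
  // illustration, assuming Pending already holds the chains to merge):
  //
  // \code
  //   SDValue Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
  //                              MVT::Other, Pending);
  //   DAG.setRoot(Root);
  //   Pending.clear();
  // \endcode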

  /// Update root to include all chains from the Pending list.
  SDValue updateRoot(SmallVectorImpl<SDValue> &Pending);

  /// A unique monotonically increasing number used to order the SDNodes we
  /// create.
  unsigned SDNodeOrder;

  /// Determine the rank by weight of CC in [First,Last]. If CC has more weight
  /// than each cluster in the range, its rank is 0.
  unsigned caseClusterRank(const SwitchCG::CaseCluster &CC,
                           SwitchCG::CaseClusterIt First,
                           SwitchCG::CaseClusterIt Last);

  /// Emit comparison and split W into two subtrees.
  void splitWorkItem(SwitchCG::SwitchWorkList &WorkList,
                     const SwitchCG::SwitchWorkListItem &W, Value *Cond,
                     MachineBasicBlock *SwitchMBB);

  /// Lower W.
  void lowerWorkItem(SwitchCG::SwitchWorkListItem W, Value *Cond,
                     MachineBasicBlock *SwitchMBB,
                     MachineBasicBlock *DefaultMBB);

  /// Peel the top probability case if it exceeds the threshold.
  MachineBasicBlock *
  peelDominantCaseCluster(const SwitchInst &SI,
                          SwitchCG::CaseClusterVector &Clusters,
                          BranchProbability &PeeledCaseProb);

  /// A class which encapsulates all of the information needed to generate a
  /// stack protector check and signals to isel via its state being initialized
  /// that a stack protector needs to be generated.
  ///
  /// *NOTE* The following is a high level documentation of SelectionDAG Stack
  /// Protector Generation. The reason that it is placed here is for a lack of
  /// other good places to stick it.
  ///
  /// High Level Overview of SelectionDAG Stack Protector Generation:
  ///
  /// Previously, generation of stack protectors was done exclusively in the
  /// pre-SelectionDAG Codegen LLVM IR Pass "Stack Protector". This necessitated
  /// splitting basic blocks at the IR level to create the success/failure basic
  /// blocks in the tail of the basic block in question. As a result of this,
  /// calls that would have qualified for the sibling call optimization were no
  /// longer eligible for optimization since said calls were no longer right in
  /// the "tail position" (i.e. the immediate predecessor of a ReturnInst
  /// instruction).
  ///
  /// Then it was noticed that since the sibling call optimization causes the
  /// callee to reuse the caller's stack, if we could delay the generation of
  /// the stack protector check until later in CodeGen after the sibling call
  /// decision was made, we get both the tail call optimization and the stack
  /// protector check!
  ///
  /// A few goals in solving this problem were:
  ///
  ///   1. Preserve the architecture independence of stack protector generation.
  ///
  ///   2. Preserve the normal IR level stack protector check for platforms like
  ///      OpenBSD for which we support platform-specific stack protector
  ///      generation.
  ///
  /// The main problem that guided the present solution is that one cannot
  /// solve this problem in an architecture independent manner at the IR level
  /// only. This is because:
  ///
  ///   1. The decision on whether or not to perform a sibling call on certain
  ///      platforms (for instance i386) requires lower level information
  ///      related to available registers that cannot be known at the IR level.
  ///
  ///   2. Even if the previous point were not true, the decision on whether to
  ///      perform a tail call is done in LowerCallTo in SelectionDAG which
  ///      occurs after the Stack Protector Pass. As a result, one would need to
  ///      put the relevant callinst into the stack protector check success
  ///      basic block (where the return inst is placed) and then move it back
  ///      later at SelectionDAG/MI time before the stack protector check if the
  ///      tail call optimization failed. The MI level option was nixed
  ///      immediately since it would require platform-specific pattern
  ///      matching. The SelectionDAG level option was nixed because
  ///      SelectionDAG only processes one IR level basic block at a time,
  ///      implying one could not create a DAG Combine to move the callinst.
  ///
  /// To get around this problem a few things were realized:
  ///
  ///   1. While one cannot handle multiple IR level basic blocks at the
  ///      SelectionDAG Level, one can generate multiple machine basic blocks
  ///      for one IR level basic block. This is how we handle bit tests and
  ///      switches.
  ///
  ///   2. At the MI level, tail calls are represented via a special return
  ///      MIInst called "tcreturn". Thus if we know the basic block in which we
  ///      wish to insert the stack protector check, we get the correct behavior
  ///      by always inserting the stack protector check right before the return
  ///      statement. This is a "magical transformation" since no matter where
  ///      the stack protector check intrinsic is, we always insert the stack
  ///      protector check code at the end of the BB.
  ///
  /// Given the aforementioned constraints, the following solution was devised:
  ///
  ///   1. On platforms that do not support SelectionDAG stack protector check
  ///      generation, allow for the normal IR level stack protector check
  ///      generation to continue.
  ///
  ///   2. On platforms that do support SelectionDAG stack protector check
  ///      generation:
  ///
  ///     a. Use the IR level stack protector pass to decide if a stack
  ///        protector is required/which BB we insert the stack protector check
  ///        in by reusing the logic already therein. If we wish to generate a
  ///        stack protector check in a basic block, we place a special IR
  ///        intrinsic called llvm.stackprotectorcheck right before the BB's
  ///        returninst or if there is a callinst that could potentially be
  ///        sibling call optimized, before the call inst.
  ///
  ///     b. Then when a BB with said intrinsic is processed, we codegen the BB
  ///        normally via SelectBasicBlock. In said process, when we visit the
  ///        stack protector check, we do not actually emit anything into the
  ///        BB. Instead, we just initialize the stack protector descriptor
  ///        class (which involves stashing information/creating the success
  ///        mbb and the failure mbb if we have not created one for this
  ///        function yet) and export the guard variable that we are going to
  ///        compare.
  ///
  ///     c. After we finish selecting the basic block, in FinishBasicBlock if
  ///        the StackProtectorDescriptor attached to the SelectionDAGBuilder is
  ///        initialized, we produce the validation code with one of these
  ///        techniques:
  ///          1) with a call to a guard check function
  ///          2) with inlined instrumentation
  ///
  ///        1) We insert a call to the check function before the terminator.
  ///
  ///        2) We first find a splice point in the parent basic block
  ///        before the terminator and then splice the terminator of said basic
  ///        block into the success basic block. Then we code-gen a new tail for
  ///        the parent basic block consisting of the two loads, the comparison,
  ///        and finally two branches to the success/failure basic blocks. We
  ///        conclude by code-gening the failure basic block if we have not
  ///        code-gened it already (all stack protector checks we generate in
  ///        the same function use the same failure basic block).
  class StackProtectorDescriptor {
  public:
    StackProtectorDescriptor() = default;

    /// Returns true if all fields of the stack protector descriptor are
    /// initialized implying that we should/are ready to emit a stack protector.
    bool shouldEmitStackProtector() const {
      return ParentMBB && SuccessMBB && FailureMBB;
    }

    bool shouldEmitFunctionBasedCheckStackProtector() const {
      return ParentMBB && !SuccessMBB && !FailureMBB;
    }

    /// Initialize the stack protector descriptor structure for a new basic
    /// block.
    void initialize(const BasicBlock *BB, MachineBasicBlock *MBB,
                    bool FunctionBasedInstrumentation) {
      // Make sure we are not initialized yet.
      assert(!shouldEmitStackProtector() && "Stack Protector Descriptor is "
             "already initialized!");
      ParentMBB = MBB;
      if (!FunctionBasedInstrumentation) {
        SuccessMBB = AddSuccessorMBB(BB, MBB, /* IsLikely */ true);
        FailureMBB = AddSuccessorMBB(BB, MBB, /* IsLikely */ false, FailureMBB);
      }
    }

    /// Reset state that changes when we handle different basic blocks.
    ///
    /// This currently includes:
    ///
    /// 1. The specific basic block we are generating a
    /// stack protector for (ParentMBB).
    ///
    /// 2. The successor machine basic block that will contain the tail of
    /// parent mbb after we create the stack protector check (SuccessMBB). This
    /// BB is visited only on stack protector check success.
    void resetPerBBState() {
      ParentMBB = nullptr;
      SuccessMBB = nullptr;
    }

    /// Reset state that only changes when we switch functions.
    ///
    /// This currently includes:
    ///
    /// 1. FailureMBB since we reuse the failure code path for all stack
    /// protector checks created in an individual function.
    ///
    /// 2. The guard variable since the guard variable we are checking against
    /// is always the same.
    void resetPerFunctionState() {
      FailureMBB = nullptr;
    }

    MachineBasicBlock *getParentMBB() { return ParentMBB; }
    MachineBasicBlock *getSuccessMBB() { return SuccessMBB; }
    MachineBasicBlock *getFailureMBB() { return FailureMBB; }

  private:
    /// The basic block for which we are generating the stack protector.
    ///
    /// As a result of stack protector generation, we will splice the
    /// terminators of this basic block into the successor mbb SuccessMBB and
    /// replace them with a compare/branch to the successor mbbs
    /// SuccessMBB/FailureMBB depending on whether or not the stack protector
    /// was violated.
    MachineBasicBlock *ParentMBB = nullptr;

    /// A basic block visited on stack protector check success that contains the
    /// terminators of ParentMBB.
    MachineBasicBlock *SuccessMBB = nullptr;

    /// The basic block visited on stack protector check failure that will
    /// contain a call to __stack_chk_fail().
    MachineBasicBlock *FailureMBB = nullptr;

    /// Add a successor machine basic block to ParentMBB. If the successor mbb
    /// has not been created yet (i.e. if SuccMBB == nullptr), then the machine
    /// basic block will be created. Assign a large weight if IsLikely is true.
    MachineBasicBlock *AddSuccessorMBB(const BasicBlock *BB,
                                       MachineBasicBlock *ParentMBB,
                                       bool IsLikely,
                                       MachineBasicBlock *SuccMBB = nullptr);
  };
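
  // A rough sketch of the descriptor's per-block lifecycle, using only the
  // member functions above (the actual call sites live in the isel driver
  // between SelectBasicBlock and FinishBasicBlock; this is illustrative):
  //
  // \code
  //   bool NeedsCheck = /* decided by the IR stack protector pass */ false;
  //   if (NeedsCheck)
  //     SPDescriptor.initialize(BB, MBB, /*FunctionBasedInstrumentation=*/false);
  //   // ... select the rest of the block ...
  //   if (SPDescriptor.shouldEmitStackProtector()) {
  //     // Emit the compare/branch using getParentMBB()/getSuccessMBB()/
  //     // getFailureMBB(), then:
  //     SPDescriptor.resetPerBBState();
  //   }
  // \endcode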

private:
  const TargetMachine &TM;

public:
  /// Lowest valid SDNodeOrder. The special case 0 is reserved for scheduling
  /// nodes without a corresponding SDNode.
  static const unsigned LowestSDNodeOrder = 1;

  SelectionDAG &DAG;
  const DataLayout *DL = nullptr;
  AAResults *AA = nullptr;
  const TargetLibraryInfo *LibInfo;

  class SDAGSwitchLowering : public SwitchCG::SwitchLowering {
  public:
    SDAGSwitchLowering(SelectionDAGBuilder *sdb, FunctionLoweringInfo &funcinfo)
        : SwitchCG::SwitchLowering(funcinfo), SDB(sdb) {}

    void addSuccessorWithProb(
        MachineBasicBlock *Src, MachineBasicBlock *Dst,
        BranchProbability Prob = BranchProbability::getUnknown()) override {
      SDB->addSuccessorWithProb(Src, Dst, Prob);
    }

  private:
    SelectionDAGBuilder *SDB;
  };

  // Data related to deferred switch lowerings. Used to construct additional
  // basic blocks in SelectionDAGISel::FinishBasicBlock.
  std::unique_ptr<SDAGSwitchLowering> SL;

  /// A StackProtectorDescriptor structure used to communicate stack protector
  /// information in between SelectBasicBlock and FinishBasicBlock.
  StackProtectorDescriptor SPDescriptor;

  // Emit PHI-node-operand constants only once even if used by multiple
  // PHI nodes.
  DenseMap<const Constant *, unsigned> ConstantsOut;

  /// Information about the function as a whole.
  FunctionLoweringInfo &FuncInfo;

  /// Information about the swifterror values used throughout the function.
  SwiftErrorValueTracking &SwiftError;

  /// Garbage collection metadata for the function.
  GCFunctionInfo *GFI;

  /// Map a landing pad to the call site indexes.
  DenseMap<MachineBasicBlock *, SmallVector<unsigned, 4>> LPadToCallSiteMap;

  /// This is set to true if a call in the current block has been translated as
  /// a tail call. In this case, no subsequent DAG nodes should be created.
  bool HasTailCall = false;

  LLVMContext *Context;

  SelectionDAGBuilder(SelectionDAG &dag, FunctionLoweringInfo &funcinfo,
                      SwiftErrorValueTracking &swifterror, CodeGenOpt::Level ol)
      : SDNodeOrder(LowestSDNodeOrder), TM(dag.getTarget()), DAG(dag),
        SL(std::make_unique<SDAGSwitchLowering>(this, funcinfo)),
        FuncInfo(funcinfo), SwiftError(swifterror) {}

  void init(GCFunctionInfo *gfi, AAResults *AA,
            const TargetLibraryInfo *li);

  /// Clear out the current SelectionDAG and the associated state and prepare
  /// this SelectionDAGBuilder object to be used for a new block. This doesn't
  /// clear out information about additional blocks that are needed to complete
  /// switch lowering or PHI node updating; that information is cleared out as
  /// it is consumed.
  void clear();

  /// Clear the dangling debug information map. This function is separated from
  /// the clear so that debug information that is dangling in a basic block can
  /// be properly resolved in a different basic block. This allows the
  /// SelectionDAG to resolve dangling debug information attached to PHI nodes.
  void clearDanglingDebugInfo();

  /// Return the current virtual root of the Selection DAG, flushing any
  /// PendingLoads items. This must be done before emitting a store or any other
  /// memory node that may need to be ordered after any prior load instructions.
  SDValue getMemoryRoot();

  /// Similar to getMemoryRoot, but also flushes PendingConstrainedFP(Strict)
  /// items. This must be done before emitting any call or any other node
  /// that may need to be ordered after FP instructions due to other side
  /// effects.
  SDValue getRoot();

  /// Similar to getRoot, but instead of flushing all the PendingLoads items,
  /// flush all the PendingExports (and PendingConstrainedFPStrict) items.
  /// It is necessary to do this before emitting a terminator instruction.
  SDValue getControlRoot();
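
  // A hedged illustration of which root each kind of node is meant to chain
  // from (the node details are placeholders; the chain argument is the
  // point):
  //
  // \code
  //   SDValue L = DAG.getLoad(VT, dl, DAG.getRoot(), Ptr, MMO);
  //   PendingLoads.push_back(L.getValue(1));             // defer load ordering
  //   SDValue S = DAG.getStore(getMemoryRoot(), dl, Val, Ptr, MMO);
  //   SDValue Br = DAG.getNode(ISD::BR, dl, MVT::Other, getControlRoot(),
  //                            DAG.getBasicBlock(TargetMBB)); // terminator
  // \endcode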

  SDLoc getCurSDLoc() const {
    return SDLoc(CurInst, SDNodeOrder);
  }

  DebugLoc getCurDebugLoc() const {
    return CurInst ? CurInst->getDebugLoc() : DebugLoc();
  }

  void CopyValueToVirtualRegister(const Value *V, unsigned Reg);

  void visit(const Instruction &I);

  void visit(unsigned Opcode, const User &I);

  /// If there was a virtual register allocated for the value V, emit a
  /// CopyFromReg of the specified type Ty. Return an empty SDValue() otherwise.
  SDValue getCopyFromRegs(const Value *V, Type *Ty);

  /// Register a dbg_value which relies on a Value which we have not yet seen.
  void addDanglingDebugInfo(const DbgValueInst *DI, DebugLoc DL,
                            unsigned Order);

  /// If we have dangling debug info that describes \p Variable, or an
  /// overlapping part of the variable considering the \p Expr, then this method
  /// will drop that debug info as it isn't valid any longer.
  void dropDanglingDebugInfo(const DILocalVariable *Variable,
                             const DIExpression *Expr);

  /// If we saw an earlier dbg_value referring to V, generate the debug data
  /// structures now that we've seen its definition.
  void resolveDanglingDebugInfo(const Value *V, SDValue Val);

  /// For the given dangling debuginfo record, perform last-ditch efforts to
  /// resolve the debuginfo to something that is represented in this DAG. If
  /// this cannot be done, produce an Undef debug value record.
  void salvageUnresolvedDbgValue(DanglingDebugInfo &DDI);

  /// For a given list of Values, attempt to create and record a SDDbgValue in
  /// the SelectionDAG.
  bool handleDebugValue(ArrayRef<const Value *> Values, DILocalVariable *Var,
                        DIExpression *Expr, DebugLoc CurDL, DebugLoc InstDL,
                        unsigned Order, bool IsVariadic);

  /// Evict any dangling debug information, attempting to salvage it first.
  void resolveOrClearDbgInfo();

  SDValue getValue(const Value *V);

  SDValue getNonRegisterValue(const Value *V);
  SDValue getValueImpl(const Value *V);

  void setValue(const Value *V, SDValue NewN) {
    SDValue &N = NodeMap[V];
    assert(!N.getNode() && "Already set a value for this node!");
    N = NewN;
  }

  void setUnusedArgValue(const Value *V, SDValue NewN) {
    SDValue &N = UnusedArgNodeMap[V];
    assert(!N.getNode() && "Already set a value for this node!");
    N = NewN;
  }

  void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
                            MachineBasicBlock *SwitchBB,
                            Instruction::BinaryOps Opc, BranchProbability TProb,
                            BranchProbability FProb, bool InvertCond);
  void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
                                    MachineBasicBlock *FBB,
                                    MachineBasicBlock *CurBB,
                                    MachineBasicBlock *SwitchBB,
                                    BranchProbability TProb, BranchProbability FProb,
                                    bool InvertCond);
  bool ShouldEmitAsBranches(const std::vector<SwitchCG::CaseBlock> &Cases);
  bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB);
  void CopyToExportRegsIfNeeded(const Value *V);
  void ExportFromCurrentBlock(const Value *V);
  void LowerCallTo(const CallBase &CB, SDValue Callee, bool IsTailCall,
                   bool IsMustTailCall, const BasicBlock *EHPadBB = nullptr);

  // Lower range metadata from 0 to N to assert zext to an integer of nearest
  // floor power of two.
  SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I,
                                 SDValue Op);

  void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI,
                                const CallBase *Call, unsigned ArgIdx,
                                unsigned NumArgs, SDValue Callee,
                                Type *ReturnTy, bool IsPatchPoint);

  std::pair<SDValue, SDValue>
  lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
                 const BasicBlock *EHPadBB = nullptr);

  /// When an MBB was split during scheduling, update the
  /// references that need to refer to the last resulting block.
  void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last);

  /// Describes a gc.statepoint or a gc.statepoint-like thing for the purposes
  /// of lowering into a STATEPOINT node.
  struct StatepointLoweringInfo {
    /// Bases[i] is the base pointer for Ptrs[i].  Together they denote the set
    /// of gc pointers this STATEPOINT has to relocate.
    SmallVector<const Value *, 16> Bases;
    SmallVector<const Value *, 16> Ptrs;

    /// The set of gc.relocate calls associated with this gc.statepoint.
    SmallVector<const GCRelocateInst *, 16> GCRelocates;

    /// The full list of gc arguments to the gc.statepoint being lowered.
    ArrayRef<const Use> GCArgs;

    /// The gc.statepoint instruction.
    const Instruction *StatepointInstr = nullptr;

    /// The list of gc transition arguments present in the gc.statepoint being
    /// lowered.
    ArrayRef<const Use> GCTransitionArgs;

    /// The ID that the resulting STATEPOINT instruction has to report.
    unsigned ID = -1;

    /// Information regarding the underlying call instruction.
    TargetLowering::CallLoweringInfo CLI;

    /// The deoptimization state associated with this gc.statepoint call, if
    /// any.
    ArrayRef<const Use> DeoptState;

    /// Flags associated with the meta arguments being lowered.
    uint64_t StatepointFlags = -1;

    /// The number of patchable bytes the call needs to get lowered into.
    unsigned NumPatchBytes = -1;

    /// The exception handling unwind destination, in case this represents an
    /// invoke of gc.statepoint.
    const BasicBlock *EHPadBB = nullptr;

    explicit StatepointLoweringInfo(SelectionDAG &DAG) : CLI(DAG) {}
  };
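
  // A hedged usage sketch for the struct above (field values are
  // placeholders; the full setup, including CLI and the gc operand lists, is
  // performed in StatepointLowering.cpp):
  //
  // \code
  //   StatepointLoweringInfo SI(DAG);
  //   SI.StatepointInstr = &I;
  //   SI.ID = 0;                  // statepoint ID from the call site
  //   SI.NumPatchBytes = 0;
  //   SI.EHPadBB = EHPadBB;       // non-null only when lowering an invoke
  //   SDValue ReturnValue = LowerAsSTATEPOINT(SI);
  // \endcode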

  /// Lower \p SI into a STATEPOINT instruction.
  SDValue LowerAsSTATEPOINT(StatepointLoweringInfo &SI);

  // This function is responsible for the whole statepoint lowering process.
  // It uniformly handles invoke and call statepoints.
  void LowerStatepoint(const GCStatepointInst &I,
                       const BasicBlock *EHPadBB = nullptr);

  void LowerCallSiteWithDeoptBundle(const CallBase *Call, SDValue Callee,
                                    const BasicBlock *EHPadBB);

  void LowerDeoptimizeCall(const CallInst *CI);
  void LowerDeoptimizingReturn();

  void LowerCallSiteWithDeoptBundleImpl(const CallBase *Call, SDValue Callee,
                                        const BasicBlock *EHPadBB,
                                        bool VarArgDisallowed,
                                        bool ForceVoidReturnTy);

  /// Returns the type of FrameIndex and TargetFrameIndex nodes.
  MVT getFrameIndexTy() {
    return DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout());
  }

private:
  // Terminator instructions.
  void visitRet(const ReturnInst &I);
  void visitBr(const BranchInst &I);
  void visitSwitch(const SwitchInst &I);
  void visitIndirectBr(const IndirectBrInst &I);
  void visitUnreachable(const UnreachableInst &I);
  void visitCleanupRet(const CleanupReturnInst &I);
  void visitCatchSwitch(const CatchSwitchInst &I);
  void visitCatchRet(const CatchReturnInst &I);
  void visitCatchPad(const CatchPadInst &I);
  void visitCleanupPad(const CleanupPadInst &CPI);

  BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
                                       const MachineBasicBlock *Dst) const;
  void addSuccessorWithProb(
      MachineBasicBlock *Src, MachineBasicBlock *Dst,
      BranchProbability Prob = BranchProbability::getUnknown());

public:
  void visitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB);
  void visitSPDescriptorParent(StackProtectorDescriptor &SPD,
                               MachineBasicBlock *ParentBB);
  void visitSPDescriptorFailure(StackProtectorDescriptor &SPD);
  void visitBitTestHeader(SwitchCG::BitTestBlock &B,
                          MachineBasicBlock *SwitchBB);
  void visitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB,
                        BranchProbability BranchProbToNext, unsigned Reg,
                        SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB);
  void visitJumpTable(SwitchCG::JumpTable &JT);
  void visitJumpTableHeader(SwitchCG::JumpTable &JT,
                            SwitchCG::JumpTableHeader &JTH,
                            MachineBasicBlock *SwitchBB);

private:
  // These all get lowered before this pass.
  void visitInvoke(const InvokeInst &I);
  void visitCallBr(const CallBrInst &I);
  void visitResume(const ResumeInst &I);

  void visitUnary(const User &I, unsigned Opcode);
  void visitFNeg(const User &I) { visitUnary(I, ISD::FNEG); }

  void visitBinary(const User &I, unsigned Opcode);
  void visitShift(const User &I, unsigned Opcode);
  void visitAdd(const User &I)  { visitBinary(I, ISD::ADD); }
  void visitFAdd(const User &I) { visitBinary(I, ISD::FADD); }
  void visitSub(const User &I)  { visitBinary(I, ISD::SUB); }
  void visitFSub(const User &I) { visitBinary(I, ISD::FSUB); }
  void visitMul(const User &I)  { visitBinary(I, ISD::MUL); }
  void visitFMul(const User &I) { visitBinary(I, ISD::FMUL); }
  void visitURem(const User &I) { visitBinary(I, ISD::UREM); }
  void visitSRem(const User &I) { visitBinary(I, ISD::SREM); }
  void visitFRem(const User &I) { visitBinary(I, ISD::FREM); }
  void visitUDiv(const User &I) { visitBinary(I, ISD::UDIV); }
  void visitSDiv(const User &I);
  void visitFDiv(const User &I) { visitBinary(I, ISD::FDIV); }
  void visitAnd (const User &I) { visitBinary(I, ISD::AND); }
  void visitOr  (const User &I) { visitBinary(I, ISD::OR); }
  void visitXor (const User &I) { visitBinary(I, ISD::XOR); }
  void visitShl (const User &I) { visitShift(I, ISD::SHL); }
  void visitLShr(const User &I) { visitShift(I, ISD::SRL); }
  void visitAShr(const User &I) { visitShift(I, ISD::SRA); }
  void visitICmp(const User &I);
  void visitFCmp(const User &I);
  // Visit the conversion instructions.
  void visitTrunc(const User &I);
  void visitZExt(const User &I);
  void visitSExt(const User &I);
  void visitFPTrunc(const User &I);
  void visitFPExt(const User &I);
  void visitFPToUI(const User &I);
  void visitFPToSI(const User &I);
  void visitUIToFP(const User &I);
  void visitSIToFP(const User &I);
  void visitPtrToInt(const User &I);
  void visitIntToPtr(const User &I);
  void visitBitCast(const User &I);
  void visitAddrSpaceCast(const User &I);

  void visitExtractElement(const User &I);
  void visitInsertElement(const User &I);
  void visitShuffleVector(const User &I);

  void visitExtractValue(const User &I);
  void visitInsertValue(const User &I);
  void visitLandingPad(const LandingPadInst &LP);

  void visitGetElementPtr(const User &I);
  void visitSelect(const User &I);

  void visitAlloca(const AllocaInst &I);
  void visitLoad(const LoadInst &I);
  void visitStore(const StoreInst &I);
  void visitMaskedLoad(const CallInst &I, bool IsExpanding = false);
  void visitMaskedStore(const CallInst &I, bool IsCompressing = false);
  void visitMaskedGather(const CallInst &I);
  void visitMaskedScatter(const CallInst &I);
  void visitAtomicCmpXchg(const AtomicCmpXchgInst &I);
  void visitAtomicRMW(const AtomicRMWInst &I);
  void visitFence(const FenceInst &I);
  void visitPHI(const PHINode &I);
  void visitCall(const CallInst &I);
  bool visitMemCmpBCmpCall(const CallInst &I);
  bool visitMemPCpyCall(const CallInst &I);
  bool visitMemChrCall(const CallInst &I);
  bool visitStrCpyCall(const CallInst &I, bool isStpcpy);
  bool visitStrCmpCall(const CallInst &I);
  bool visitStrLenCall(const CallInst &I);
  bool visitStrNLenCall(const CallInst &I);
  bool visitUnaryFloatCall(const CallInst &I, unsigned Opcode);
  bool visitBinaryFloatCall(const CallInst &I, unsigned Opcode);
  void visitAtomicLoad(const LoadInst &I);
  void visitAtomicStore(const StoreInst &I);
  void visitLoadFromSwiftError(const LoadInst &I);
  void visitStoreToSwiftError(const StoreInst &I);
  void visitFreeze(const FreezeInst &I);

  void visitInlineAsm(const CallBase &Call,
                      const BasicBlock *EHPadBB = nullptr);
  void visitIntrinsicCall(const CallInst &I, unsigned Intrinsic);
  void visitTargetIntrinsic(const CallInst &I, unsigned Intrinsic);
  void visitConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI);
  void visitVectorPredicationIntrinsic(const VPIntrinsic &VPIntrin);

  void visitVAStart(const CallInst &I);
  void visitVAArg(const VAArgInst &I);
  void visitVAEnd(const CallInst &I);
  void visitVACopy(const CallInst &I);
  void visitStackmap(const CallInst &I);
  void visitPatchpoint(const CallBase &CB, const BasicBlock *EHPadBB = nullptr);

  // These two are implemented in StatepointLowering.cpp.
  void visitGCRelocate(const GCRelocateInst &Relocate);
  void visitGCResult(const GCResultInst &I);

  void visitVectorReduce(const CallInst &I, unsigned Intrinsic);
  void visitVectorReverse(const CallInst &I);
  void visitVectorSplice(const CallInst &I);
  void visitStepVector(const CallInst &I);

  void visitUserOp1(const Instruction &I) {
    llvm_unreachable("UserOp1 should not exist at instruction selection time!");
  }
  void visitUserOp2(const Instruction &I) {
    llvm_unreachable("UserOp2 should not exist at instruction selection time!");
  }

  void processIntegerCallValue(const Instruction &I,
                               SDValue Value, bool IsSigned);

  void HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB);

  void emitInlineAsmError(const CallBase &Call, const Twine &Message);

  /// If V is a function argument then create a corresponding DBG_VALUE machine
  /// instruction for it now. At the end of instruction selection, they will be
  /// inserted into the entry BB.
  bool EmitFuncArgumentDbgValue(const Value *V, DILocalVariable *Variable,
                                DIExpression *Expr, DILocation *DL,
                                bool IsDbgDeclare, const SDValue &N);

  /// Return the next block after MBB, or nullptr if there is none.
  MachineBasicBlock *NextBlock(MachineBasicBlock *MBB);

  /// Update the DAG and DAG builder with the relevant information after
  /// a new root node has been created which could be a tail call.
  void updateDAGForMaybeTailCall(SDValue MaybeTC);

  /// Return the appropriate SDDbgValue based on N.
  SDDbgValue *getDbgValue(SDValue N, DILocalVariable *Variable,
                          DIExpression *Expr, const DebugLoc &dl,
                          unsigned DbgSDNodeOrder);

  /// Lowers a CallInst to an external symbol.
  void lowerCallToExternalSymbol(const CallInst &I, const char *FunctionName);

  SDValue lowerStartEH(SDValue Chain, const BasicBlock *EHPadBB,
                       MCSymbol *&BeginLabel);
  SDValue lowerEndEH(SDValue Chain, const InvokeInst *II,
                     const BasicBlock *EHPadBB, MCSymbol *BeginLabel);
};

/// This struct represents the registers (physical or virtual)
/// that a particular set of values is assigned, and the type information about
/// the value. The most common situation is to represent one value at a time,
/// but struct or array values are handled element-wise as multiple values.  The
/// splitting of aggregates is performed recursively, so that we never have
/// aggregate-typed registers. The values at this point do not necessarily have
/// legal types, so each value may require one or more registers of some legal
/// type.
///
struct RegsForValue {
  /// The value types of the values, which may not be legal, and
  /// may need to be promoted or synthesized from one or more registers.
  SmallVector<EVT, 4> ValueVTs;

  /// The value types of the registers. This is the same size as ValueVTs and it
  /// records, for each value, what the type of the assigned register or
  /// registers are. (Individual values are never synthesized from more than one
  /// type of register.)
  ///
  /// With virtual registers, the contents of RegVTs is redundant with TLI's
  /// getRegisterType member function, however with physical registers
  /// it is necessary to have a separate record of the types.
  SmallVector<MVT, 4> RegVTs;

  /// This list holds the registers assigned to the values.
  /// Each legal or promoted value requires one register, and each
  /// expanded value requires multiple registers.
  SmallVector<unsigned, 4> Regs;

  /// This list holds the number of registers for each value.
  SmallVector<unsigned, 4> RegCount;

  /// Records if this value needs to be treated in an ABI-dependent manner,
  /// different to normal type legalization.
  Optional<CallingConv::ID> CallConv;

  RegsForValue() = default;
  RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt, EVT valuevt,
               Optional<CallingConv::ID> CC = None);
  RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
               const DataLayout &DL, unsigned Reg, Type *Ty,
               Optional<CallingConv::ID> CC);

  bool isABIMangled() const {
    return CallConv.hasValue();
  }

  /// Add the specified values to this one.
  void append(const RegsForValue &RHS) {
    ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
    RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
    Regs.append(RHS.Regs.begin(), RHS.Regs.end());
    RegCount.push_back(RHS.Regs.size());
  }

  /// Emit a series of CopyFromReg nodes that copy from this value and return
  /// the result as a ValueVTs value. This uses Chain/Flag as the input and
  /// updates them for the output Chain/Flag. If the Flag pointer is nullptr, no
  /// flag is used.
  SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo,
                          const SDLoc &dl, SDValue &Chain, SDValue *Flag,
                          const Value *V = nullptr) const;

  /// Emit a series of CopyToReg nodes that copy the specified value into the
  /// registers specified by this object. This uses Chain/Flag as the input and
  /// updates them for the output Chain/Flag. If the Flag pointer is nullptr, no
  /// flag is used. If V is not nullptr, then it is used in printing better
  /// diagnostic messages on error.
  void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl,
                     SDValue &Chain, SDValue *Flag, const Value *V = nullptr,
                     ISD::NodeType PreferredExtendType = ISD::ANY_EXTEND) const;

  /// Add this value to the specified inlineasm node operand list. This adds the
  /// code marker, matching input operand index (if applicable), and includes
  /// the number of values added into it.
  void AddInlineAsmOperands(unsigned Code, bool HasMatching,
                            unsigned MatchingIdx, const SDLoc &dl,
                            SelectionDAG &DAG, std::vector<SDValue> &Ops) const;

  /// Check if the total RegCount is greater than one.
  bool occupiesMultipleRegs() const {
    return std::accumulate(RegCount.begin(), RegCount.end(), 0) > 1;
  }

  /// Return a list of registers and their sizes.
  SmallVector<std::pair<unsigned, TypeSize>, 4> getRegsAndSizes() const;
};
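
// A minimal usage sketch for RegsForValue, assuming a virtual register Reg
// has already been assigned to value V (all names here are illustrative):
//
// \code
//   RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), Reg,
//                    V->getType(), None);
//   SDValue Chain = DAG.getEntryNode();
//   SDValue Glue;
//   RFV.getCopyToRegs(Val, DAG, dl, Chain, &Glue, V);   // write the registers
//   SDValue Out = RFV.getCopyFromRegs(DAG, FuncInfo, dl,
//                                     Chain, &Glue, V); // read them back
// \endcode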

} // end namespace llvm

#endif // LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H