//===- CodeGenCommonISel.h - Common code between ISels ---------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares common utilities that are shared between SelectionDAG and
// GlobalISel frameworks.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_CODEGENCOMMONISEL_H
#define LLVM_CODEGEN_CODEGENCOMMONISEL_H

#include "llvm/CodeGen/MachineBasicBlock.h"
#include <cassert>

namespace llvm {

class BasicBlock;

/// Encapsulates all of the information needed to generate a stack protector
/// check, and signals to isel when initialized that one needs to be generated.
///
/// *NOTE* The following is a high level documentation of SelectionDAG Stack
/// Protector Generation. It has since been ported to be shared with
/// GlobalISel, without any significant changes.
///
/// High Level Overview of ISel Stack Protector Generation:
///
/// Previously, the "stack protector" IR pass handled stack protector
/// generation. This necessitated splitting basic blocks at the IR level to
/// create the success/failure basic blocks in the tail of the basic block in
/// question. As a result of this, calls that would have qualified for the
/// sibling call optimization were no longer eligible for optimization since
/// said calls were no longer right in the "tail position" (i.e. the immediate
/// predecessor of a ReturnInst instruction).
///
/// Since the sibling call optimization causes the callee to reuse the caller's
/// stack, if we could delay the generation of the stack protector check until
/// later in CodeGen after the sibling call decision was made, we get both the
/// tail call optimization and the stack protector check!
///
/// A few goals in solving this problem were:
///
///   1. Preserve the architecture independence of stack protector generation.
///
///   2. Preserve the normal IR level stack protector check for platforms like
///      OpenBSD for which we support platform-specific stack protector
///      generation.
///
/// The main problem that guided the present solution is that one cannot
/// solve this problem in an architecture independent manner at the IR level
/// only. This is because:
///
///   1. The decision on whether or not to perform a sibling call on certain
///      platforms (for instance i386) requires lower level information
///      related to available registers that cannot be known at the IR level.
///
///   2. Even if the previous point were not true, the decision on whether to
///      perform a tail call is made in LowerCallTo in SelectionDAG (or
///      CallLowering in GlobalISel), which occurs after the Stack Protector
///      Pass. As a result, one would need to put the relevant callinst into the
///      stack protector check success basic block (where the return inst is
///      placed) and then move it back later at ISel/MI time before the
///      stack protector check if the tail call optimization failed. The MI
///      level option was nixed immediately since it would require
///      platform-specific pattern matching. The ISel level option was
///      nixed because SelectionDAG only processes one IR level basic block at a
///      time, implying one could not create a DAG Combine to move the callinst.
///
/// To get around this problem:
///
///   1. While SelectionDAG can only process one IR level basic block at a
///      time, we can generate multiple machine basic blocks for that one IR
///      level basic block. This is how we handle bit tests and switches.
///
///   2. At the MI level, tail calls are represented via a special return
///      MachineInstr called "tcreturn". Thus if we know the basic block in
///      which we wish to insert the stack protector check, we get the correct
///      behavior by always inserting the stack protector check right before
///      the return statement. This is a "magical transformation" since no
///      matter where the stack protector check intrinsic is, we always insert
///      the stack protector check code at the end of the BB.
///
/// Given the aforementioned constraints, the following solution was devised:
///
///   1. On platforms that do not support ISel stack protector check
///      generation, allow for the normal IR level stack protector check
///      generation to continue.
///
///   2. On platforms that do support ISel stack protector check
///      generation:
///
///     a. Use the IR level stack protector pass, reusing the logic already
///        there, to decide whether a stack protector is required and which BB
///        to insert the stack protector check in.
///
///     b. After we finish selecting the basic block, we produce the validation
///        code with one of these techniques:
///          1) with a call to a guard check function
///          2) with inlined instrumentation
///
///        1) We insert a call to the check function before the terminator.
///
///        2) We first find a splice point in the parent basic block
///        before the terminator and then splice the terminator of said basic
///        block into the success basic block. Then we code-gen a new tail for
///        the parent basic block consisting of the two loads, the comparison,
///        and finally two branches to the success/failure basic blocks. We
///        conclude by code-gening the failure basic block if we have not
///        code-gened it already (all stack protector checks we generate in
///        the same function use the same failure basic block).
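///
/// A minimal usage sketch (hypothetical surrounding code; assumes \c SPD is a
/// StackProtectorDescriptor, \c BB / \c MBB are the IR and machine basic
/// blocks being selected, and \c needsStackProtectorCheck is a predicate
/// supplied by the caller):
///
/// \code
///   if (needsStackProtectorCheck) {
///     SPD.initialize(BB, MBB, /*FunctionBasedInstrumentation=*/false);
///     // ... select the block, then emit the guard load/compare/branch using
///     // SPD.getParentMBB(), SPD.getSuccessMBB() and SPD.getFailureMBB() ...
///     SPD.resetPerBBState();
///   }
///   // Once the whole function has been selected:
///   SPD.resetPerFunctionState();
/// \endcode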
class StackProtectorDescriptor {
public:
  StackProtectorDescriptor() = default;

  /// Returns true if all fields of the stack protector descriptor are
  /// initialized implying that we should/are ready to emit a stack protector.
  bool shouldEmitStackProtector() const {
    return ParentMBB && SuccessMBB && FailureMBB;
  }

  /// Returns true if the stack protector check should be emitted as a call to
  /// a guard check function (function-based instrumentation), in which case no
  /// success/failure blocks are created.
  bool shouldEmitFunctionBasedCheckStackProtector() const {
    return ParentMBB && !SuccessMBB && !FailureMBB;
  }

  /// Initialize the stack protector descriptor structure for a new basic
  /// block.
  void initialize(const BasicBlock *BB, MachineBasicBlock *MBB,
                  bool FunctionBasedInstrumentation) {
    // Make sure we are not initialized yet.
    assert(!shouldEmitStackProtector() && "Stack Protector Descriptor is "
                                          "already initialized!");
    ParentMBB = MBB;
    if (!FunctionBasedInstrumentation) {
      SuccessMBB = addSuccessorMBB(BB, MBB, /* IsLikely */ true);
      FailureMBB = addSuccessorMBB(BB, MBB, /* IsLikely */ false, FailureMBB);
    }
  }

  /// Reset state that changes when we handle different basic blocks.
  ///
  /// This currently includes:
  ///
  /// 1. The specific basic block we are generating a
  /// stack protector for (ParentMBB).
  ///
  /// 2. The successor machine basic block that will contain the tail of
  /// parent mbb after we create the stack protector check (SuccessMBB). This
  /// BB is visited only on stack protector check success.
  void resetPerBBState() {
    ParentMBB = nullptr;
    SuccessMBB = nullptr;
  }

  /// Reset state that only changes when we switch functions.
  ///
  /// This currently includes:
  ///
  /// 1. FailureMBB since we reuse the failure code path for all stack
  /// protector checks created in an individual function.
  ///
  /// 2. The guard variable since the guard variable we are checking against is
  /// always the same.
  void resetPerFunctionState() { FailureMBB = nullptr; }

  MachineBasicBlock *getParentMBB() { return ParentMBB; }
  MachineBasicBlock *getSuccessMBB() { return SuccessMBB; }
  MachineBasicBlock *getFailureMBB() { return FailureMBB; }

private:
  /// The basic block for which we are generating the stack protector.
  ///
  /// As a result of stack protector generation, we will splice the
  /// terminators of this basic block into the successor mbb SuccessMBB and
  /// replace them with a compare/branch to the successor mbbs
  /// SuccessMBB/FailureMBB depending on whether or not the stack protector
  /// was violated.
  MachineBasicBlock *ParentMBB = nullptr;

  /// A basic block visited on stack protector check success that contains the
  /// terminators of ParentMBB.
  MachineBasicBlock *SuccessMBB = nullptr;

  /// A basic block visited on stack protector check failure that contains a
  /// call to __stack_chk_fail().
  MachineBasicBlock *FailureMBB = nullptr;

  /// Add a successor machine basic block to ParentMBB. If the successor mbb
  /// has not been created yet (i.e. if SuccMBB is null), then the machine
  /// basic block will be created. Assign a large weight if IsLikely is true.
  MachineBasicBlock *addSuccessorMBB(const BasicBlock *BB,
                                     MachineBasicBlock *ParentMBB,
                                     bool IsLikely,
                                     MachineBasicBlock *SuccMBB = nullptr);
};

/// Find the split point at which to splice the end of BB into its success
/// stack protector check machine basic block.
///
/// On many platforms, due to ABI constraints, terminators, even before
/// register allocation, use physical registers. This creates an issue for us
/// since physical registers at this point cannot travel across basic
/// blocks. Luckily, SelectionDAG always moves physical registers into vregs
/// when they enter functions and moves them through a sequence of copies back
/// into the physical registers right before the terminator, creating a
/// ``Terminator Sequence''. This function searches for the beginning of the
/// terminator sequence so that we can ensure that we splice off not just the
/// terminator, but additionally the copies that move the vregs into the
/// physical registers.
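///
/// For illustration, a rough sketch of how a caller might use the returned
/// iterator (hypothetical surrounding code; assumes \c ParentMBB, \c
/// SuccessMBB and \c TII are already in scope):
///
/// \code
///   // Splice the whole terminator sequence, not just the terminator itself,
///   // into the success block.
///   MachineBasicBlock::iterator SplitPoint =
///       findSplitPointForStackProtector(ParentMBB, *TII);
///   SuccessMBB->splice(SuccessMBB->end(), ParentMBB, SplitPoint,
///                      ParentMBB->end());
/// \endcode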
MachineBasicBlock::iterator
findSplitPointForStackProtector(MachineBasicBlock *BB,
                                const TargetInstrInfo &TII);

/// Evaluates if the specified FP class test is an inversion of a simpler test.
/// An example is the test "inf|normal|subnormal|zero", which is an inversion
/// of "nan".
/// \param Test The test as specified in the 'is_fpclass' intrinsic invocation.
/// \returns The inverted test, or zero if the inversion does not produce a
/// simpler test.
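///
/// A small illustration of the example above (a sketch, assuming the
/// FPClassTest-style bit masks from llvm/ADT/FloatingPointMode.h):
///
/// \code
///   // "inf|normal|subnormal|zero" covers everything except "nan", so the
///   // simpler inverted test is expected to be fcNan.
///   unsigned Test = fcInf | fcNormal | fcSubnormal | fcZero;
///   unsigned InvertedTest = getInvertedFPClassTest(Test);
/// \endcode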
unsigned getInvertedFPClassTest(unsigned Test);

/// Assuming the instruction \p MI is going to be deleted, attempt to salvage
/// debug users of \p MI by writing the effect of \p MI in a DIExpression.
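///
/// A usage sketch (hypothetical surrounding code; assumes \c MRI and \c MI are
/// in scope and that the debug operands referencing \c MI's def have already
/// been collected):
///
/// \code
///   SmallVector<MachineOperand *, 2> DbgUsers;
///   // ... populate DbgUsers with the DBG_VALUE operands that use MI ...
///   salvageDebugInfoForDbgValue(MRI, MI, DbgUsers);
///   MI.eraseFromParent();
/// \endcode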
void salvageDebugInfoForDbgValue(const MachineRegisterInfo &MRI,
                                 MachineInstr &MI,
                                 ArrayRef<MachineOperand *> DbgUsers);

} // namespace llvm

#endif // LLVM_CODEGEN_CODEGENCOMMONISEL_H