1 //===- ThreadSafetyTIL.cpp -------------------------------------*- C++ --*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT in the llvm repository for details.
7 //
8 //===----------------------------------------------------------------------===//
9
10 #include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
11 #include "clang/Analysis/Analyses/ThreadSafetyTraverse.h"
12
13 namespace clang {
14 namespace threadSafety {
15 namespace til {
16
17
getUnaryOpcodeString(TIL_UnaryOpcode Op)18 StringRef getUnaryOpcodeString(TIL_UnaryOpcode Op) {
19 switch (Op) {
20 case UOP_Minus: return "-";
21 case UOP_BitNot: return "~";
22 case UOP_LogicNot: return "!";
23 }
24 return "";
25 }
26
27
getBinaryOpcodeString(TIL_BinaryOpcode Op)28 StringRef getBinaryOpcodeString(TIL_BinaryOpcode Op) {
29 switch (Op) {
30 case BOP_Mul: return "*";
31 case BOP_Div: return "/";
32 case BOP_Rem: return "%";
33 case BOP_Add: return "+";
34 case BOP_Sub: return "-";
35 case BOP_Shl: return "<<";
36 case BOP_Shr: return ">>";
37 case BOP_BitAnd: return "&";
38 case BOP_BitXor: return "^";
39 case BOP_BitOr: return "|";
40 case BOP_Eq: return "==";
41 case BOP_Neq: return "!=";
42 case BOP_Lt: return "<";
43 case BOP_Leq: return "<=";
44 case BOP_LogicAnd: return "&&";
45 case BOP_LogicOr: return "||";
46 }
47 return "";
48 }
49
50
// Forces evaluation of this Future: runs compute(), caches the result in
// Result, and records completion in Status.  Returns the computed SExpr.
SExpr* Future::force() {
  // NOTE(review): setting FS_evaluating before compute() presumably lets
  // re-entrant forcing (a cycle) be detected elsewhere -- confirm.
  Status = FS_evaluating;
  Result = compute();
  Status = FS_done;
  return Result;
}
57
58
addPredecessor(BasicBlock * Pred)59 unsigned BasicBlock::addPredecessor(BasicBlock *Pred) {
60 unsigned Idx = Predecessors.size();
61 Predecessors.reserveCheck(1, Arena);
62 Predecessors.push_back(Pred);
63 for (SExpr *E : Args) {
64 if (Phi* Ph = dyn_cast<Phi>(E)) {
65 Ph->values().reserveCheck(1, Arena);
66 Ph->values().push_back(nullptr);
67 }
68 }
69 return Idx;
70 }
71
72
reservePredecessors(unsigned NumPreds)73 void BasicBlock::reservePredecessors(unsigned NumPreds) {
74 Predecessors.reserve(NumPreds, Arena);
75 for (SExpr *E : Args) {
76 if (Phi* Ph = dyn_cast<Phi>(E)) {
77 Ph->values().reserve(NumPreds, Arena);
78 }
79 }
80 }
81
82
83 // If E is a variable, then trace back through any aliases or redundant
84 // Phi nodes to find the canonical definition.
const SExpr *getCanonicalVal(const SExpr *E) {
  for (;;) {
    if (const auto *Var = dyn_cast<Variable>(E)) {
      // A let-bound variable is an alias: follow it to its definition.
      if (Var->kind() == Variable::VK_Let) {
        E = Var->definition();
        continue;
      }
    }
    if (const auto *Ph = dyn_cast<Phi>(E)) {
      // A single-value Phi is an alias for its one incoming value.
      if (Ph->status() == Phi::PH_SingleVal) {
        E = Ph->values()[0];
        continue;
      }
    }
    // Neither an alias nor a redundant Phi: E is canonical.
    return E;
  }
}
103
104
105 // If E is a variable, then trace back through any aliases or redundant
106 // Phi nodes to find the canonical definition.
107 // The non-const version will simplify incomplete Phi nodes.
SExpr *simplifyToCanonicalVal(SExpr *E) {
  while (true) {
    if (auto *V = dyn_cast<Variable>(E)) {
      // Only let-bound variables are aliases that can be followed.
      if (V->kind() != Variable::VK_Let)
        return V;
      // Eliminate redundant variables, e.g. x = y, or x = 5,
      // but keep anything more complicated.
      if (til::ThreadSafetyTIL::isTrivial(V->definition())) {
        E = V->definition();
        continue;
      }
      return V;
    }
    if (auto *Ph = dyn_cast<Phi>(E)) {
      // An incomplete Phi may turn out to be single-valued once its
      // arguments are simplified, so do that before checking status.
      if (Ph->status() == Phi::PH_Incomplete)
        simplifyIncompleteArg(Ph);
      // Eliminate redundant Phi nodes.
      if (Ph->status() == Phi::PH_SingleVal) {
        E = Ph->values()[0];
        continue;
      }
    }
    // Not a followable alias: E is as canonical as it gets.
    return E;
  }
}
133
134
135 // Trace the arguments of an incomplete Phi node to see if they have the same
136 // canonical definition. If so, mark the Phi node as redundant.
// simplifyToCanonicalVal() will recursively call simplifyIncompleteArg().
void simplifyIncompleteArg(til::Phi *Ph) {
  assert(Ph && Ph->status() == Phi::PH_Incomplete);

  // eliminate infinite recursion -- assume that this node is not redundant.
  // (simplifyToCanonicalVal below may reach this Phi again; the MultiVal
  // status stops it from recursing back into this function.)
  Ph->setStatus(Phi::PH_MultiVal);

  // If every argument (ignoring direct self-references) simplifies to the
  // same canonical value, the Phi is redundant: mark it single-valued.
  SExpr *E0 = simplifyToCanonicalVal(Ph->values()[0]);
  for (unsigned i=1, n=Ph->values().size(); i<n; ++i) {
    SExpr *Ei = simplifyToCanonicalVal(Ph->values()[i]);
    if (Ei == Ph)
      continue; // Recursive reference to itself.  Don't count.
    if (Ei != E0) {
      return; // Status is already set to MultiVal.
    }
  }
  Ph->setStatus(Phi::PH_SingleVal);
}
155
156
157 // Renumbers the arguments and instructions to have unique, sequential IDs.
renumberInstrs(int ID)158 int BasicBlock::renumberInstrs(int ID) {
159 for (auto *Arg : Args)
160 Arg->setID(this, ID++);
161 for (auto *Instr : Instrs)
162 Instr->setID(this, ID++);
163 TermInstr->setID(this, ID++);
164 return ID;
165 }
166
167 // Sorts the CFGs blocks using a reverse post-order depth-first traversal.
168 // Each block will be written into the Blocks array in order, and its BlockID
169 // will be set to the index in the array. Sorting should start from the entry
170 // block, and ID should be the total number of blocks.
int BasicBlock::topologicalSort(SimpleArray<BasicBlock*>& Blocks, int ID) {
  // Depth-first traversal; Visited guards against re-entering a block.
  if (Visited) return ID;
  Visited = true;
  for (auto *Block : successors())
    ID = Block->topologicalSort(Blocks, ID);
  // set ID and update block array in place.
  // We may lose pointers to unreachable blocks.
  // Blocks is filled from the back: ID counts down from Blocks.size(), so
  // the value returned to the outermost caller is the number of unfilled
  // front slots, i.e. the unreachable-block count (see computeNormalForm).
  assert(ID > 0);
  BlockID = --ID;
  Blocks[BlockID] = this;
  return ID;
}
183
184 // Performs a reverse topological traversal, starting from the exit block and
185 // following back-edges. The dominator is serialized before any predecessors,
186 // which guarantees that all blocks are serialized after their dominator and
187 // before their post-dominator (because it's a reverse topological traversal).
188 // ID should be initially set to 0.
189 //
190 // This sort assumes that (1) dominators have been computed, (2) there are no
191 // critical edges, and (3) the entry block is reachable from the exit block
// and no blocks are accessible via traversal of back-edges from the exit that
// weren't accessible via forward edges from the entry.
int BasicBlock::topologicalFinalSort(SimpleArray<BasicBlock*>& Blocks, int ID) {
  // Visited is assumed to have been set to true by topologicalSort.  This
  // pass resets it to false, so !Visited means this pass has already
  // emitted the block.
  if (!Visited) return ID;
  Visited = false;
  // Emit the dominator first, then all predecessors, then this block; that
  // yields the ordering described in the comment above.
  if (DominatorNode.Parent)
    ID = DominatorNode.Parent->topologicalFinalSort(Blocks, ID);
  for (auto *Pred : Predecessors)
    ID = Pred->topologicalFinalSort(Blocks, ID);
  assert(static_cast<size_t>(ID) < Blocks.size());
  // Unlike topologicalSort, blocks are numbered and stored front-to-back.
  BlockID = ID++;
  Blocks[BlockID] = this;
  return ID;
}
208
209 // Computes the immediate dominator of the current block. Assumes that all of
210 // its predecessors have already computed their dominators. This is achieved
211 // by visiting the nodes in topological order.
void BasicBlock::computeDominator() {
  BasicBlock *Candidate = nullptr;
  // Walk backwards from each predecessor to find the common dominator node.
  for (auto *Pred : Predecessors) {
    // Skip back-edges: a predecessor with a larger-or-equal BlockID comes
    // later in the topological order and has no dominator info yet.
    if (Pred->BlockID >= BlockID) continue;
    // If we don't yet have a candidate for dominator, take this one.
    if (Candidate == nullptr) {
      Candidate = Pred;
      continue;
    }
    // Walk the alternate and current candidate back to find a common ancestor.
    // This is the classic "intersect" step (cf. Cooper-Harvey-Kennedy):
    // whichever node appears later in topological order (larger BlockID) is
    // hoisted to its dominator parent until the two walks meet.
    auto *Alternate = Pred;
    while (Alternate != Candidate) {
      if (Candidate->BlockID > Alternate->BlockID)
        Candidate = Candidate->DominatorNode.Parent;
      else
        Alternate = Alternate->DominatorNode.Parent;
    }
  }
  DominatorNode.Parent = Candidate;
  // Subtree size starts at 1 (just this node); computeNodeSize() accumulates
  // children later.
  DominatorNode.SizeOfSubTree = 1;
}
235
236 // Computes the immediate post-dominator of the current block. Assumes that all
237 // of its successors have already computed their post-dominators. This is
238 // achieved visiting the nodes in reverse topological order.
void BasicBlock::computePostDominator() {
  BasicBlock *Candidate = nullptr;
  // Walk forward from each successor to find the common post-dominator node.
  for (auto *Succ : successors()) {
    // Skip back-edges: a successor with a smaller-or-equal BlockID comes
    // earlier in the final order and has no post-dominator info yet.
    if (Succ->BlockID <= BlockID) continue;
    // If we don't yet have a candidate for post-dominator, take this one.
    if (Candidate == nullptr) {
      Candidate = Succ;
      continue;
    }
    // Walk the alternate and current candidate back to find a common ancestor.
    // Mirror image of computeDominator(): hoist whichever node appears
    // earlier in the order (smaller BlockID) to its post-dominator parent.
    auto *Alternate = Succ;
    while (Alternate != Candidate) {
      if (Candidate->BlockID < Alternate->BlockID)
        Candidate = Candidate->PostDominatorNode.Parent;
      else
        Alternate = Alternate->PostDominatorNode.Parent;
    }
  }
  PostDominatorNode.Parent = Candidate;
  // Subtree size starts at 1; computeNodeSize() accumulates children later.
  PostDominatorNode.SizeOfSubTree = 1;
}
262
263
264 // Renumber instructions in all blocks
renumberInstrs()265 void SCFG::renumberInstrs() {
266 int InstrID = 0;
267 for (auto *Block : Blocks)
268 InstrID = Block->renumberInstrs(InstrID);
269 }
270
271
// First pass of tree numbering for the (post-)dominator tree selected by the
// pointer-to-member TN.  Adds B's subtree size into its parent's running
// total, and stashes the parent's pre-update total in B's NodeID -- i.e.
// NodeID temporarily holds B's offset relative to its parent.  A later pass
// (computeNodeID) converts these offsets into absolute IDs.  Callers must
// visit children before parents so SizeOfSubTree totals are complete.
static inline void computeNodeSize(BasicBlock *B,
                                   BasicBlock::TopologyNode BasicBlock::*TN) {
  BasicBlock::TopologyNode *N = &(B->*TN);
  if (N->Parent) {
    BasicBlock::TopologyNode *P = &(N->Parent->*TN);
    // Initially set ID relative to the (as yet uncomputed) parent ID
    N->NodeID = P->SizeOfSubTree;
    P->SizeOfSubTree += N->SizeOfSubTree;
  }
}
282
// Second pass of tree numbering: converts the parent-relative NodeID written
// by computeNodeSize() into an absolute ID by adding the parent's (already
// absolute) NodeID.  Callers must visit parents before children (see
// computeNormalForm).  The root keeps its NodeID unchanged.
static inline void computeNodeID(BasicBlock *B,
                                 BasicBlock::TopologyNode BasicBlock::*TN) {
  BasicBlock::TopologyNode *N = &(B->*TN);
  if (N->Parent) {
    BasicBlock::TopologyNode *P = &(N->Parent->*TN);
    N->NodeID += P->NodeID;  // Fix NodeIDs relative to starting node.
  }
}
291
292
293 // Normalizes a CFG. Normalization has a few major components:
294 // 1) Removing unreachable blocks.
295 // 2) Computing dominators and post-dominators
296 // 3) Topologically sorting the blocks into the "Blocks" array.
void SCFG::computeNormalForm() {
  // Topologically sort the blocks starting from the entry block.
  // topologicalSort fills Blocks from the back and returns the number of
  // unfilled front slots, which equals the number of unreachable blocks.
  int NumUnreachableBlocks = Entry->topologicalSort(Blocks, Blocks.size());
  if (NumUnreachableBlocks > 0) {
    // If there were unreachable blocks shift everything down, and delete them.
    for (size_t I = NumUnreachableBlocks, E = Blocks.size(); I < E; ++I) {
      size_t NI = I - NumUnreachableBlocks;
      Blocks[NI] = Blocks[I];
      Blocks[NI]->BlockID = NI;
      // FIXME: clean up predecessor pointers to unreachable blocks?
    }
    Blocks.drop(NumUnreachableBlocks);
  }

  // Compute dominators.  Blocks are visited in topological order, so each
  // block's predecessors already have their dominators set.
  for (auto *Block : Blocks)
    Block->computeDominator();

  // Once dominators have been computed, the final sort may be performed.
  int NumBlocks = Exit->topologicalFinalSort(Blocks, 0);
  assert(static_cast<size_t>(NumBlocks) == Blocks.size());
  (void) NumBlocks;

  // Renumber the instructions now that we have a final sort.
  renumberInstrs();

  // Compute post-dominators and compute the sizes of each node in the
  // dominator tree.  Reverse order visits children before their dominators,
  // as computeNodeSize requires.
  for (auto *Block : Blocks.reverse()) {
    Block->computePostDominator();
    computeNodeSize(Block, &BasicBlock::DominatorNode);
  }
  // Compute the sizes of each node in the post-dominator tree and assign IDs in
  // the dominator tree.  Forward order visits dominator parents first.
  for (auto *Block : Blocks) {
    computeNodeID(Block, &BasicBlock::DominatorNode);
    computeNodeSize(Block, &BasicBlock::PostDominatorNode);
  }
  // Assign IDs in the post-dominator tree (parents precede children in
  // reverse order).
  for (auto *Block : Blocks.reverse()) {
    computeNodeID(Block, &BasicBlock::PostDominatorNode);
  }
}
340
341 } // end namespace til
342 } // end namespace threadSafety
343 } // end namespace clang
344