//===- ObjCARCOpts.cpp - ObjC ARC Optimization ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file defines ObjC ARC optimizations. ARC stands for Automatic
/// Reference Counting and is a system for managing reference counts for
/// objects in Objective-C.
///
/// The optimizations performed include elimination of redundant, partially
/// redundant, and inconsequential reference count operations, elimination of
/// redundant weak pointer operations, and numerous minor simplifications.
///
/// WARNING: This file knows about certain library functions. It recognizes them
/// by name, and hardwires knowledge of their semantics.
///
/// WARNING: This file knows about how certain Objective-C library functions are
/// used. Naive LLVM IR transformations which would otherwise be
/// behavior-preserving may break these assumptions.
//
//===----------------------------------------------------------------------===//

#include "ARCRuntimeEntryPoints.h"
#include "BlotMapVector.h"
#include "DependencyAnalysis.h"
#include "ObjCARC.h"
#include "ProvenanceAnalysis.h"
#include "PtrState.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/ObjCARCAliasAnalysis.h"
#include "llvm/Analysis/ObjCARCAnalysisUtils.h"
#include "llvm/Analysis/ObjCARCInstKind.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/ObjCARC.h"
#include <cassert>
#include <iterator>
#include <utility>

using namespace llvm;
using namespace llvm::objcarc;

#define DEBUG_TYPE "objc-arc-opts"

static cl::opt<unsigned> MaxPtrStates(
    "arc-opt-max-ptr-states", cl::Hidden,
    cl::desc("Maximum number of ptr states the optimizer keeps track of"),
    cl::init(4095));
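
// Illustrative usage (assuming the pass is run by name through opt):
//   opt -passes=objc-arc -arc-opt-max-ptr-states=1000 input.ll -S
// Lowering the cap trades optimization precision for compile time on
// pathological inputs.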

/// \defgroup ARCUtilities Utility declarations/definitions specific to ARC.
/// @{

/// This is similar to GetRCIdentityRoot but it stops as soon
/// as it finds a value with multiple uses.
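///
/// A sketch of the idea (illustrative, not verbatim IR):
///   %v = call i8* @objc_retain(i8* %x)   ; forwarding call with one use
/// recurses to %x; single-use bitcasts and all-zero GEPs are looked through
/// the same way, and a value with multiple non-trivial uses yields null.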
static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
  // ConstantData (like ConstantPointerNull and UndefValue) is used across
  // modules. It's never a single-use value.
  if (isa<ConstantData>(Arg))
    return nullptr;

  if (Arg->hasOneUse()) {
    if (const BitCastInst *BC = dyn_cast<BitCastInst>(Arg))
      return FindSingleUseIdentifiedObject(BC->getOperand(0));
    if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Arg))
      if (GEP->hasAllZeroIndices())
        return FindSingleUseIdentifiedObject(GEP->getPointerOperand());
    if (IsForwarding(GetBasicARCInstKind(Arg)))
      return FindSingleUseIdentifiedObject(
          cast<CallInst>(Arg)->getArgOperand(0));
    if (!IsObjCIdentifiedObject(Arg))
      return nullptr;
    return Arg;
  }

  // If we found an identifiable object that has multiple uses, but all of
  // them are trivial, we can still consider this to be a single-use value.
  if (IsObjCIdentifiedObject(Arg)) {
    for (const User *U : Arg->users())
      if (!U->use_empty() || GetRCIdentityRoot(U) != Arg)
        return nullptr;

    return Arg;
  }

  return nullptr;
}

/// @}
///
/// \defgroup ARCOpt ARC Optimization.
/// @{

// TODO: On code like this:
//
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
// stuff_that_cannot_release()
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
//
// The second retain and autorelease can be deleted.

// TODO: It should be possible to delete
// objc_autoreleasePoolPush and objc_autoreleasePoolPop
// pairs if nothing is actually autoreleased between them. Also, autorelease
// calls followed by objc_autoreleasePoolPop calls (perhaps in ObjC++ code
// after inlining) can be turned into plain release calls.

// TODO: Critical-edge splitting. If the optimal insertion point is
// a critical edge, the current algorithm has to fail, because it doesn't
// know how to split edges. It should be possible to make the optimizer
// think in terms of edges, rather than blocks, and then split critical
// edges on demand.

// TODO: OptimizeSequences could be generalized to be interprocedural.

// TODO: Recognize that a bunch of other objc runtime calls have
// non-escaping arguments and non-releasing arguments, and may be
// non-autoreleasing.

// TODO: Sink autorelease calls as far as possible. Unfortunately we
// usually can't sink them past other calls, which would be the main
// case where it would be useful.

// TODO: The pointer returned from objc_loadWeakRetained is retained.

// TODO: Delete release+retain pairs (rare).

STATISTIC(NumNoops, "Number of no-op objc calls eliminated");
STATISTIC(NumPartialNoops, "Number of partially no-op objc calls eliminated");
STATISTIC(NumAutoreleases, "Number of autoreleases converted to releases");
STATISTIC(NumRets, "Number of return value forwarding "
                   "retain+autoreleases eliminated");
STATISTIC(NumRRs, "Number of retain+release paths eliminated");
STATISTIC(NumPeeps, "Number of calls peephole-optimized");
#ifndef NDEBUG
STATISTIC(NumRetainsBeforeOpt,
          "Number of retains before optimization");
STATISTIC(NumReleasesBeforeOpt,
          "Number of releases before optimization");
STATISTIC(NumRetainsAfterOpt,
          "Number of retains after optimization");
STATISTIC(NumReleasesAfterOpt,
          "Number of releases after optimization");
#endif

namespace {

/// Per-BasicBlock state.
class BBState {
  /// The number of unique control paths from the entry which can reach this
  /// block.
  unsigned TopDownPathCount = 0;

  /// The number of unique control paths to exits from this block.
  unsigned BottomUpPathCount = 0;

  /// The top-down traversal uses this to record information known about a
  /// pointer at the bottom of each block.
  BlotMapVector<const Value *, TopDownPtrState> PerPtrTopDown;

  /// The bottom-up traversal uses this to record information known about a
  /// pointer at the top of each block.
  BlotMapVector<const Value *, BottomUpPtrState> PerPtrBottomUp;

  /// Effective predecessors of the current block ignoring ignorable edges and
  /// ignored backedges.
  SmallVector<BasicBlock *, 2> Preds;

  /// Effective successors of the current block ignoring ignorable edges and
  /// ignored backedges.
  SmallVector<BasicBlock *, 2> Succs;

public:
  static const unsigned OverflowOccurredValue;

  BBState() = default;

  using top_down_ptr_iterator = decltype(PerPtrTopDown)::iterator;
  using const_top_down_ptr_iterator = decltype(PerPtrTopDown)::const_iterator;

  top_down_ptr_iterator top_down_ptr_begin() { return PerPtrTopDown.begin(); }
  top_down_ptr_iterator top_down_ptr_end() { return PerPtrTopDown.end(); }
  const_top_down_ptr_iterator top_down_ptr_begin() const {
    return PerPtrTopDown.begin();
  }
  const_top_down_ptr_iterator top_down_ptr_end() const {
    return PerPtrTopDown.end();
  }
  bool hasTopDownPtrs() const {
    return !PerPtrTopDown.empty();
  }

  unsigned top_down_ptr_list_size() const {
    return std::distance(top_down_ptr_begin(), top_down_ptr_end());
  }

  using bottom_up_ptr_iterator = decltype(PerPtrBottomUp)::iterator;
  using const_bottom_up_ptr_iterator =
      decltype(PerPtrBottomUp)::const_iterator;

  bottom_up_ptr_iterator bottom_up_ptr_begin() {
    return PerPtrBottomUp.begin();
  }
  bottom_up_ptr_iterator bottom_up_ptr_end() { return PerPtrBottomUp.end(); }
  const_bottom_up_ptr_iterator bottom_up_ptr_begin() const {
    return PerPtrBottomUp.begin();
  }
  const_bottom_up_ptr_iterator bottom_up_ptr_end() const {
    return PerPtrBottomUp.end();
  }
  bool hasBottomUpPtrs() const {
    return !PerPtrBottomUp.empty();
  }

  unsigned bottom_up_ptr_list_size() const {
    return std::distance(bottom_up_ptr_begin(), bottom_up_ptr_end());
  }

  /// Mark this block as being an entry block, which has one path from the
  /// entry by definition.
  void SetAsEntry() { TopDownPathCount = 1; }

  /// Mark this block as being an exit block, which has one path to an exit by
  /// definition.
  void SetAsExit() { BottomUpPathCount = 1; }

  /// Attempt to find the PtrState object describing the top down state for
  /// pointer Arg. Return a new initialized PtrState describing the top down
  /// state for Arg if we do not find one.
  TopDownPtrState &getPtrTopDownState(const Value *Arg) {
    return PerPtrTopDown[Arg];
  }

  /// Attempt to find the PtrState object describing the bottom up state for
  /// pointer Arg. Return a new initialized PtrState describing the bottom up
  /// state for Arg if we do not find one.
  BottomUpPtrState &getPtrBottomUpState(const Value *Arg) {
    return PerPtrBottomUp[Arg];
  }

  /// Attempt to find the PtrState object describing the bottom up state for
  /// pointer Arg.
  bottom_up_ptr_iterator findPtrBottomUpState(const Value *Arg) {
    return PerPtrBottomUp.find(Arg);
  }

  void clearBottomUpPointers() {
    PerPtrBottomUp.clear();
  }

  void clearTopDownPointers() {
    PerPtrTopDown.clear();
  }

  void InitFromPred(const BBState &Other);
  void InitFromSucc(const BBState &Other);
  void MergePred(const BBState &Other);
  void MergeSucc(const BBState &Other);

  /// Compute the number of possible unique paths from an entry to an exit
  /// which pass through this block. This is only valid after both the
  /// top-down and bottom-up traversals are complete.
  ///
  /// Returns true if overflow occurred. Returns false if overflow did not
  /// occur.
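  ///
  /// For example, a block reachable along 2 distinct paths from the entry
  /// (TopDownPathCount = 2) that reaches an exit along 3 distinct paths
  /// (BottomUpPathCount = 3) lies on 2 * 3 = 6 entry-to-exit paths.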
  bool GetAllPathCountWithOverflow(unsigned &PathCount) const {
    if (TopDownPathCount == OverflowOccurredValue ||
        BottomUpPathCount == OverflowOccurredValue)
      return true;
    unsigned long long Product =
        (unsigned long long)TopDownPathCount * BottomUpPathCount;
    // Overflow occurred if any of the upper bits of Product are set or if all
    // the lower bits of Product are set.
    return (Product >> 32) ||
           ((PathCount = Product) == OverflowOccurredValue);
  }

  // Specialized CFG utilities.
  using edge_iterator = SmallVectorImpl<BasicBlock *>::const_iterator;

  edge_iterator pred_begin() const { return Preds.begin(); }
  edge_iterator pred_end() const { return Preds.end(); }
  edge_iterator succ_begin() const { return Succs.begin(); }
  edge_iterator succ_end() const { return Succs.end(); }

  void addSucc(BasicBlock *Succ) { Succs.push_back(Succ); }
  void addPred(BasicBlock *Pred) { Preds.push_back(Pred); }

  bool isExit() const { return Succs.empty(); }
};

} // end anonymous namespace

const unsigned BBState::OverflowOccurredValue = 0xffffffff;

namespace llvm {

raw_ostream &operator<<(raw_ostream &OS,
                        BBState &BBState) LLVM_ATTRIBUTE_UNUSED;

} // end namespace llvm

void BBState::InitFromPred(const BBState &Other) {
  PerPtrTopDown = Other.PerPtrTopDown;
  TopDownPathCount = Other.TopDownPathCount;
}

void BBState::InitFromSucc(const BBState &Other) {
  PerPtrBottomUp = Other.PerPtrBottomUp;
  BottomUpPathCount = Other.BottomUpPathCount;
}

/// The top-down traversal uses this to merge information about predecessors to
/// form the initial state for a new block.
void BBState::MergePred(const BBState &Other) {
  if (TopDownPathCount == OverflowOccurredValue)
    return;

  // Other.TopDownPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  TopDownPathCount += Other.TopDownPathCount;

  // In order to be consistent, we clear the top-down pointers when the
  // addition makes TopDownPathCount equal to OverflowOccurredValue, even
  // though "true" overflow has not occurred.
  if (TopDownPathCount == OverflowOccurredValue) {
    clearTopDownPointers();
    return;
  }

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (TopDownPathCount < Other.TopDownPathCount) {
    TopDownPathCount = OverflowOccurredValue;
    clearTopDownPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the same key,
  // merge the entries. Otherwise, copy the entry and merge it with an empty
  // entry.
  for (auto MI = Other.top_down_ptr_begin(), ME = Other.top_down_ptr_end();
       MI != ME; ++MI) {
    auto Pair = PerPtrTopDown.insert(*MI);
    Pair.first->second.Merge(Pair.second ? TopDownPtrState() : MI->second,
                             /*TopDown=*/true);
  }

  // For each entry in our set, if the other set doesn't have an entry with the
  // same key, force it to merge with an empty entry.
  for (auto MI = top_down_ptr_begin(), ME = top_down_ptr_end(); MI != ME; ++MI)
    if (Other.PerPtrTopDown.find(MI->first) == Other.PerPtrTopDown.end())
      MI->second.Merge(TopDownPtrState(), /*TopDown=*/true);
}

/// The bottom-up traversal uses this to merge information about successors to
/// form the initial state for a new block.
void BBState::MergeSucc(const BBState &Other) {
  if (BottomUpPathCount == OverflowOccurredValue)
    return;

  // Other.BottomUpPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  BottomUpPathCount += Other.BottomUpPathCount;

  // In order to be consistent, we clear the bottom-up pointers when the
  // addition makes BottomUpPathCount equal to OverflowOccurredValue, even
  // though "true" overflow has not occurred.
  if (BottomUpPathCount == OverflowOccurredValue) {
    clearBottomUpPointers();
    return;
  }

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (BottomUpPathCount < Other.BottomUpPathCount) {
    BottomUpPathCount = OverflowOccurredValue;
    clearBottomUpPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the
  // same key, merge the entries. Otherwise, copy the entry and merge
  // it with an empty entry.
  for (auto MI = Other.bottom_up_ptr_begin(), ME = Other.bottom_up_ptr_end();
       MI != ME; ++MI) {
    auto Pair = PerPtrBottomUp.insert(*MI);
    Pair.first->second.Merge(Pair.second ? BottomUpPtrState() : MI->second,
                             /*TopDown=*/false);
  }

  // For each entry in our set, if the other set doesn't have an entry
  // with the same key, force it to merge with an empty entry.
  for (auto MI = bottom_up_ptr_begin(), ME = bottom_up_ptr_end(); MI != ME;
       ++MI)
    if (Other.PerPtrBottomUp.find(MI->first) == Other.PerPtrBottomUp.end())
      MI->second.Merge(BottomUpPtrState(), /*TopDown=*/false);
}

raw_ostream &llvm::operator<<(raw_ostream &OS, BBState &BBInfo) {
  // Dump the pointers we are tracking.
  OS << " TopDown State:\n";
  if (!BBInfo.hasTopDownPtrs()) {
    LLVM_DEBUG(dbgs() << " NONE!\n");
  } else {
    for (auto I = BBInfo.top_down_ptr_begin(), E = BBInfo.top_down_ptr_end();
         I != E; ++I) {
      const PtrState &P = I->second;
      OS << " Ptr: " << *I->first
         << "\n KnownSafe: " << (P.IsKnownSafe() ? "true" : "false")
         << "\n ImpreciseRelease: "
         << (P.IsTrackingImpreciseReleases() ? "true" : "false") << "\n"
         << " HasCFGHazards: "
         << (P.IsCFGHazardAfflicted() ? "true" : "false") << "\n"
         << " KnownPositive: "
         << (P.HasKnownPositiveRefCount() ? "true" : "false") << "\n"
         << " Seq: "
         << P.GetSeq() << "\n";
    }
  }

  OS << " BottomUp State:\n";
  if (!BBInfo.hasBottomUpPtrs()) {
    LLVM_DEBUG(dbgs() << " NONE!\n");
  } else {
    for (auto I = BBInfo.bottom_up_ptr_begin(), E = BBInfo.bottom_up_ptr_end();
         I != E; ++I) {
      const PtrState &P = I->second;
      OS << " Ptr: " << *I->first
         << "\n KnownSafe: " << (P.IsKnownSafe() ? "true" : "false")
         << "\n ImpreciseRelease: "
         << (P.IsTrackingImpreciseReleases() ? "true" : "false") << "\n"
         << " HasCFGHazards: "
         << (P.IsCFGHazardAfflicted() ? "true" : "false") << "\n"
         << " KnownPositive: "
         << (P.HasKnownPositiveRefCount() ? "true" : "false") << "\n"
         << " Seq: "
         << P.GetSeq() << "\n";
    }
  }

  return OS;
}

namespace {

/// The main ARC optimization pass.
class ObjCARCOpt {
  bool Changed;
  bool CFGChanged;
  ProvenanceAnalysis PA;

  /// A cache of references to runtime entry point constants.
  ARCRuntimeEntryPoints EP;

  /// A cache of MDKinds that can be passed into other functions to propagate
  /// MDKind identifiers.
  ARCMDKindCache MDKindCache;

  BundledRetainClaimRVs *BundledInsts = nullptr;

  /// A flag indicating that the optimization that removes or moves
  /// retain/release pairs should be skipped, e.g. because the number of
  /// pointer states being tracked grew too large.
  bool DisableRetainReleasePairing = false;

  /// Flags which determine whether each of the interesting runtime functions
  /// is in fact used in the current function.
  unsigned UsedInThisFunction;

  bool OptimizeRetainRVCall(Function &F, Instruction *RetainRV);
  void OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
                                 ARCInstKind &Class);
  void OptimizeIndividualCalls(Function &F);

  /// Optimize an individual call, optionally passing the result of
  /// GetArgRCIdentityRoot if it has already been computed.
  void OptimizeIndividualCallImpl(
      Function &F, DenseMap<BasicBlock *, ColorVector> &BlockColors,
      Instruction *Inst, ARCInstKind Class, const Value *Arg);

  /// Try to optimize an AutoreleaseRV with a RetainRV or ClaimRV. If the
  /// optimization occurs, returns true to indicate that the caller should
  /// assume the instructions are dead.
  bool OptimizeInlinedAutoreleaseRVCall(
      Function &F, DenseMap<BasicBlock *, ColorVector> &BlockColors,
      Instruction *Inst, const Value *&Arg, ARCInstKind Class,
      Instruction *AutoreleaseRV, const Value *&AutoreleaseRVArg);

  void CheckForCFGHazards(const BasicBlock *BB,
                          DenseMap<const BasicBlock *, BBState> &BBStates,
                          BBState &MyStates) const;
  bool VisitInstructionBottomUp(Instruction *Inst, BasicBlock *BB,
                                BlotMapVector<Value *, RRInfo> &Retains,
                                BBState &MyStates);
  bool VisitBottomUp(BasicBlock *BB,
                     DenseMap<const BasicBlock *, BBState> &BBStates,
                     BlotMapVector<Value *, RRInfo> &Retains);
  bool VisitInstructionTopDown(
      Instruction *Inst, DenseMap<Value *, RRInfo> &Releases, BBState &MyStates,
      const DenseMap<const Instruction *, SmallPtrSet<const Value *, 2>>
          &ReleaseInsertPtToRCIdentityRoots);
  bool VisitTopDown(
      BasicBlock *BB, DenseMap<const BasicBlock *, BBState> &BBStates,
      DenseMap<Value *, RRInfo> &Releases,
      const DenseMap<const Instruction *, SmallPtrSet<const Value *, 2>>
          &ReleaseInsertPtToRCIdentityRoots);
  bool Visit(Function &F, DenseMap<const BasicBlock *, BBState> &BBStates,
             BlotMapVector<Value *, RRInfo> &Retains,
             DenseMap<Value *, RRInfo> &Releases);

  void MoveCalls(Value *Arg, RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                 BlotMapVector<Value *, RRInfo> &Retains,
                 DenseMap<Value *, RRInfo> &Releases,
                 SmallVectorImpl<Instruction *> &DeadInsts, Module *M);

  bool PairUpRetainsAndReleases(DenseMap<const BasicBlock *, BBState> &BBStates,
                                BlotMapVector<Value *, RRInfo> &Retains,
                                DenseMap<Value *, RRInfo> &Releases, Module *M,
                                Instruction *Retain,
                                SmallVectorImpl<Instruction *> &DeadInsts,
                                RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                                Value *Arg, bool KnownSafe,
                                bool &AnyPairsCompletelyEliminated);

  bool PerformCodePlacement(DenseMap<const BasicBlock *, BBState> &BBStates,
                            BlotMapVector<Value *, RRInfo> &Retains,
                            DenseMap<Value *, RRInfo> &Releases, Module *M);

  void OptimizeWeakCalls(Function &F);

  bool OptimizeSequences(Function &F);

  void OptimizeReturns(Function &F);

#ifndef NDEBUG
  void GatherStatistics(Function &F, bool AfterOptimization = false);
#endif

public:
  void init(Module &M);
  bool run(Function &F, AAResults &AA);
  void releaseMemory();
  bool hasCFGChanged() const { return CFGChanged; }
};

/// The main ARC optimization pass.
class ObjCARCOptLegacyPass : public FunctionPass {
public:
  ObjCARCOptLegacyPass() : FunctionPass(ID) {
    initializeObjCARCOptLegacyPassPass(*PassRegistry::getPassRegistry());
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool doInitialization(Module &M) override {
    OCAO.init(M);
    return false;
  }
  bool runOnFunction(Function &F) override {
    return OCAO.run(F, getAnalysis<AAResultsWrapperPass>().getAAResults());
  }
  void releaseMemory() override { OCAO.releaseMemory(); }
  static char ID;

private:
  ObjCARCOpt OCAO;
};
} // end anonymous namespace

char ObjCARCOptLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(ObjCARCOptLegacyPass, "objc-arc", "ObjC ARC optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(ObjCARCAAWrapperPass)
INITIALIZE_PASS_END(ObjCARCOptLegacyPass, "objc-arc", "ObjC ARC optimization",
                    false, false)

Pass *llvm::createObjCARCOptPass() { return new ObjCARCOptLegacyPass(); }

void ObjCARCOptLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ObjCARCAAWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
}

/// Turn objc_retainAutoreleasedReturnValue into objc_retain if the operand is
/// not a return value.
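///
/// A sketch of the rewrite (illustrative, not verbatim IR):
///   %v = call i8* @objc_retainAutoreleasedReturnValue(i8* %p)
/// becomes, when %p is not the return value of the immediately preceding
/// call or invoke,
///   %v = call i8* @objc_retain(i8* %p)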
bool
ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) {
  // Check for the argument being from an immediately preceding call or invoke.
  const Value *Arg = GetArgRCIdentityRoot(RetainRV);
  if (const Instruction *Call = dyn_cast<CallBase>(Arg)) {
    if (Call->getParent() == RetainRV->getParent()) {
      BasicBlock::const_iterator I(Call);
      ++I;
      while (IsNoopInstruction(&*I))
        ++I;
      if (&*I == RetainRV)
        return false;
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      BasicBlock *RetainRVParent = RetainRV->getParent();
      if (II->getNormalDest() == RetainRVParent) {
        BasicBlock::const_iterator I = RetainRVParent->begin();
        while (IsNoopInstruction(&*I))
          ++I;
        if (&*I == RetainRV)
          return false;
      }
    }
  }

  assert(!BundledInsts->contains(RetainRV) &&
         "a bundled retainRV's argument should be a call");

  // Turn it to a plain objc_retain.
  Changed = true;
  ++NumPeeps;

  LLVM_DEBUG(dbgs() << "Transforming objc_retainAutoreleasedReturnValue => "
                       "objc_retain since the operand is not a return value.\n"
                       "Old = "
                    << *RetainRV << "\n");

  Function *NewDecl = EP.get(ARCRuntimeEntryPointKind::Retain);
  cast<CallInst>(RetainRV)->setCalledFunction(NewDecl);

  LLVM_DEBUG(dbgs() << "New = " << *RetainRV << "\n");

  return false;
}

bool ObjCARCOpt::OptimizeInlinedAutoreleaseRVCall(
    Function &F, DenseMap<BasicBlock *, ColorVector> &BlockColors,
    Instruction *Inst, const Value *&Arg, ARCInstKind Class,
    Instruction *AutoreleaseRV, const Value *&AutoreleaseRVArg) {
  if (BundledInsts->contains(Inst))
    return false;

  // Must be in the same basic block.
  assert(Inst->getParent() == AutoreleaseRV->getParent());

  // Must operate on the same root.
  Arg = GetArgRCIdentityRoot(Inst);
  AutoreleaseRVArg = GetArgRCIdentityRoot(AutoreleaseRV);
  if (Arg != AutoreleaseRVArg) {
    // If there isn't an exact match, check if we have equivalent PHIs.
    const PHINode *PN = dyn_cast<PHINode>(Arg);
    if (!PN)
      return false;

    SmallVector<const Value *, 4> ArgUsers;
    getEquivalentPHIs(*PN, ArgUsers);
    if (!llvm::is_contained(ArgUsers, AutoreleaseRVArg))
      return false;
  }

  // Okay, this is a match. Merge them.
  ++NumPeeps;
  LLVM_DEBUG(dbgs() << "Found inlined objc_autoreleaseReturnValue '"
                    << *AutoreleaseRV << "' paired with '" << *Inst << "'\n");

  // Delete the RV pair, starting with the AutoreleaseRV.
  AutoreleaseRV->replaceAllUsesWith(
      cast<CallInst>(AutoreleaseRV)->getArgOperand(0));
  Changed = true;
  EraseInstruction(AutoreleaseRV);
  if (Class == ARCInstKind::RetainRV) {
    // AutoreleaseRV and RetainRV cancel out. Delete the RetainRV.
    Inst->replaceAllUsesWith(cast<CallInst>(Inst)->getArgOperand(0));
    EraseInstruction(Inst);
    return true;
  }

  // ClaimRV is a frontend peephole for RetainRV + Release. Since the
  // AutoreleaseRV and RetainRV cancel out, replace the ClaimRV with a Release.
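  //
  // Illustrative equivalence (not verbatim IR):
  //   claimRV(%p)  ~  retainRV(%p) + release(%p)
  // With the retainRV half already cancelled against the erased
  // autoreleaseRV, only the release remains to be materialized.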
  assert(Class == ARCInstKind::ClaimRV);
  Value *CallArg = cast<CallInst>(Inst)->getArgOperand(0);
  CallInst *Release = CallInst::Create(
      EP.get(ARCRuntimeEntryPointKind::Release), CallArg, "", Inst);
  assert(IsAlwaysTail(ARCInstKind::ClaimRV) &&
         "Expected ClaimRV to be safe to tail call");
  Release->setTailCall();
  Inst->replaceAllUsesWith(CallArg);
  EraseInstruction(Inst);

  // Run the normal optimizations on Release.
  OptimizeIndividualCallImpl(F, BlockColors, Release, ARCInstKind::Release,
                             Arg);
  return true;
}

/// Turn objc_autoreleaseReturnValue into objc_autorelease if the result is not
/// used as a return value.
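///
/// A sketch of the rewrite (illustrative, not verbatim IR):
///   %v = call i8* @objc_autoreleaseReturnValue(i8* %p)
/// becomes, when %v is never returned nor consumed by a retainRV,
///   %v = call i8* @objc_autorelease(i8* %p)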
void ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F,
                                           Instruction *AutoreleaseRV,
                                           ARCInstKind &Class) {
  // Check for a return of the pointer value.
  const Value *Ptr = GetArgRCIdentityRoot(AutoreleaseRV);

  // If the argument is ConstantPointerNull or UndefValue, its other users
  // aren't actually interesting to look at.
  if (isa<ConstantData>(Ptr))
    return;

  SmallVector<const Value *, 2> Users;
  Users.push_back(Ptr);

  // Add PHIs that are equivalent to Ptr to Users.
  if (const PHINode *PN = dyn_cast<PHINode>(Ptr))
    getEquivalentPHIs(*PN, Users);

  do {
    Ptr = Users.pop_back_val();
    for (const User *U : Ptr->users()) {
      if (isa<ReturnInst>(U) || GetBasicARCInstKind(U) == ARCInstKind::RetainRV)
        return;
      if (isa<BitCastInst>(U))
        Users.push_back(U);
    }
  } while (!Users.empty());

  Changed = true;
  ++NumPeeps;

  LLVM_DEBUG(
      dbgs() << "Transforming objc_autoreleaseReturnValue => "
                "objc_autorelease since its operand is not used as a return "
                "value.\n"
                "Old = "
             << *AutoreleaseRV << "\n");

  CallInst *AutoreleaseRVCI = cast<CallInst>(AutoreleaseRV);
  Function *NewDecl = EP.get(ARCRuntimeEntryPointKind::Autorelease);
  AutoreleaseRVCI->setCalledFunction(NewDecl);
  AutoreleaseRVCI->setTailCall(false); // Never tail call objc_autorelease.
  Class = ARCInstKind::Autorelease;

  LLVM_DEBUG(dbgs() << "New: " << *AutoreleaseRV << "\n");
}

namespace {
Instruction *
CloneCallInstForBB(CallInst &CI, BasicBlock &BB,
                   const DenseMap<BasicBlock *, ColorVector> &BlockColors) {
  SmallVector<OperandBundleDef, 1> OpBundles;
  for (unsigned I = 0, E = CI.getNumOperandBundles(); I != E; ++I) {
    auto Bundle = CI.getOperandBundleAt(I);
    // Funclets will be reassociated in the future.
    if (Bundle.getTagID() == LLVMContext::OB_funclet)
      continue;
    OpBundles.emplace_back(Bundle);
  }

  if (!BlockColors.empty()) {
    const ColorVector &CV = BlockColors.find(&BB)->second;
    assert(CV.size() == 1 && "non-unique color for block!");
    Instruction *EHPad = CV.front()->getFirstNonPHI();
    if (EHPad->isEHPad())
      OpBundles.emplace_back("funclet", EHPad);
  }

  return CallInst::Create(&CI, OpBundles);
}
}

/// Visit each call, one at a time, and make simplifications without doing any
/// additional analysis.
void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeIndividualCalls ==\n");
  // Reset all the flags in preparation for recomputing them.
  UsedInThisFunction = 0;

  DenseMap<BasicBlock *, ColorVector> BlockColors;
  if (F.hasPersonalityFn() &&
      isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
    BlockColors = colorEHFunclets(F);

  // Store any delayed AutoreleaseRV intrinsics, so they can be easily paired
  // with RetainRV and ClaimRV.
  Instruction *DelayedAutoreleaseRV = nullptr;
  const Value *DelayedAutoreleaseRVArg = nullptr;
  auto setDelayedAutoreleaseRV = [&](Instruction *AutoreleaseRV) {
    assert(!DelayedAutoreleaseRV || !AutoreleaseRV);
    DelayedAutoreleaseRV = AutoreleaseRV;
    DelayedAutoreleaseRVArg = nullptr;
  };
  auto optimizeDelayedAutoreleaseRV = [&]() {
    if (!DelayedAutoreleaseRV)
      return;
    OptimizeIndividualCallImpl(F, BlockColors, DelayedAutoreleaseRV,
                               ARCInstKind::AutoreleaseRV,
                               DelayedAutoreleaseRVArg);
    setDelayedAutoreleaseRV(nullptr);
  };
  auto shouldDelayAutoreleaseRV = [&](Instruction *NonARCInst) {
    // Nothing to delay, but we may as well skip the logic below.
    if (!DelayedAutoreleaseRV)
      return true;

    // If we hit the end of the basic block we're not going to find an RV-pair.
    // Stop delaying.
    if (NonARCInst->isTerminator())
      return false;

    // Given the frontend rules for emitting AutoreleaseRV, RetainRV, and
    // ClaimRV, it's probably safe to skip over even opaque function calls
    // here since OptimizeInlinedAutoreleaseRVCall will confirm that they
    // have the same RCIdentityRoot. However, what really matters is
    // skipping instructions or intrinsics that the inliner could leave behind;
    // be conservative for now and don't skip over opaque calls, which could
    // potentially include other ARC calls.
    auto *CB = dyn_cast<CallBase>(NonARCInst);
    if (!CB)
      return true;
    return CB->getIntrinsicID() != Intrinsic::not_intrinsic;
  };

  // Visit all objc_* calls in F.
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;

    if (auto *CI = dyn_cast<CallInst>(Inst))
      if (objcarc::hasAttachedCallOpBundle(CI)) {
        BundledInsts->insertRVCall(&*I, CI);
        Changed = true;
      }

    ARCInstKind Class = GetBasicARCInstKind(Inst);

    // Skip this loop if this instruction isn't itself an ARC intrinsic.
    const Value *Arg = nullptr;
    switch (Class) {
    default:
      optimizeDelayedAutoreleaseRV();
      break;
    case ARCInstKind::CallOrUser:
    case ARCInstKind::User:
    case ARCInstKind::None:
      // This is a non-ARC instruction. If we're delaying an AutoreleaseRV,
      // check if it's safe to skip over it; if not, optimize the AutoreleaseRV
      // now.
      if (!shouldDelayAutoreleaseRV(Inst))
        optimizeDelayedAutoreleaseRV();
      continue;
    case ARCInstKind::AutoreleaseRV:
      optimizeDelayedAutoreleaseRV();
      setDelayedAutoreleaseRV(Inst);
      continue;
    case ARCInstKind::RetainRV:
    case ARCInstKind::ClaimRV:
      if (DelayedAutoreleaseRV) {
        // We have a potential RV pair. Check if they cancel out.
        if (OptimizeInlinedAutoreleaseRVCall(F, BlockColors, Inst, Arg, Class,
                                             DelayedAutoreleaseRV,
                                             DelayedAutoreleaseRVArg)) {
          setDelayedAutoreleaseRV(nullptr);
          continue;
        }
        optimizeDelayedAutoreleaseRV();
      }
      break;
    }

    OptimizeIndividualCallImpl(F, BlockColors, Inst, Class, Arg);
  }

  // Catch the final delayed AutoreleaseRV.
  optimizeDelayedAutoreleaseRV();
}

/// This function returns true if the value is inert. An ObjC ARC runtime call
/// taking an inert operand can be safely deleted.
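///
/// For example, objc_retain(null) and objc_retain of a global annotated with
/// "objc_arc_inert" do nothing at run time, so such calls can be erased.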
static bool isInertARCValue(Value *V, SmallPtrSet<Value *, 1> &VisitedPhis) {
  V = V->stripPointerCasts();

  if (IsNullOrUndef(V))
    return true;

  // See if this is a global attribute annotated with an 'objc_arc_inert'.
  if (auto *GV = dyn_cast<GlobalVariable>(V))
    if (GV->hasAttribute("objc_arc_inert"))
      return true;

  if (auto PN = dyn_cast<PHINode>(V)) {
    // Ignore this phi if it has already been discovered.
    if (!VisitedPhis.insert(PN).second)
      return true;
    // Look through the phi's operands.
    for (Value *Opnd : PN->incoming_values())
      if (!isInertARCValue(Opnd, VisitedPhis))
        return false;
    return true;
  }

  return false;
}

void ObjCARCOpt::OptimizeIndividualCallImpl(
    Function &F, DenseMap<BasicBlock *, ColorVector> &BlockColors,
    Instruction *Inst, ARCInstKind Class, const Value *Arg) {
  LLVM_DEBUG(dbgs() << "Visiting: Class: " << Class << "; " << *Inst << "\n");

  // We can delete this call if it takes an inert value.
  SmallPtrSet<Value *, 1> VisitedPhis;

  if (BundledInsts->contains(Inst)) {
    UsedInThisFunction |= 1 << unsigned(Class);
    return;
  }

  if (IsNoopOnGlobal(Class))
    if (isInertARCValue(Inst->getOperand(0), VisitedPhis)) {
      if (!Inst->getType()->isVoidTy())
        Inst->replaceAllUsesWith(Inst->getOperand(0));
      Inst->eraseFromParent();
      Changed = true;
      return;
    }

  switch (Class) {
  default:
    break;

  // Delete no-op casts. These function calls have special semantics, but
  // the semantics are entirely implemented via lowering in the front-end,
  // so by the time they reach the optimizer, they are just no-op calls
  // which return their argument.
  //
  // There are gray areas here, as the ability to cast reference-counted
  // pointers to raw void* and back allows code to break ARC assumptions,
  // however these are currently considered to be unimportant.
  case ARCInstKind::NoopCast:
    Changed = true;
    ++NumNoops;
    LLVM_DEBUG(dbgs() << "Erasing no-op cast: " << *Inst << "\n");
    EraseInstruction(Inst);
    return;

  // If the pointer-to-weak-pointer is null, it's undefined behavior.
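  // The handling below makes this explicit: it materializes the UB as a store
  // of undef through a null pointer and replaces the call's result with undef.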
  case ARCInstKind::StoreWeak:
  case ARCInstKind::LoadWeak:
  case ARCInstKind::LoadWeakRetained:
  case ARCInstKind::InitWeak:
  case ARCInstKind::DestroyWeak: {
    CallInst *CI = cast<CallInst>(Inst);
    if (IsNullOrUndef(CI->getArgOperand(0))) {
      Changed = true;
      Type *Ty = CI->getArgOperand(0)->getType();
      new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                    Constant::getNullValue(Ty), CI);
      Value *NewValue = UndefValue::get(CI->getType());
      LLVM_DEBUG(
          dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                    "\nOld = "
                 << *CI << "\nNew = " << *NewValue << "\n");
      CI->replaceAllUsesWith(NewValue);
      CI->eraseFromParent();
      return;
    }
    break;
  }
  case ARCInstKind::CopyWeak:
  case ARCInstKind::MoveWeak: {
    CallInst *CI = cast<CallInst>(Inst);
    if (IsNullOrUndef(CI->getArgOperand(0)) ||
        IsNullOrUndef(CI->getArgOperand(1))) {
      Changed = true;
      Type *Ty = CI->getArgOperand(0)->getType();
      new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                    Constant::getNullValue(Ty), CI);

      Value *NewValue = UndefValue::get(CI->getType());
      LLVM_DEBUG(
          dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                    "\nOld = "
                 << *CI << "\nNew = " << *NewValue << "\n");

      CI->replaceAllUsesWith(NewValue);
      CI->eraseFromParent();
      return;
    }
    break;
  }
  case ARCInstKind::RetainRV:
    if (OptimizeRetainRVCall(F, Inst))
      return;
    break;
  case ARCInstKind::AutoreleaseRV:
    OptimizeAutoreleaseRVCall(F, Inst, Class);
    break;
  }

  // objc_autorelease(x) -> objc_release(x) if x is otherwise unused.
  if (IsAutorelease(Class) && Inst->use_empty()) {
    CallInst *Call = cast<CallInst>(Inst);
    const Value *Arg = Call->getArgOperand(0);
    Arg = FindSingleUseIdentifiedObject(Arg);
    if (Arg) {
      Changed = true;
      ++NumAutoreleases;

      // Create the declaration lazily.
      LLVMContext &C = Inst->getContext();

      Function *Decl = EP.get(ARCRuntimeEntryPointKind::Release);
      CallInst *NewCall =
          CallInst::Create(Decl, Call->getArgOperand(0), "", Call);
      NewCall->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease),
                           MDNode::get(C, None));

      LLVM_DEBUG(dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) "
                           "since x is otherwise unused.\nOld: "
                        << *Call << "\nNew: " << *NewCall << "\n");

      EraseInstruction(Call);
      Inst = NewCall;
      Class = ARCInstKind::Release;
    }
  }

  // For functions which can never be passed stack arguments, add
  // a tail keyword.
  if (IsAlwaysTail(Class) && !cast<CallInst>(Inst)->isNoTailCall()) {
    Changed = true;
    LLVM_DEBUG(
        dbgs() << "Adding tail keyword to function since it can never be "
                  "passed stack args: "
               << *Inst << "\n");
    cast<CallInst>(Inst)->setTailCall();
  }

  // Ensure that functions that can never have a "tail" keyword due to the
  // semantics of ARC truly do not do so.
  if (IsNeverTail(Class)) {
    Changed = true;
    LLVM_DEBUG(dbgs() << "Removing tail keyword from function: " << *Inst
                      << "\n");
    cast<CallInst>(Inst)->setTailCall(false);
  }

  // Set nounwind as needed.
  if (IsNoThrow(Class)) {
    Changed = true;
    LLVM_DEBUG(dbgs() << "Found no throw class. Setting nounwind on: " << *Inst
                      << "\n");
    cast<CallInst>(Inst)->setDoesNotThrow();
  }

  // Note: This catches instructions unrelated to ARC.
  if (!IsNoopOnNull(Class)) {
    UsedInThisFunction |= 1 << unsigned(Class);
    return;
  }

  // If we haven't already looked up the root, look it up now.
  if (!Arg)
    Arg = GetArgRCIdentityRoot(Inst);

  // ARC calls with null are no-ops. Delete them.
  if (IsNullOrUndef(Arg)) {
    Changed = true;
    ++NumNoops;
    LLVM_DEBUG(dbgs() << "ARC calls with null are no-ops. Erasing: " << *Inst
                      << "\n");
    EraseInstruction(Inst);
    return;
  }

  // Keep track of which of retain, release, autorelease, and retain_block
  // are actually present in this function.
  UsedInThisFunction |= 1 << unsigned(Class);

  // If Arg is a PHI, and one or more incoming values to the
  // PHI are null, and the call is control-equivalent to the PHI, and there
  // are no relevant side effects between the PHI and the call, and the call
  // is not a release that doesn't have the clang.imprecise_release tag, the
  // call could be pushed up to just those paths with non-null incoming
  // values. For now, don't bother splitting critical edges for this.
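  //
  // Illustrative CFG sketch (hypothetical IR):
  //   then:
  //     br label %merge                  ; %x is non-null here
  //   merge:
  //     %p = phi i8* [ %x, %then ], [ null, %else ]
  //     call void @objc_release(i8* %p)
  // The call is a no-op along the null path, so a clone operating on %x can
  // be inserted in %then and the original call erased.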
  if (Class == ARCInstKind::Release &&
      !Inst->getMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease)))
    return;

  SmallVector<std::pair<Instruction *, const Value *>, 4> Worklist;
  Worklist.push_back(std::make_pair(Inst, Arg));
  do {
    std::pair<Instruction *, const Value *> Pair = Worklist.pop_back_val();
    Inst = Pair.first;
    Arg = Pair.second;

    const PHINode *PN = dyn_cast<PHINode>(Arg);
    if (!PN)
      continue;

    // Determine if the PHI has any null operands, or any incoming
    // critical edges.
    bool HasNull = false;
    bool HasCriticalEdges = false;
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *Incoming = GetRCIdentityRoot(PN->getIncomingValue(i));
      if (IsNullOrUndef(Incoming))
        HasNull = true;
      else if (PN->getIncomingBlock(i)->getTerminator()->getNumSuccessors() !=
               1) {
        HasCriticalEdges = true;
        break;
      }
    }
    // If we have null operands and no critical edges, optimize.
    if (HasCriticalEdges)
      continue;
    if (!HasNull)
      continue;

    Instruction *DepInst = nullptr;

    // Check that there is nothing that cares about the reference
    // count between the call and the phi.
    switch (Class) {
    case ARCInstKind::Retain:
    case ARCInstKind::RetainBlock:
      // These can always be moved up.
      break;
    case ARCInstKind::Release:
      // These can't be moved across things that care about the retain
      // count.
      DepInst = findSingleDependency(NeedsPositiveRetainCount, Arg,
                                     Inst->getParent(), Inst, PA);
      break;
    case ARCInstKind::Autorelease:
      // These can't be moved across autorelease pool scope boundaries.
      DepInst = findSingleDependency(AutoreleasePoolBoundary, Arg,
                                     Inst->getParent(), Inst, PA);
      break;
    case ARCInstKind::ClaimRV:
    case ARCInstKind::RetainRV:
    case ARCInstKind::AutoreleaseRV:
      // Don't move these; the RV optimization depends on the autoreleaseRV
      // being tail called, and the retainRV being immediately after a call
      // (which might still happen if we get lucky with codegen layout, but
      // it's not worth taking the chance).
      continue;
    default:
      llvm_unreachable("Invalid dependence flavor");
    }

    if (DepInst != PN)
      continue;

    Changed = true;
    ++NumPartialNoops;
    // Clone the call into each predecessor that has a non-null value.
    CallInst *CInst = cast<CallInst>(Inst);
    Type *ParamTy = CInst->getArgOperand(0)->getType();
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *Incoming = GetRCIdentityRoot(PN->getIncomingValue(i));
      if (IsNullOrUndef(Incoming))
        continue;
      Value *Op = PN->getIncomingValue(i);
      Instruction *InsertPos = &PN->getIncomingBlock(i)->back();
      CallInst *Clone = cast<CallInst>(
          CloneCallInstForBB(*CInst, *InsertPos->getParent(), BlockColors));
      if (Op->getType() != ParamTy)
        Op = new BitCastInst(Op, ParamTy, "", InsertPos);
      Clone->setArgOperand(0, Op);
      Clone->insertBefore(InsertPos);

      LLVM_DEBUG(dbgs() << "Cloning " << *CInst << "\n"
                           "And inserting clone at "
                        << *InsertPos << "\n");
      Worklist.push_back(std::make_pair(Clone, Incoming));
    }
    // Erase the original call.
    LLVM_DEBUG(dbgs() << "Erasing: " << *CInst << "\n");
    EraseInstruction(CInst);
  } while (!Worklist.empty());
}

/// If we have a top down pointer in the S_Use state, make sure that there are
/// no CFG hazards by checking the states of various bottom up pointers.
static void CheckForUseCFGHazard(const Sequence SuccSSeq,
                                 const bool SuccSRRIKnownSafe,
                                 TopDownPtrState &S,
                                 bool &SomeSuccHasSame,
                                 bool &AllSuccsHaveSame,
                                 bool &NotAllSeqEqualButKnownSafe,
                                 bool &ShouldContinue) {
  switch (SuccSSeq) {
  case S_CanRelease: {
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe) {
      S.ClearSequenceProgress();
      break;
    }
    S.SetCFGHazardAfflicted(true);
    ShouldContinue = true;
    break;
  }
  case S_Use:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_MovableRelease:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}

/// If we have a Top Down pointer in the S_CanRelease state, make sure that
/// there are no CFG hazards by checking the states of various bottom up
/// pointers.
static void CheckForCanReleaseCFGHazard(const Sequence SuccSSeq,
                                        const bool SuccSRRIKnownSafe,
                                        TopDownPtrState &S,
                                        bool &SomeSuccHasSame,
                                        bool &AllSuccsHaveSame,
                                        bool &NotAllSeqEqualButKnownSafe) {
  switch (SuccSSeq) {
  case S_CanRelease:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_MovableRelease:
  case S_Use:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}

/// Check for critical edges, loop boundaries, irreducible control flow, or
/// other CFG structures where moving code across the edge would result in it
/// being executed more.
void
ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
                               DenseMap<const BasicBlock *, BBState> &BBStates,
                               BBState &MyStates) const {
  // If any top-down local-use or possible-dec has a succ which is earlier in
  // the sequence, forget it.
  for (auto I = MyStates.top_down_ptr_begin(), E = MyStates.top_down_ptr_end();
       I != E; ++I) {
    TopDownPtrState &S = I->second;
    const Sequence Seq = I->second.GetSeq();

    // We only care about S_Retain, S_CanRelease, and S_Use.
    if (Seq == S_None)
      continue;

    // Make sure that if extra top-down states are added in the future, this
    // code is updated to handle them.
    assert((Seq == S_Retain || Seq == S_CanRelease || Seq == S_Use) &&
           "Unknown top down sequence state.");

    const Value *Arg = I->first;
    bool SomeSuccHasSame = false;
    bool AllSuccsHaveSame = true;
    bool NotAllSeqEqualButKnownSafe = false;

    for (const BasicBlock *Succ : successors(BB)) {
      // If VisitBottomUp has pointer information for this successor, take
      // what we know about it.
      const DenseMap<const BasicBlock *, BBState>::iterator BBI =
          BBStates.find(Succ);
      assert(BBI != BBStates.end());
      const BottomUpPtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
      const Sequence SuccSSeq = SuccS.GetSeq();

      // If the bottom-up pointer is in an S_None state, clear the sequence
      // progress, since the sequence in the bottom-up state finished,
      // suggesting a mismatch between retains and releases. This is true for
      // all three cases that we are handling here: S_Retain, S_Use, and
      // S_CanRelease.
      if (SuccSSeq == S_None) {
        S.ClearSequenceProgress();
        continue;
      }

      // If we have S_Use or S_CanRelease, perform our CFG hazard checks.
      const bool SuccSRRIKnownSafe = SuccS.IsKnownSafe();

      // *NOTE* We do not use Seq from above here since we are allowing for
      // S.GetSeq() to change while we are visiting basic blocks.
      switch(S.GetSeq()) {
      case S_Use: {
        bool ShouldContinue = false;
        CheckForUseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S, SomeSuccHasSame,
                             AllSuccsHaveSame, NotAllSeqEqualButKnownSafe,
                             ShouldContinue);
        if (ShouldContinue)
          continue;
        break;
      }
      case S_CanRelease:
        CheckForCanReleaseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S,
                                    SomeSuccHasSame, AllSuccsHaveSame,
                                    NotAllSeqEqualButKnownSafe);
        break;
      case S_Retain:
      case S_None:
      case S_Stop:
      case S_MovableRelease:
        break;
      }
    }

    // If the state at the other end of any of the successor edges
    // matches the current state, require all edges to match. This
    // guards against loops in the middle of a sequence.
    if (SomeSuccHasSame && !AllSuccsHaveSame) {
      S.ClearSequenceProgress();
    } else if (NotAllSeqEqualButKnownSafe) {
      // If we would have cleared the state, were it not for the fact that we
      // are known safe, stop code motion. This is because whether or not it
      // is safe to remove RR pairs via KnownSafe is an orthogonal concept to
      // whether we are allowed to perform code motion.
      S.SetCFGHazardAfflicted(true);
    }
  }
}

bool ObjCARCOpt::VisitInstructionBottomUp(
    Instruction *Inst, BasicBlock *BB, BlotMapVector<Value *, RRInfo> &Retains,
    BBState &MyStates) {
  bool NestingDetected = false;
  ARCInstKind Class = GetARCInstKind(Inst);
  const Value *Arg = nullptr;

  LLVM_DEBUG(dbgs() << " Class: " << Class << "\n");

  switch (Class) {
  case ARCInstKind::Release: {
    Arg = GetArgRCIdentityRoot(Inst);

    BottomUpPtrState &S = MyStates.getPtrBottomUpState(Arg);
    NestingDetected |= S.InitBottomUp(MDKindCache, Inst);
    break;
  }
  case ARCInstKind::RetainBlock:
    // In OptimizeIndividualCalls, we have strength reduced all optimizable
    // objc_retainBlocks to objc_retains. Thus at this point any
    // objc_retainBlocks that we see are not optimizable.
    break;
  case ARCInstKind::Retain:
  case ARCInstKind::RetainRV: {
    Arg = GetArgRCIdentityRoot(Inst);
    BottomUpPtrState &S = MyStates.getPtrBottomUpState(Arg);
    if (S.MatchWithRetain()) {
      // Don't do retain+release tracking for ARCInstKind::RetainRV, because
      // it's better to let it remain as the first instruction after a call.
      if (Class != ARCInstKind::RetainRV) {
        LLVM_DEBUG(dbgs() << " Matching with: " << *Inst << "\n");
        Retains[Inst] = S.GetRRInfo();
      }
      S.ClearSequenceProgress();
    }
    // A retain moving bottom up can be a use.
    break;
  }
  case ARCInstKind::AutoreleasepoolPop:
    // Conservatively, clear MyStates for all known pointers.
    MyStates.clearBottomUpPointers();
    return NestingDetected;
  case ARCInstKind::AutoreleasepoolPush:
  case ARCInstKind::None:
    // These are irrelevant.
    return NestingDetected;
  default:
    break;
  }

  // Consider any other possible effects of this instruction on each
  // pointer being tracked.
  for (auto MI = MyStates.bottom_up_ptr_begin(),
            ME = MyStates.bottom_up_ptr_end();
       MI != ME; ++MI) {
    const Value *Ptr = MI->first;
    if (Ptr == Arg)
      continue; // Handled above.
    BottomUpPtrState &S = MI->second;

    if (S.HandlePotentialAlterRefCount(Inst, Ptr, PA, Class))
      continue;

    S.HandlePotentialUse(BB, Inst, Ptr, PA, Class);
  }

  return NestingDetected;
}

bool ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
                               DenseMap<const BasicBlock *, BBState> &BBStates,
                               BlotMapVector<Value *, RRInfo> &Retains) {
  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::VisitBottomUp ==\n");

  bool NestingDetected = false;
  BBState &MyStates = BBStates[BB];

  // Merge the states from each successor to compute the initial state
  // for the current block.
  BBState::edge_iterator SI(MyStates.succ_begin()),
                         SE(MyStates.succ_end());
  if (SI != SE) {
    const BasicBlock *Succ = *SI;
    DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Succ);
    assert(I != BBStates.end());
    MyStates.InitFromSucc(I->second);
    ++SI;
    for (; SI != SE; ++SI) {
      Succ = *SI;
      I = BBStates.find(Succ);
      assert(I != BBStates.end());
      MyStates.MergeSucc(I->second);
    }
  }

  LLVM_DEBUG(dbgs() << "Before:\n"
                    << BBStates[BB] << "\n"
                    << "Performing Dataflow:\n");

  // Visit all the instructions, bottom-up.
  for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
    Instruction *Inst = &*std::prev(I);

    // Invoke instructions are visited as part of their successors (below).
    if (isa<InvokeInst>(Inst))
      continue;

    LLVM_DEBUG(dbgs() << " Visiting " << *Inst << "\n");

    NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates);

    // Bail out if the number of pointers being tracked becomes too large so
    // that this pass can complete in a reasonable amount of time.
    if (MyStates.bottom_up_ptr_list_size() > MaxPtrStates) {
      DisableRetainReleasePairing = true;
      return false;
    }
  }

  // If there's a predecessor with an invoke, visit the invoke as if it were
  // part of this block, since we can't insert code after an invoke in its own
  // block, and we don't want to split critical edges.
  for (BBState::edge_iterator PI(MyStates.pred_begin()),
                              PE(MyStates.pred_end()); PI != PE; ++PI) {
    BasicBlock *Pred = *PI;
    if (InvokeInst *II = dyn_cast<InvokeInst>(&Pred->back()))
      NestingDetected |= VisitInstructionBottomUp(II, BB, Retains, MyStates);
  }

  LLVM_DEBUG(dbgs() << "\nFinal State:\n" << BBStates[BB] << "\n");

  return NestingDetected;
}
1500
// Fill ReleaseInsertPtToRCIdentityRoots, which is a map from insertion points
// to the set of RC identity roots that would be released by the release calls
// moved to the insertion points.
static void collectReleaseInsertPts(
    const BlotMapVector<Value *, RRInfo> &Retains,
    DenseMap<const Instruction *, SmallPtrSet<const Value *, 2>>
        &ReleaseInsertPtToRCIdentityRoots) {
  for (auto &P : Retains) {
    // Retains is a map from an objc_retain call to a RRInfo of the RC identity
    // root of the call. Get the RC identity root of the objc_retain call.
    Instruction *Retain = cast<Instruction>(P.first);
    Value *Root = GetRCIdentityRoot(Retain->getOperand(0));
    // Collect all the insertion points of the objc_release calls that release
    // the RC identity root of the objc_retain call.
    for (const Instruction *InsertPt : P.second.ReverseInsertPts)
      ReleaseInsertPtToRCIdentityRoots[InsertPt].insert(Root);
  }
}

// Get the RC identity roots from an insertion point of an objc_release call.
// Return nullptr if the passed instruction isn't an insertion point.
static const SmallPtrSet<const Value *, 2> *
getRCIdentityRootsFromReleaseInsertPt(
    const Instruction *InsertPt,
    const DenseMap<const Instruction *, SmallPtrSet<const Value *, 2>>
        &ReleaseInsertPtToRCIdentityRoots) {
  auto I = ReleaseInsertPtToRCIdentityRoots.find(InsertPt);
  if (I == ReleaseInsertPtToRCIdentityRoots.end())
    return nullptr;
  return &I->second;
}

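/// The top-down transfer function for a single instruction: start or extend
/// a sequence for retains, try to pair releases against tracked retains, and
/// conservatively account for any other effect the instruction may have on
/// each pointer still being tracked.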
bool ObjCARCOpt::VisitInstructionTopDown(
    Instruction *Inst, DenseMap<Value *, RRInfo> &Releases, BBState &MyStates,
    const DenseMap<const Instruction *, SmallPtrSet<const Value *, 2>>
        &ReleaseInsertPtToRCIdentityRoots) {
  bool NestingDetected = false;
  ARCInstKind Class = GetARCInstKind(Inst);
  const Value *Arg = nullptr;

  // Make sure a call to objc_retain isn't moved past insertion points of calls
  // to objc_release.
  if (const SmallPtrSet<const Value *, 2> *Roots =
          getRCIdentityRootsFromReleaseInsertPt(
              Inst, ReleaseInsertPtToRCIdentityRoots))
    for (auto *Root : *Roots) {
      TopDownPtrState &S = MyStates.getPtrTopDownState(Root);
      // Disable code motion if the current position is S_Retain to prevent
      // moving the objc_retain call past objc_release calls. If it's
      // S_CanRelease or larger, it's not necessary to disable code motion as
      // the insertion points that prevent the objc_retain call from moving down
      // should have been set already.
      if (S.GetSeq() == S_Retain)
        S.SetCFGHazardAfflicted(true);
    }

  LLVM_DEBUG(dbgs() << " Class: " << Class << "\n");

  switch (Class) {
  case ARCInstKind::RetainBlock:
    // In OptimizeIndividualCalls, we have strength reduced all optimizable
    // objc_retainBlocks to objc_retains. Thus at this point any
    // objc_retainBlocks that we see are not optimizable. We need to break since
    // a retain can be a potential use.
    break;
  case ARCInstKind::Retain:
  case ARCInstKind::RetainRV: {
    Arg = GetArgRCIdentityRoot(Inst);
    TopDownPtrState &S = MyStates.getPtrTopDownState(Arg);
    NestingDetected |= S.InitTopDown(Class, Inst);
    // A retain can be a potential use; proceed to the generic checking
    // code below.
    break;
  }
  case ARCInstKind::Release: {
    Arg = GetArgRCIdentityRoot(Inst);
    TopDownPtrState &S = MyStates.getPtrTopDownState(Arg);
    // Try to form a tentative pair in between this release instruction and the
    // top down pointers that we are tracking.
    if (S.MatchWithRelease(MDKindCache, Inst)) {
      // If we succeed, copy S's RRInfo into the Release -> {Retain Set
      // Map}. Then we clear S.
      LLVM_DEBUG(dbgs() << " Matching with: " << *Inst << "\n");
      Releases[Inst] = S.GetRRInfo();
      S.ClearSequenceProgress();
    }
    break;
  }
  case ARCInstKind::AutoreleasepoolPop:
    // Conservatively, clear MyStates for all known pointers.
    MyStates.clearTopDownPointers();
    return false;
  case ARCInstKind::AutoreleasepoolPush:
  case ARCInstKind::None:
    // These can not be uses of the pointers being tracked.
    return false;
  default:
    break;
  }

  // Consider any other possible effects of this instruction on each
  // pointer being tracked.
  for (auto MI = MyStates.top_down_ptr_begin(),
            ME = MyStates.top_down_ptr_end();
       MI != ME; ++MI) {
    const Value *Ptr = MI->first;
    if (Ptr == Arg)
      continue; // Handled above.
    TopDownPtrState &S = MI->second;
    if (S.HandlePotentialAlterRefCount(Inst, Ptr, PA, Class, *BundledInsts))
      continue;

    S.HandlePotentialUse(Inst, Ptr, PA, Class);
  }

  return NestingDetected;
}

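/// Perform the top-down dataflow for one basic block: merge the states of
/// all predecessors, apply VisitInstructionTopDown to each instruction in
/// order, and then check the computed state for CFG hazards.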
bool ObjCARCOpt::VisitTopDown(
    BasicBlock *BB, DenseMap<const BasicBlock *, BBState> &BBStates,
    DenseMap<Value *, RRInfo> &Releases,
    const DenseMap<const Instruction *, SmallPtrSet<const Value *, 2>>
        &ReleaseInsertPtToRCIdentityRoots) {
  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::VisitTopDown ==\n");
  bool NestingDetected = false;
  BBState &MyStates = BBStates[BB];

  // Merge the states from each predecessor to compute the initial state
  // for the current block.
  BBState::edge_iterator PI(MyStates.pred_begin()),
                         PE(MyStates.pred_end());
  if (PI != PE) {
    const BasicBlock *Pred = *PI;
    DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Pred);
    assert(I != BBStates.end());
    MyStates.InitFromPred(I->second);
    ++PI;
    for (; PI != PE; ++PI) {
      Pred = *PI;
      I = BBStates.find(Pred);
      assert(I != BBStates.end());
      MyStates.MergePred(I->second);
    }
  }

  // Check that BB and MyStates have the same number of predecessors. This
  // prevents retain calls that live outside a loop from being moved into the
  // loop.
  if (!BB->hasNPredecessors(MyStates.pred_end() - MyStates.pred_begin()))
    for (auto I = MyStates.top_down_ptr_begin(),
              E = MyStates.top_down_ptr_end();
         I != E; ++I)
      I->second.SetCFGHazardAfflicted(true);

  LLVM_DEBUG(dbgs() << "Before:\n"
                    << BBStates[BB] << "\n"
                    << "Performing Dataflow:\n");

  // Visit all the instructions, top-down.
  for (Instruction &Inst : *BB) {
    LLVM_DEBUG(dbgs() << " Visiting " << Inst << "\n");

    NestingDetected |= VisitInstructionTopDown(
        &Inst, Releases, MyStates, ReleaseInsertPtToRCIdentityRoots);

    // Bail out if the number of pointers being tracked becomes too large so
    // that this pass can complete in a reasonable amount of time.
    if (MyStates.top_down_ptr_list_size() > MaxPtrStates) {
      DisableRetainReleasePairing = true;
      return false;
    }
  }

  LLVM_DEBUG(dbgs() << "\nState Before Checking for CFG Hazards:\n"
                    << BBStates[BB] << "\n\n");
  CheckForCFGHazards(BB, BBStates, MyStates);
  LLVM_DEBUG(dbgs() << "Final State:\n" << BBStates[BB] << "\n");
  return NestingDetected;
}

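// Compute both a postorder of the CFG and a postorder of the reverse CFG,
// recording predecessor/successor edges in BBStates along the way. These
// orders drive the top-down and bottom-up visits in ObjCARCOpt::Visit.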
static void
ComputePostOrders(Function &F,
                  SmallVectorImpl<BasicBlock *> &PostOrder,
                  SmallVectorImpl<BasicBlock *> &ReverseCFGPostOrder,
                  unsigned NoObjCARCExceptionsMDKind,
                  DenseMap<const BasicBlock *, BBState> &BBStates) {
  /// The visited set, for doing DFS walks.
  SmallPtrSet<BasicBlock *, 16> Visited;

  // Do DFS, computing the PostOrder.
  SmallPtrSet<BasicBlock *, 16> OnStack;
  SmallVector<std::pair<BasicBlock *, succ_iterator>, 16> SuccStack;

  // Functions always have exactly one entry block, and we don't have
  // any other block that we treat like an entry block.
  BasicBlock *EntryBB = &F.getEntryBlock();
  BBState &MyStates = BBStates[EntryBB];
  MyStates.SetAsEntry();
  Instruction *EntryTI = EntryBB->getTerminator();
  SuccStack.push_back(std::make_pair(EntryBB, succ_iterator(EntryTI)));
  Visited.insert(EntryBB);
  OnStack.insert(EntryBB);
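  // Walk the CFG iteratively with an explicit stack of (block, next-successor)
  // pairs; the goto re-enters the loop whenever a new block is pushed, which
  // makes this a depth-first search without recursion.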
  do {
  dfs_next_succ:
    BasicBlock *CurrBB = SuccStack.back().first;
    succ_iterator SE(CurrBB->getTerminator(), false);

    while (SuccStack.back().second != SE) {
      BasicBlock *SuccBB = *SuccStack.back().second++;
      if (Visited.insert(SuccBB).second) {
        SuccStack.push_back(
            std::make_pair(SuccBB, succ_iterator(SuccBB->getTerminator())));
        BBStates[CurrBB].addSucc(SuccBB);
        BBState &SuccStates = BBStates[SuccBB];
        SuccStates.addPred(CurrBB);
        OnStack.insert(SuccBB);
        goto dfs_next_succ;
      }

      if (!OnStack.count(SuccBB)) {
        BBStates[CurrBB].addSucc(SuccBB);
        BBStates[SuccBB].addPred(CurrBB);
      }
    }
    OnStack.erase(CurrBB);
    PostOrder.push_back(CurrBB);
    SuccStack.pop_back();
  } while (!SuccStack.empty());

  Visited.clear();

  // Do reverse-CFG DFS, computing the reverse-CFG PostOrder.
  // Functions may have many exits, and there may also be blocks which we
  // treat as exits due to ignored edges.
  SmallVector<std::pair<BasicBlock *, BBState::edge_iterator>, 16> PredStack;
  for (BasicBlock &ExitBB : F) {
    BBState &MyStates = BBStates[&ExitBB];
    if (!MyStates.isExit())
      continue;

    MyStates.SetAsExit();

    PredStack.push_back(std::make_pair(&ExitBB, MyStates.pred_begin()));
    Visited.insert(&ExitBB);
    while (!PredStack.empty()) {
    reverse_dfs_next_succ:
      BBState::edge_iterator PE = BBStates[PredStack.back().first].pred_end();
      while (PredStack.back().second != PE) {
        BasicBlock *BB = *PredStack.back().second++;
        if (Visited.insert(BB).second) {
          PredStack.push_back(std::make_pair(BB, BBStates[BB].pred_begin()));
          goto reverse_dfs_next_succ;
        }
      }
      ReverseCFGPostOrder.push_back(PredStack.pop_back_val().first);
    }
  }
}

// Visit the function both top-down and bottom-up.
bool ObjCARCOpt::Visit(Function &F,
                       DenseMap<const BasicBlock *, BBState> &BBStates,
                       BlotMapVector<Value *, RRInfo> &Retains,
                       DenseMap<Value *, RRInfo> &Releases) {
  // Use reverse-postorder traversals, because we magically know that loops
  // will be well behaved, i.e. they won't repeatedly call retain on a single
  // pointer without doing a release. We can't use the ReversePostOrderTraversal
  // class here because we want the reverse-CFG postorder to consider each
  // function exit point, and we want to ignore selected cycle edges.
  SmallVector<BasicBlock *, 16> PostOrder;
  SmallVector<BasicBlock *, 16> ReverseCFGPostOrder;
  ComputePostOrders(F, PostOrder, ReverseCFGPostOrder,
                    MDKindCache.get(ARCMDKindID::NoObjCARCExceptions),
                    BBStates);

  // Use reverse-postorder on the reverse CFG for bottom-up.
  bool BottomUpNestingDetected = false;
  for (BasicBlock *BB : llvm::reverse(ReverseCFGPostOrder)) {
    BottomUpNestingDetected |= VisitBottomUp(BB, BBStates, Retains);
    if (DisableRetainReleasePairing)
      return false;
  }

  DenseMap<const Instruction *, SmallPtrSet<const Value *, 2>>
      ReleaseInsertPtToRCIdentityRoots;
  collectReleaseInsertPts(Retains, ReleaseInsertPtToRCIdentityRoots);

  // Use reverse-postorder for top-down.
  bool TopDownNestingDetected = false;
  for (BasicBlock *BB : llvm::reverse(PostOrder)) {
    TopDownNestingDetected |=
        VisitTopDown(BB, BBStates, Releases, ReleaseInsertPtToRCIdentityRoots);
    if (DisableRetainReleasePairing)
      return false;
  }

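  // Only report nesting when both walks observed it; a retain/release pair
  // can only be properly nested if the nesting is visible from both the
  // top-down and the bottom-up direction.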
  return TopDownNestingDetected && BottomUpNestingDetected;
}

/// Move the calls in RetainsToMove and ReleasesToMove.
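/// Concretely: insert a retain at every point in ReleasesToMove's
/// ReverseInsertPts and a release at every point in RetainsToMove's, then
/// queue the original paired calls for deletion via DeadInsts.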
void ObjCARCOpt::MoveCalls(Value *Arg, RRInfo &RetainsToMove,
                           RRInfo &ReleasesToMove,
                           BlotMapVector<Value *, RRInfo> &Retains,
                           DenseMap<Value *, RRInfo> &Releases,
                           SmallVectorImpl<Instruction *> &DeadInsts,
                           Module *M) {
  Type *ArgTy = Arg->getType();
  Type *ParamTy = PointerType::getUnqual(Type::getInt8Ty(ArgTy->getContext()));

  LLVM_DEBUG(dbgs() << "== ObjCARCOpt::MoveCalls ==\n");

  // Insert the new retain and release calls.
  for (Instruction *InsertPt : ReleasesToMove.ReverseInsertPts) {
    Value *MyArg = ArgTy == ParamTy ? Arg :
                   new BitCastInst(Arg, ParamTy, "", InsertPt);
    Function *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
    CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
    Call->setDoesNotThrow();
    Call->setTailCall();

    LLVM_DEBUG(dbgs() << "Inserting new Retain: " << *Call
                      << "\n"
                         "At insertion point: "
                      << *InsertPt << "\n");
  }
  for (Instruction *InsertPt : RetainsToMove.ReverseInsertPts) {
    Value *MyArg = ArgTy == ParamTy ? Arg :
                   new BitCastInst(Arg, ParamTy, "", InsertPt);
    Function *Decl = EP.get(ARCRuntimeEntryPointKind::Release);
    CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
    // Attach a clang.imprecise_release metadata tag, if appropriate.
    if (MDNode *M = ReleasesToMove.ReleaseMetadata)
      Call->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease), M);
    Call->setDoesNotThrow();
    if (ReleasesToMove.IsTailCallRelease)
      Call->setTailCall();

    LLVM_DEBUG(dbgs() << "Inserting new Release: " << *Call
                      << "\n"
                         "At insertion point: "
                      << *InsertPt << "\n");
  }

  // Delete the original retain and release calls.
  for (Instruction *OrigRetain : RetainsToMove.Calls) {
    Retains.blot(OrigRetain);
    DeadInsts.push_back(OrigRetain);
    LLVM_DEBUG(dbgs() << "Deleting retain: " << *OrigRetain << "\n");
  }
  for (Instruction *OrigRelease : ReleasesToMove.Calls) {
    Releases.erase(OrigRelease);
    DeadInsts.push_back(OrigRelease);
    LLVM_DEBUG(dbgs() << "Deleting release: " << *OrigRelease << "\n");
  }
}

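/// Starting from a single retain, alternately walk retain->release and
/// release->retain references in the collected RRInfo until a fixed point is
/// reached, accumulating the full set of mutually related calls. Returns true
/// if the resulting sets are balanced and safe to move or remove as a unit.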
bool ObjCARCOpt::PairUpRetainsAndReleases(
    DenseMap<const BasicBlock *, BBState> &BBStates,
    BlotMapVector<Value *, RRInfo> &Retains,
    DenseMap<Value *, RRInfo> &Releases, Module *M,
    Instruction *Retain,
    SmallVectorImpl<Instruction *> &DeadInsts, RRInfo &RetainsToMove,
    RRInfo &ReleasesToMove, Value *Arg, bool KnownSafe,
    bool &AnyPairsCompletelyEliminated) {
  // If a pair happens in a region where it is known that the reference count
  // is already incremented, we can similarly ignore possible decrements unless
  // we are dealing with a retainable object with multiple provenance sources.
  bool KnownSafeTD = true, KnownSafeBU = true;
  bool CFGHazardAfflicted = false;

  // Connect the dots between the top-down-collected RetainsToMove and
  // bottom-up-collected ReleasesToMove to form sets of related calls.
  // This is an iterative process so that we connect multiple releases
  // to multiple retains if needed.
  unsigned OldDelta = 0;
  unsigned NewDelta = 0;
  unsigned OldCount = 0;
  unsigned NewCount = 0;
  bool FirstRelease = true;
  for (SmallVector<Instruction *, 4> NewRetains{Retain};;) {
    SmallVector<Instruction *, 4> NewReleases;
    for (Instruction *NewRetain : NewRetains) {
      auto It = Retains.find(NewRetain);
      assert(It != Retains.end());
      const RRInfo &NewRetainRRI = It->second;
      KnownSafeTD &= NewRetainRRI.KnownSafe;
      CFGHazardAfflicted |= NewRetainRRI.CFGHazardAfflicted;
      for (Instruction *NewRetainRelease : NewRetainRRI.Calls) {
        auto Jt = Releases.find(NewRetainRelease);
        if (Jt == Releases.end())
          return false;
        const RRInfo &NewRetainReleaseRRI = Jt->second;

        // If the release does not have a reference to the retain as well,
        // something happened which is unaccounted for. Do not do anything.
        //
        // This can happen if we catch an additive overflow during path count
        // merging.
        if (!NewRetainReleaseRRI.Calls.count(NewRetain))
          return false;

        if (ReleasesToMove.Calls.insert(NewRetainRelease).second) {
          // If we overflow when we compute the path count, don't remove/move
          // anything.
          const BBState &NRRBBState = BBStates[NewRetainRelease->getParent()];
          unsigned PathCount = BBState::OverflowOccurredValue;
          if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
            return false;
          assert(PathCount != BBState::OverflowOccurredValue &&
                 "PathCount at this point can not be "
                 "OverflowOccurredValue.");
          OldDelta -= PathCount;

          // Merge the ReleaseMetadata and IsTailCallRelease values.
          if (FirstRelease) {
            ReleasesToMove.ReleaseMetadata =
                NewRetainReleaseRRI.ReleaseMetadata;
            ReleasesToMove.IsTailCallRelease =
                NewRetainReleaseRRI.IsTailCallRelease;
            FirstRelease = false;
          } else {
            if (ReleasesToMove.ReleaseMetadata !=
                NewRetainReleaseRRI.ReleaseMetadata)
              ReleasesToMove.ReleaseMetadata = nullptr;
            if (ReleasesToMove.IsTailCallRelease !=
                NewRetainReleaseRRI.IsTailCallRelease)
              ReleasesToMove.IsTailCallRelease = false;
          }

          // Collect the optimal insertion points.
          if (!KnownSafe)
            for (Instruction *RIP : NewRetainReleaseRRI.ReverseInsertPts) {
              if (ReleasesToMove.ReverseInsertPts.insert(RIP).second) {
                // If we overflow when we compute the path count, don't
                // remove/move anything.
                const BBState &RIPBBState = BBStates[RIP->getParent()];
                PathCount = BBState::OverflowOccurredValue;
                if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
                  return false;
                assert(PathCount != BBState::OverflowOccurredValue &&
                       "PathCount at this point can not be "
                       "OverflowOccurredValue.");
                NewDelta -= PathCount;
              }
            }
          NewReleases.push_back(NewRetainRelease);
        }
      }
    }
    NewRetains.clear();
    if (NewReleases.empty()) break;

    // Back the other way.
    for (Instruction *NewRelease : NewReleases) {
      auto It = Releases.find(NewRelease);
      assert(It != Releases.end());
      const RRInfo &NewReleaseRRI = It->second;
      KnownSafeBU &= NewReleaseRRI.KnownSafe;
      CFGHazardAfflicted |= NewReleaseRRI.CFGHazardAfflicted;
      for (Instruction *NewReleaseRetain : NewReleaseRRI.Calls) {
        auto Jt = Retains.find(NewReleaseRetain);
        if (Jt == Retains.end())
          return false;
        const RRInfo &NewReleaseRetainRRI = Jt->second;

        // If the retain does not have a reference to the release as well,
        // something happened which is unaccounted for. Do not do anything.
        //
        // This can happen if we catch an additive overflow during path count
        // merging.
        if (!NewReleaseRetainRRI.Calls.count(NewRelease))
          return false;

        if (RetainsToMove.Calls.insert(NewReleaseRetain).second) {
          // If we overflow when we compute the path count, don't remove/move
          // anything.
          const BBState &NRRBBState = BBStates[NewReleaseRetain->getParent()];
          unsigned PathCount = BBState::OverflowOccurredValue;
          if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
            return false;
          assert(PathCount != BBState::OverflowOccurredValue &&
                 "PathCount at this point can not be "
                 "OverflowOccurredValue.");
          OldDelta += PathCount;
          OldCount += PathCount;

          // Collect the optimal insertion points.
          if (!KnownSafe)
            for (Instruction *RIP : NewReleaseRetainRRI.ReverseInsertPts) {
              if (RetainsToMove.ReverseInsertPts.insert(RIP).second) {
                // If we overflow when we compute the path count, don't
                // remove/move anything.
                const BBState &RIPBBState = BBStates[RIP->getParent()];

                PathCount = BBState::OverflowOccurredValue;
                if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
                  return false;
                assert(PathCount != BBState::OverflowOccurredValue &&
                       "PathCount at this point can not be "
                       "OverflowOccurredValue.");
                NewDelta += PathCount;
                NewCount += PathCount;
              }
            }
          NewRetains.push_back(NewReleaseRetain);
        }
      }
    }
    if (NewRetains.empty()) break;
  }

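  // At this point OldDelta holds the net path count of the original retains
  // minus that of the original releases, and NewDelta holds the same for the
  // computed insertion points; each must be zero for the rewrite to keep the
  // reference count balanced on every path.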
  // We can only remove pointers if we are known safe in both directions.
  bool UnconditionallySafe = KnownSafeTD && KnownSafeBU;
  if (UnconditionallySafe) {
    RetainsToMove.ReverseInsertPts.clear();
    ReleasesToMove.ReverseInsertPts.clear();
    NewCount = 0;
  } else {
    // Determine whether the new insertion points we computed preserve the
    // balance of retain and release calls through the program.
    // TODO: If the fully aggressive solution isn't valid, try to find a
    // less aggressive solution which is.
    if (NewDelta != 0)
      return false;

    // At this point, we are not going to remove any RR pairs, but we still are
    // able to move RR pairs. If one of our pointers is afflicted with
    // CFGHazards, we cannot perform such code motion so exit early.
    const bool WillPerformCodeMotion =
        !RetainsToMove.ReverseInsertPts.empty() ||
        !ReleasesToMove.ReverseInsertPts.empty();
    if (CFGHazardAfflicted && WillPerformCodeMotion)
      return false;
  }

  // Determine whether the original call points are balanced in the retain and
  // release calls through the program. If not, conservatively don't touch
  // them.
  // TODO: It's theoretically possible to do code motion in this case, as
  // long as the existing imbalances are maintained.
  if (OldDelta != 0)
    return false;

  Changed = true;
  assert(OldCount != 0 && "Unreachable code?");
  NumRRs += OldCount - NewCount;
  // Set to true if we completely removed any RR pairs.
  AnyPairsCompletelyEliminated = NewCount == 0;

  // We can move calls!
  return true;
}

/// Identify pairings between the retains and releases, and delete and/or move
/// them.
bool ObjCARCOpt::PerformCodePlacement(
    DenseMap<const BasicBlock *, BBState> &BBStates,
    BlotMapVector<Value *, RRInfo> &Retains,
    DenseMap<Value *, RRInfo> &Releases, Module *M) {
  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::PerformCodePlacement ==\n");

  bool AnyPairsCompletelyEliminated = false;
  SmallVector<Instruction *, 8> DeadInsts;

  // Visit each retain.
  for (BlotMapVector<Value *, RRInfo>::const_iterator I = Retains.begin(),
                                                      E = Retains.end();
       I != E; ++I) {
    Value *V = I->first;
    if (!V) continue; // blotted

    Instruction *Retain = cast<Instruction>(V);

    LLVM_DEBUG(dbgs() << "Visiting: " << *Retain << "\n");

    Value *Arg = GetArgRCIdentityRoot(Retain);

    // If the object being released is in static or stack storage, we know it's
    // not being managed by ObjC reference counting, so we can delete pairs
    // regardless of what possible decrements or uses lie between them.
    bool KnownSafe = isa<Constant>(Arg) || isa<AllocaInst>(Arg);

    // A constant pointer can't be pointing to an object on the heap. It may
    // be reference-counted, but it won't be deleted.
    if (const LoadInst *LI = dyn_cast<LoadInst>(Arg))
      if (const GlobalVariable *GV =
              dyn_cast<GlobalVariable>(
                  GetRCIdentityRoot(LI->getPointerOperand())))
        if (GV->isConstant())
          KnownSafe = true;

    // Connect the dots between the top-down-collected RetainsToMove and
    // bottom-up-collected ReleasesToMove to form sets of related calls.
    RRInfo RetainsToMove, ReleasesToMove;

    bool PerformMoveCalls = PairUpRetainsAndReleases(
        BBStates, Retains, Releases, M, Retain, DeadInsts,
        RetainsToMove, ReleasesToMove, Arg, KnownSafe,
        AnyPairsCompletelyEliminated);

    if (PerformMoveCalls) {
      // Ok, everything checks out and we're all set. Let's move/delete some
      // code!
      MoveCalls(Arg, RetainsToMove, ReleasesToMove,
                Retains, Releases, DeadInsts, M);
    }
  }

  // Now that we're done moving everything, we can delete the newly dead
  // instructions, as we no longer need them as insert points.
  while (!DeadInsts.empty())
    EraseInstruction(DeadInsts.pop_back_val());

  return AnyPairsCompletelyEliminated;
}

/// Weak pointer optimizations.
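///
/// As an illustrative sketch (not drawn from a real test case), a redundant
/// load from the same __weak location with no intervening clobber is
/// forwarded:
/// \code
///   %1 = call i8* @objc_loadWeak(i8** %p)
///   %2 = call i8* @objc_loadWeak(i8** %p)  ; replaced with %1 and deleted
/// \endcode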
void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeWeakCalls ==\n");

  // First, do memdep-style RLE and S2L optimizations. We can't use memdep
  // itself because it uses AliasAnalysis and we need to do provenance
  // queries instead.
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;

    LLVM_DEBUG(dbgs() << "Visiting: " << *Inst << "\n");

    ARCInstKind Class = GetBasicARCInstKind(Inst);
    if (Class != ARCInstKind::LoadWeak &&
        Class != ARCInstKind::LoadWeakRetained)
      continue;

    // Delete objc_loadWeak calls with no users.
    if (Class == ARCInstKind::LoadWeak && Inst->use_empty()) {
      Inst->eraseFromParent();
      Changed = true;
      continue;
    }

    // TODO: For now, just look for an earlier available version of this value
    // within the same block. Theoretically, we could do memdep-style non-local
    // analysis too, but that would want caching. A better approach would be to
    // use the technique that EarlyCSE uses.
    inst_iterator Current = std::prev(I);
    BasicBlock *CurrentBB = &*Current.getBasicBlockIterator();
    for (BasicBlock::iterator B = CurrentBB->begin(),
                              J = Current.getInstructionIterator();
         J != B; --J) {
      Instruction *EarlierInst = &*std::prev(J);
      ARCInstKind EarlierClass = GetARCInstKind(EarlierInst);
      switch (EarlierClass) {
      case ARCInstKind::LoadWeak:
      case ARCInstKind::LoadWeakRetained: {
        // If this is loading from the same pointer, replace this load's value
        // with that one.
        CallInst *Call = cast<CallInst>(Inst);
        CallInst *EarlierCall = cast<CallInst>(EarlierInst);
        Value *Arg = Call->getArgOperand(0);
        Value *EarlierArg = EarlierCall->getArgOperand(0);
        switch (PA.getAA()->alias(Arg, EarlierArg)) {
        case AliasResult::MustAlias:
          Changed = true;
          // If the load has a builtin retain, insert a plain retain for it.
          if (Class == ARCInstKind::LoadWeakRetained) {
            Function *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
            CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
            CI->setTailCall();
          }
          // Zap the fully redundant load.
          Call->replaceAllUsesWith(EarlierCall);
          Call->eraseFromParent();
          goto clobbered;
        case AliasResult::MayAlias:
        case AliasResult::PartialAlias:
          goto clobbered;
        case AliasResult::NoAlias:
          break;
        }
        break;
      }
      case ARCInstKind::StoreWeak:
      case ARCInstKind::InitWeak: {
        // If this is storing to the same pointer and has the same size etc.
        // replace this load's value with the stored value.
        CallInst *Call = cast<CallInst>(Inst);
        CallInst *EarlierCall = cast<CallInst>(EarlierInst);
        Value *Arg = Call->getArgOperand(0);
        Value *EarlierArg = EarlierCall->getArgOperand(0);
        switch (PA.getAA()->alias(Arg, EarlierArg)) {
        case AliasResult::MustAlias:
          Changed = true;
          // If the load has a builtin retain, insert a plain retain for it.
          if (Class == ARCInstKind::LoadWeakRetained) {
            Function *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
            CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
            CI->setTailCall();
          }
          // Zap the fully redundant load.
          Call->replaceAllUsesWith(EarlierCall->getArgOperand(1));
          Call->eraseFromParent();
          goto clobbered;
        case AliasResult::MayAlias:
        case AliasResult::PartialAlias:
          goto clobbered;
        case AliasResult::NoAlias:
          break;
        }
        break;
      }
      case ARCInstKind::MoveWeak:
      case ARCInstKind::CopyWeak:
        // TODO: Grab the copied value.
        goto clobbered;
      case ARCInstKind::AutoreleasepoolPush:
      case ARCInstKind::None:
      case ARCInstKind::IntrinsicUser:
      case ARCInstKind::User:
        // Weak pointers are only modified through the weak entry points
        // (and arbitrary calls, which could call the weak entry points).
        break;
      default:
        // Anything else could modify the weak pointer.
        goto clobbered;
      }
    }
  clobbered:;
  }

  // Then, for each destroyWeak with an alloca operand, check to see if
  // the alloca and all its users can be zapped.
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;
    ARCInstKind Class = GetBasicARCInstKind(Inst);
    if (Class != ARCInstKind::DestroyWeak)
      continue;

    CallInst *Call = cast<CallInst>(Inst);
    Value *Arg = Call->getArgOperand(0);
    if (AllocaInst *Alloca = dyn_cast<AllocaInst>(Arg)) {
      for (User *U : Alloca->users()) {
        const Instruction *UserInst = cast<Instruction>(U);
        switch (GetBasicARCInstKind(UserInst)) {
        case ARCInstKind::InitWeak:
        case ARCInstKind::StoreWeak:
        case ARCInstKind::DestroyWeak:
          continue;
        default:
          goto done;
        }
      }
      Changed = true;
      for (auto UI = Alloca->user_begin(), UE = Alloca->user_end(); UI != UE;) {
        CallInst *UserInst = cast<CallInst>(*UI++);
        switch (GetBasicARCInstKind(UserInst)) {
        case ARCInstKind::InitWeak:
        case ARCInstKind::StoreWeak:
          // These functions return their second argument.
          UserInst->replaceAllUsesWith(UserInst->getArgOperand(1));
          break;
        case ARCInstKind::DestroyWeak:
          // No return value.
          break;
        default:
          llvm_unreachable("alloca really is used!");
        }
        UserInst->eraseFromParent();
      }
      Alloca->eraseFromParent();
    done:;
    }
  }
}

/// Identify program paths which execute sequences of retains and releases which
/// can be eliminated.
bool ObjCARCOpt::OptimizeSequences(Function &F) {
  // Releases, Retains - These are used to store the results of the main flow
  // analysis. These use Value* as the key instead of Instruction* so that the
  // map stays valid when we get around to rewriting code and calls get
  // replaced by arguments.
  DenseMap<Value *, RRInfo> Releases;
  BlotMapVector<Value *, RRInfo> Retains;

  // This is used during the traversal of the function to track the
  // states for each identified object at each block.
  DenseMap<const BasicBlock *, BBState> BBStates;

  // Analyze the CFG of the function, and all instructions.
  bool NestingDetected = Visit(F, BBStates, Retains, Releases);

  if (DisableRetainReleasePairing)
    return false;

  // Transform.
  bool AnyPairsCompletelyEliminated = PerformCodePlacement(BBStates, Retains,
                                                           Releases,
                                                           F.getParent());

  return AnyPairsCompletelyEliminated && NestingDetected;
}

/// Check if there is a dependent call earlier that does not have anything in
/// between the Retain and the call that can affect the reference count of their
/// shared pointer argument. Note that Retain need not be in BB.
static CallInst *HasSafePathToPredecessorCall(const Value *Arg,
                                              Instruction *Retain,
                                              ProvenanceAnalysis &PA) {
  auto *Call = dyn_cast_or_null<CallInst>(findSingleDependency(
      CanChangeRetainCount, Arg, Retain->getParent(), Retain, PA));

  // Check that the pointer is the return value of the call.
  if (!Call || Arg != Call)
    return nullptr;

  // Check that the call is a regular call.
  ARCInstKind Class = GetBasicARCInstKind(Call);
  return Class == ARCInstKind::CallOrUser || Class == ARCInstKind::Call
             ? Call
             : nullptr;
}

/// Find a dependent retain that precedes the given autorelease for which there
/// is nothing in between the two instructions that can affect the ref count of
/// Arg.
static CallInst *
FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB,
                                  Instruction *Autorelease,
                                  ProvenanceAnalysis &PA) {
  auto *Retain = dyn_cast_or_null<CallInst>(
      findSingleDependency(CanChangeRetainCount, Arg, BB, Autorelease, PA));

  // Check that we found a retain with the same argument.
  if (!Retain || !IsRetain(GetBasicARCInstKind(Retain)) ||
      GetArgRCIdentityRoot(Retain) != Arg) {
    return nullptr;
  }

  return Retain;
}

/// Look for an ``autorelease'' instruction dependent on Arg such that there are
/// no instructions dependent on Arg that need a positive ref count in between
/// the autorelease and the ret.
static CallInst *
FindPredecessorAutoreleaseWithSafePath(const Value *Arg, BasicBlock *BB,
                                       ReturnInst *Ret,
                                       ProvenanceAnalysis &PA) {
  auto *Autorelease = dyn_cast_or_null<CallInst>(
      findSingleDependency(NeedsPositiveRetainCount, Arg, BB, Ret, PA));

  if (!Autorelease)
    return nullptr;
  ARCInstKind AutoreleaseClass = GetBasicARCInstKind(Autorelease);
  if (!IsAutorelease(AutoreleaseClass))
    return nullptr;
  if (GetArgRCIdentityRoot(Autorelease) != Arg)
    return nullptr;

  return Autorelease;
}

/// Look for this pattern:
/// \code
///   %call = call i8* @something(...)
///   %2 = call i8* @objc_retain(i8* %call)
///   %3 = call i8* @objc_autorelease(i8* %2)
///   ret i8* %3
/// \endcode
/// And delete the retain and autorelease.
void ObjCARCOpt::OptimizeReturns(Function &F) {
  if (!F.getReturnType()->isPointerTy())
    return;

  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeReturns ==\n");

  for (BasicBlock &BB: F) {
    ReturnInst *Ret = dyn_cast<ReturnInst>(&BB.back());
    if (!Ret)
      continue;

    LLVM_DEBUG(dbgs() << "Visiting: " << *Ret << "\n");

    const Value *Arg = GetRCIdentityRoot(Ret->getOperand(0));

    // Look for an ``autorelease'' instruction that is a predecessor of Ret and
    // dependent on Arg such that there are no instructions dependent on Arg
    // that need a positive ref count in between the autorelease and Ret.
    CallInst *Autorelease =
        FindPredecessorAutoreleaseWithSafePath(Arg, &BB, Ret, PA);

    if (!Autorelease)
      continue;

    CallInst *Retain = FindPredecessorRetainWithSafePath(
        Arg, Autorelease->getParent(), Autorelease, PA);

    if (!Retain)
      continue;

    // Check that there is nothing that can affect the reference count
    // between the retain and the call. Note that Retain need not be in BB.
    CallInst *Call = HasSafePathToPredecessorCall(Arg, Retain, PA);

    // Don't remove retainRV/autoreleaseRV pairs if the call isn't a tail call.
    if (!Call ||
        (!Call->isTailCall() &&
         GetBasicARCInstKind(Retain) == ARCInstKind::RetainRV &&
         GetBasicARCInstKind(Autorelease) == ARCInstKind::AutoreleaseRV))
      continue;

    // If so, we can zap the retain and autorelease.
    Changed = true;
    ++NumRets;
    LLVM_DEBUG(dbgs() << "Erasing: " << *Retain << "\nErasing: " << *Autorelease
                      << "\n");
    BundledInsts->eraseInst(Retain);
    EraseInstruction(Autorelease);
  }
}

#ifndef NDEBUG
void
ObjCARCOpt::GatherStatistics(Function &F, bool AfterOptimization) {
  Statistic &NumRetains =
      AfterOptimization ? NumRetainsAfterOpt : NumRetainsBeforeOpt;
  Statistic &NumReleases =
      AfterOptimization ? NumReleasesAfterOpt : NumReleasesBeforeOpt;

  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;
    switch (GetBasicARCInstKind(Inst)) {
    default:
      break;
    case ARCInstKind::Retain:
      ++NumRetains;
      break;
    case ARCInstKind::Release:
      ++NumReleases;
      break;
    }
  }
}
#endif

void ObjCARCOpt::init(Module &M) {
  if (!EnableARCOpts)
    return;

  // Intuitively, objc_retain and others are nocapture, however in practice
  // they are not, because they return their argument value. And objc_release
  // calls finalizers which can have arbitrary side effects.
  MDKindCache.init(&M);

  // Initialize our runtime entry point cache.
  EP.init(&M);
}

bool ObjCARCOpt::run(Function &F, AAResults &AA) {
  if (!EnableARCOpts)
    return false;

  Changed = CFGChanged = false;
  BundledRetainClaimRVs BRV(EP, false);
  BundledInsts = &BRV;

  LLVM_DEBUG(dbgs() << "<<< ObjCARCOpt: Visiting Function: " << F.getName()
                    << " >>>"
                       "\n");

  std::pair<bool, bool> R = BundledInsts->insertAfterInvokes(F, nullptr);
  Changed |= R.first;
  CFGChanged |= R.second;

  PA.setAA(&AA);

#ifndef NDEBUG
  if (AreStatisticsEnabled()) {
    GatherStatistics(F, false);
  }
#endif

  // This pass performs several distinct transformations. As a compile-time aid
  // when compiling code that isn't ObjC, skip these if the relevant ObjC
  // library functions aren't declared.

  // Preliminary optimizations. This also computes UsedInThisFunction.
  OptimizeIndividualCalls(F);

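  // UsedInThisFunction is a bitmask with one bit per ARCInstKind seen by
  // OptimizeIndividualCalls; the tests below use it to skip transformations
  // whose relevant entry points never appear in this function.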
  // Optimizations for weak pointers.
  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::LoadWeak)) |
                            (1 << unsigned(ARCInstKind::LoadWeakRetained)) |
                            (1 << unsigned(ARCInstKind::StoreWeak)) |
                            (1 << unsigned(ARCInstKind::InitWeak)) |
                            (1 << unsigned(ARCInstKind::CopyWeak)) |
                            (1 << unsigned(ARCInstKind::MoveWeak)) |
                            (1 << unsigned(ARCInstKind::DestroyWeak))))
    OptimizeWeakCalls(F);

  // Optimizations for retain+release pairs.
  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Retain)) |
                            (1 << unsigned(ARCInstKind::RetainRV)) |
                            (1 << unsigned(ARCInstKind::RetainBlock))))
    if (UsedInThisFunction & (1 << unsigned(ARCInstKind::Release)))
      // Run OptimizeSequences until it either stops making changes or
      // no retain+release pair nesting is detected.
      while (OptimizeSequences(F)) {}

  // Optimizations if objc_autorelease is used.
  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Autorelease)) |
                            (1 << unsigned(ARCInstKind::AutoreleaseRV))))
    OptimizeReturns(F);

  // Gather statistics after optimization.
#ifndef NDEBUG
  if (AreStatisticsEnabled()) {
    GatherStatistics(F, true);
  }
#endif

  LLVM_DEBUG(dbgs() << "\n");

  return Changed;
}

void ObjCARCOpt::releaseMemory() {
  PA.clear();
}

/// @}
///

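// New pass manager entry point. CFG analyses are preserved unless the pass
// actually changed the CFG, which is tracked via hasCFGChanged().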
PreservedAnalyses ObjCARCOptPass::run(Function &F,
                                      FunctionAnalysisManager &AM) {
  ObjCARCOpt OCAO;
  OCAO.init(*F.getParent());

  bool Changed = OCAO.run(F, AM.getResult<AAManager>(F));
  bool CFGChanged = OCAO.hasCFGChanged();
  if (Changed) {
    PreservedAnalyses PA;
    if (!CFGChanged)
      PA.preserveSet<CFGAnalyses>();
    return PA;
  }
  return PreservedAnalyses::all();
}