1 //===- DeadStoreElimination.cpp - MemorySSA Backed Dead Store Elimination -===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // The code below implements dead store elimination using MemorySSA. It uses
10 // the following general approach: given a MemoryDef, walk upwards to find
11 // clobbering MemoryDefs that may be killed by the starting def. Then check
12 // that there are no uses that may read the location of the original MemoryDef
13 // in between both MemoryDefs. A bit more concretely:
14 //
15 // For all MemoryDefs StartDef:
16 // 1. Get the next dominating clobbering MemoryDef (EarlierAccess) by walking
17 //    upwards.
18 // 2. Check that there are no reads between EarlierAccess and the StartDef by
19 //    checking all uses starting at EarlierAccess and walking until we see
20 //    StartDef.
21 // 3. For each found CurrentDef, check that:
22 //   1. There are no barrier instructions between CurrentDef and StartDef (like
23 //       throws or stores with ordering constraints).
24 //   2. StartDef is executed whenever CurrentDef is executed.
25 //   3. StartDef completely overwrites CurrentDef.
26 // 4. Erase CurrentDef from the function and MemorySSA.
27 //
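//
// For illustration only, a hypothetical IR sketch (not taken from an actual
// test case) of the simplest situation handled here:
//
//   define void @f(i32* %p) {
//     store i32 0, i32* %p   ; CurrentDef, killed
//     store i32 1, i32* %p   ; StartDef, the killing MemoryDef
//     ret void
//   }
//
// The first store is removed because the second store completely overwrites
// the same location and no instruction in between may read %p.
//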
28 //===----------------------------------------------------------------------===//
29 
30 #include "llvm/Transforms/Scalar/DeadStoreElimination.h"
31 #include "llvm/ADT/APInt.h"
32 #include "llvm/ADT/DenseMap.h"
33 #include "llvm/ADT/MapVector.h"
34 #include "llvm/ADT/PostOrderIterator.h"
35 #include "llvm/ADT/SetVector.h"
36 #include "llvm/ADT/SmallPtrSet.h"
37 #include "llvm/ADT/SmallVector.h"
38 #include "llvm/ADT/Statistic.h"
39 #include "llvm/ADT/StringRef.h"
40 #include "llvm/Analysis/AliasAnalysis.h"
41 #include "llvm/Analysis/CaptureTracking.h"
42 #include "llvm/Analysis/GlobalsModRef.h"
43 #include "llvm/Analysis/LoopInfo.h"
44 #include "llvm/Analysis/MemoryBuiltins.h"
45 #include "llvm/Analysis/MemoryLocation.h"
46 #include "llvm/Analysis/MemorySSA.h"
47 #include "llvm/Analysis/MemorySSAUpdater.h"
48 #include "llvm/Analysis/MustExecute.h"
49 #include "llvm/Analysis/PostDominators.h"
50 #include "llvm/Analysis/TargetLibraryInfo.h"
51 #include "llvm/Analysis/ValueTracking.h"
52 #include "llvm/IR/Argument.h"
53 #include "llvm/IR/BasicBlock.h"
54 #include "llvm/IR/Constant.h"
55 #include "llvm/IR/Constants.h"
56 #include "llvm/IR/DataLayout.h"
57 #include "llvm/IR/Dominators.h"
58 #include "llvm/IR/Function.h"
59 #include "llvm/IR/InstIterator.h"
60 #include "llvm/IR/InstrTypes.h"
61 #include "llvm/IR/Instruction.h"
62 #include "llvm/IR/Instructions.h"
63 #include "llvm/IR/IntrinsicInst.h"
64 #include "llvm/IR/Intrinsics.h"
65 #include "llvm/IR/LLVMContext.h"
66 #include "llvm/IR/Module.h"
67 #include "llvm/IR/PassManager.h"
68 #include "llvm/IR/PatternMatch.h"
69 #include "llvm/IR/Value.h"
70 #include "llvm/InitializePasses.h"
71 #include "llvm/Pass.h"
72 #include "llvm/Support/Casting.h"
73 #include "llvm/Support/CommandLine.h"
74 #include "llvm/Support/Debug.h"
75 #include "llvm/Support/DebugCounter.h"
76 #include "llvm/Support/ErrorHandling.h"
77 #include "llvm/Support/MathExtras.h"
78 #include "llvm/Support/raw_ostream.h"
79 #include "llvm/Transforms/Scalar.h"
80 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
81 #include "llvm/Transforms/Utils/Local.h"
82 #include <algorithm>
83 #include <cassert>
84 #include <cstddef>
85 #include <cstdint>
86 #include <iterator>
87 #include <map>
88 #include <utility>
89 
90 using namespace llvm;
91 using namespace PatternMatch;
92 
93 #define DEBUG_TYPE "dse"
94 
95 STATISTIC(NumRemainingStores, "Number of stores remaining after DSE");
96 STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
97 STATISTIC(NumFastStores, "Number of stores deleted");
98 STATISTIC(NumFastOther, "Number of other instrs removed");
99 STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
100 STATISTIC(NumModifiedStores, "Number of stores modified");
101 STATISTIC(NumCFGChecks, "Number of blocks checked in CFG walks");
102 STATISTIC(NumCFGTries, "Number of attempted CFG walks to prove stores dead");
103 STATISTIC(NumCFGSuccess, "Number of stores proven dead via CFG walks");
104 STATISTIC(NumGetDomMemoryDefPassed,
105           "Number of times a valid candidate is returned from getDomMemoryDef");
106 STATISTIC(NumDomMemDefChecks,
107           "Number iterations check for reads in getDomMemoryDef");
108 
109 DEBUG_COUNTER(MemorySSACounter, "dse-memoryssa",
110               "Controls which MemoryDefs are eliminated.");
111 
112 static cl::opt<bool>
113 EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking",
114   cl::init(true), cl::Hidden,
115   cl::desc("Enable partial-overwrite tracking in DSE"));
116 
117 static cl::opt<bool>
118 EnablePartialStoreMerging("enable-dse-partial-store-merging",
119   cl::init(true), cl::Hidden,
120   cl::desc("Enable partial store merging in DSE"));
121 
122 static cl::opt<unsigned>
123     MemorySSAScanLimit("dse-memoryssa-scanlimit", cl::init(150), cl::Hidden,
124                        cl::desc("The number of memory instructions to scan for "
125                                 "dead store elimination (default = 100)"));
126 static cl::opt<unsigned> MemorySSAUpwardsStepLimit(
127     "dse-memoryssa-walklimit", cl::init(90), cl::Hidden,
128     cl::desc("The maximum number of steps while walking upwards to find "
129              "MemoryDefs that may be killed (default = 90)"));
130 
131 static cl::opt<unsigned> MemorySSAPartialStoreLimit(
132     "dse-memoryssa-partial-store-limit", cl::init(5), cl::Hidden,
133     cl::desc("The maximum number candidates that only partially overwrite the "
134              "killing MemoryDef to consider"
135              " (default = 5)"));
136 
137 static cl::opt<unsigned> MemorySSADefsPerBlockLimit(
138     "dse-memoryssa-defs-per-block-limit", cl::init(5000), cl::Hidden,
139     cl::desc("The number of MemoryDefs we consider as candidates to eliminated "
140              "other stores per basic block (default = 5000)"));
141 
142 static cl::opt<unsigned> MemorySSASameBBStepCost(
143     "dse-memoryssa-samebb-cost", cl::init(1), cl::Hidden,
144     cl::desc(
145         "The cost of a step in the same basic block as the killing MemoryDef"
146         "(default = 1)"));
147 
148 static cl::opt<unsigned>
149     MemorySSAOtherBBStepCost("dse-memoryssa-otherbb-cost", cl::init(5),
150                              cl::Hidden,
151                              cl::desc("The cost of a step in a different basic "
152                                       "block than the killing MemoryDef"
153                                       "(default = 5)"));
154 
155 static cl::opt<unsigned> MemorySSAPathCheckLimit(
156     "dse-memoryssa-path-check-limit", cl::init(50), cl::Hidden,
157     cl::desc("The maximum number of blocks to check when trying to prove that "
158              "all paths to an exit go through a killing block (default = 50)"));
159 
160 //===----------------------------------------------------------------------===//
161 // Helper functions
162 //===----------------------------------------------------------------------===//
163 using OverlapIntervalsTy = std::map<int64_t, int64_t>;
164 using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>;
165 
166 /// Does this instruction write some memory?  This only returns true for things
167 /// that we can analyze with other helpers below.
168 static bool hasAnalyzableMemoryWrite(Instruction *I,
169                                      const TargetLibraryInfo &TLI) {
170   if (isa<StoreInst>(I))
171     return true;
172   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
173     switch (II->getIntrinsicID()) {
174     default:
175       return false;
176     case Intrinsic::memset:
177     case Intrinsic::memmove:
178     case Intrinsic::memcpy:
179     case Intrinsic::memcpy_inline:
180     case Intrinsic::memcpy_element_unordered_atomic:
181     case Intrinsic::memmove_element_unordered_atomic:
182     case Intrinsic::memset_element_unordered_atomic:
183     case Intrinsic::init_trampoline:
184     case Intrinsic::lifetime_end:
185     case Intrinsic::masked_store:
186       return true;
187     }
188   }
189   if (auto *CB = dyn_cast<CallBase>(I)) {
190     LibFunc LF;
191     if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) {
192       switch (LF) {
193       case LibFunc_strcpy:
194       case LibFunc_strncpy:
195       case LibFunc_strcat:
196       case LibFunc_strncat:
197         return true;
198       default:
199         return false;
200       }
201     }
202   }
203   return false;
204 }
205 
206 /// Return a Location stored to by the specified instruction. If isRemovable
207 /// returns true, this function completely describes the memory
208 /// operations for this instruction.
209 static MemoryLocation getLocForWrite(Instruction *Inst,
210                                      const TargetLibraryInfo &TLI) {
211   if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
212     return MemoryLocation::get(SI);
213 
214   // memcpy/memmove/memset.
215   if (auto *MI = dyn_cast<AnyMemIntrinsic>(Inst))
216     return MemoryLocation::getForDest(MI);
217 
218   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
219     switch (II->getIntrinsicID()) {
220     default:
221       return MemoryLocation(); // Unhandled intrinsic.
222     case Intrinsic::init_trampoline:
223       return MemoryLocation::getAfter(II->getArgOperand(0));
224     case Intrinsic::masked_store:
225       return MemoryLocation::getForArgument(II, 1, TLI);
226     case Intrinsic::lifetime_end: {
227       uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
228       return MemoryLocation(II->getArgOperand(1), Len);
229     }
230     }
231   }
232   if (auto *CB = dyn_cast<CallBase>(Inst))
233     // All the supported TLI functions so far happen to have dest as their
234     // first argument.
235     return MemoryLocation::getAfter(CB->getArgOperand(0));
236   return MemoryLocation();
237 }
238 
239 /// If the value of this instruction and the memory it writes to is unused, may
240 /// we delete this instruction?
241 static bool isRemovable(Instruction *I) {
242   // Don't remove volatile/atomic stores.
243   if (StoreInst *SI = dyn_cast<StoreInst>(I))
244     return SI->isUnordered();
245 
246   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
247     switch (II->getIntrinsicID()) {
248     default: llvm_unreachable("doesn't pass 'hasAnalyzableMemoryWrite' predicate");
249     case Intrinsic::lifetime_end:
250       // Never remove dead lifetime_end intrinsics, e.g. because they may be
251       // followed by a free.
252       return false;
253     case Intrinsic::init_trampoline:
254       // Always safe to remove init_trampoline.
255       return true;
256     case Intrinsic::memset:
257     case Intrinsic::memmove:
258     case Intrinsic::memcpy:
259     case Intrinsic::memcpy_inline:
260       // Don't remove volatile memory intrinsics.
261       return !cast<MemIntrinsic>(II)->isVolatile();
262     case Intrinsic::memcpy_element_unordered_atomic:
263     case Intrinsic::memmove_element_unordered_atomic:
264     case Intrinsic::memset_element_unordered_atomic:
265     case Intrinsic::masked_store:
266       return true;
267     }
268   }
269 
270   // note: only get here for calls with analyzable writes - i.e. libcalls
271   if (auto *CB = dyn_cast<CallBase>(I))
272     return CB->use_empty();
273 
274   return false;
275 }
276 
277 /// Returns true if the end of this instruction can be safely shortened in
278 /// length.
279 static bool isShortenableAtTheEnd(Instruction *I) {
280   // Don't shorten stores for now
281   if (isa<StoreInst>(I))
282     return false;
283 
284   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
285     switch (II->getIntrinsicID()) {
286       default: return false;
287       case Intrinsic::memset:
288       case Intrinsic::memcpy:
289       case Intrinsic::memcpy_element_unordered_atomic:
290       case Intrinsic::memset_element_unordered_atomic:
291         // Do shorten memory intrinsics.
292         // FIXME: Add memmove if it's also safe to transform.
293         return true;
294     }
295   }
296 
297   // Don't shorten libcalls for now.
298 
299   return false;
300 }
301 
302 /// Returns true if the beginning of this instruction can be safely shortened
303 /// in length.
304 static bool isShortenableAtTheBeginning(Instruction *I) {
305   // FIXME: Handle only memset for now. Supporting memcpy/memmove should be
306   // easily done by offsetting the source address.
307   return isa<AnyMemSetInst>(I);
308 }
309 
310 static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
311                                const TargetLibraryInfo &TLI,
312                                const Function *F) {
313   uint64_t Size;
314   ObjectSizeOpts Opts;
315   Opts.NullIsUnknownSize = NullPointerIsDefined(F);
316 
317   if (getObjectSize(V, Size, DL, &TLI, Opts))
318     return Size;
319   return MemoryLocation::UnknownSize;
320 }
321 
322 namespace {
323 
324 enum OverwriteResult {
325   OW_Begin,
326   OW_Complete,
327   OW_End,
328   OW_PartialEarlierWithFullLater,
329   OW_MaybePartial,
330   OW_Unknown
331 };
332 
333 } // end anonymous namespace
334 
335 /// Check if two instructions are masked stores that completely
336 /// overwrite one another. More specifically, \p Later has to
337 /// overwrite \p Earlier.
338 static OverwriteResult isMaskedStoreOverwrite(const Instruction *Later,
339                                               const Instruction *Earlier,
340                                               BatchAAResults &AA) {
341   const auto *IIL = dyn_cast<IntrinsicInst>(Later);
342   const auto *IIE = dyn_cast<IntrinsicInst>(Earlier);
343   if (IIL == nullptr || IIE == nullptr)
344     return OW_Unknown;
345   if (IIL->getIntrinsicID() != Intrinsic::masked_store ||
346       IIE->getIntrinsicID() != Intrinsic::masked_store)
347     return OW_Unknown;
348   // Pointers.
349   Value *LP = IIL->getArgOperand(1)->stripPointerCasts();
350   Value *EP = IIE->getArgOperand(1)->stripPointerCasts();
351   if (LP != EP && !AA.isMustAlias(LP, EP))
352     return OW_Unknown;
353   // Masks.
354   // TODO: check that Later's mask is a superset of Earlier's mask.
355   if (IIL->getArgOperand(3) != IIE->getArgOperand(3))
356     return OW_Unknown;
357   return OW_Complete;
358 }
359 
360 /// Return 'OW_Complete' if a store to the 'Later' location completely
361 /// overwrites a store to the 'Earlier' location, 'OW_End' if the end of the
362 /// 'Earlier' location is completely overwritten by 'Later', 'OW_Begin' if the
363 /// beginning of the 'Earlier' location is overwritten by 'Later'.
364 /// 'OW_PartialEarlierWithFullLater' means that an earlier (big) store was
365 /// overwritten by a later (smaller) store which doesn't write outside the big
366 /// store's memory locations. Returns 'OW_Unknown' if nothing can be determined.
367 /// NOTE: This function must only be called if both \p Later and \p Earlier
368 /// write to the same underlying object with valid \p EarlierOff and \p
369 /// LaterOff.
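/// For illustration, with hypothetical offsets: given an earlier write to
/// [0, 8), a later write to [2, 6) yields OW_PartialEarlierWithFullLater,
/// while (with partial-overwrite tracking disabled) a later write to [4, 12)
/// yields OW_End and a later write to [-4, 4) yields OW_Begin.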
370 static OverwriteResult isPartialOverwrite(const MemoryLocation &Later,
371                                           const MemoryLocation &Earlier,
372                                           int64_t EarlierOff, int64_t LaterOff,
373                                           Instruction *DepWrite,
374                                           InstOverlapIntervalsTy &IOL) {
375   const uint64_t LaterSize = Later.Size.getValue();
376   const uint64_t EarlierSize = Earlier.Size.getValue();
377   // The two writes may overlap, although the overlap is not complete. There
378   // might also be other incomplete overlaps, and together, they might cover
379   // the complete earlier write.
380   // Note: The correctness of this logic depends on the fact that this function
381   // is never called for DepWrite when there are any intervening reads.
382   if (EnablePartialOverwriteTracking &&
383       LaterOff < int64_t(EarlierOff + EarlierSize) &&
384       int64_t(LaterOff + LaterSize) >= EarlierOff) {
385 
386     // Insert our part of the overlap into the map.
387     auto &IM = IOL[DepWrite];
388     LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOff
389                       << ", " << int64_t(EarlierOff + EarlierSize)
390                       << ") Later [" << LaterOff << ", "
391                       << int64_t(LaterOff + LaterSize) << ")\n");
392 
393     // Make sure that we only insert non-overlapping intervals and combine
394     // adjacent intervals. The intervals are stored in the map with the ending
395     // offset as the key (in the half-open sense) and the starting offset as
396     // the value.
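    // For illustration, with hypothetical offsets: if the earlier write covers
    // [0, 8) and later writes cover [0, 4) and then [4, 8), the map first
    // holds {4 -> 0}; the second insertion merges the adjacent interval into
    // {8 -> 0}, and the composite interval [0, 8) now covers the earlier
    // write, so OW_Complete is returned below.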
397     int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + LaterSize;
398 
399     // Find any intervals ending at, or after, LaterIntStart which start
400     // before LaterIntEnd.
401     auto ILI = IM.lower_bound(LaterIntStart);
402     if (ILI != IM.end() && ILI->second <= LaterIntEnd) {
403       // This existing interval is overlapped with the current store somewhere
404       // in [LaterIntStart, LaterIntEnd]. Merge them by erasing the existing
405       // intervals and adjusting our start and end.
406       LaterIntStart = std::min(LaterIntStart, ILI->second);
407       LaterIntEnd = std::max(LaterIntEnd, ILI->first);
408       ILI = IM.erase(ILI);
409 
410       // Continue erasing and adjusting our end in case other previous
411       // intervals are also overlapped with the current store.
412       //
413       // |--- earlier 1 ---|  |--- earlier 2 ---|
414       //     |------- later---------|
415       //
416       while (ILI != IM.end() && ILI->second <= LaterIntEnd) {
417         assert(ILI->second > LaterIntStart && "Unexpected interval");
418         LaterIntEnd = std::max(LaterIntEnd, ILI->first);
419         ILI = IM.erase(ILI);
420       }
421     }
422 
423     IM[LaterIntEnd] = LaterIntStart;
424 
425     ILI = IM.begin();
426     if (ILI->second <= EarlierOff &&
427         ILI->first >= int64_t(EarlierOff + EarlierSize)) {
428       LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier ["
429                         << EarlierOff << ", "
430                         << int64_t(EarlierOff + EarlierSize)
431                         << ") Composite Later [" << ILI->second << ", "
432                         << ILI->first << ")\n");
433       ++NumCompletePartials;
434       return OW_Complete;
435     }
436   }
437 
438   // Check for an earlier store which writes to all the memory locations that
439   // the later store writes to.
440   if (EnablePartialStoreMerging && LaterOff >= EarlierOff &&
441       int64_t(EarlierOff + EarlierSize) > LaterOff &&
442       uint64_t(LaterOff - EarlierOff) + LaterSize <= EarlierSize) {
443     LLVM_DEBUG(dbgs() << "DSE: Partial overwrite an earlier load ["
444                       << EarlierOff << ", "
445                       << int64_t(EarlierOff + EarlierSize)
446                       << ") by a later store [" << LaterOff << ", "
447                       << int64_t(LaterOff + LaterSize) << ")\n");
448     // TODO: Maybe come up with a better name?
449     return OW_PartialEarlierWithFullLater;
450   }
451 
452   // Another interesting case is if the later store overwrites the end of the
453   // earlier store.
454   //
455   //      |--earlier--|
456   //                |--   later   --|
457   //
458   // In this case we may want to trim the size of earlier to avoid generating
459   // writes to addresses which will definitely be overwritten later
460   if (!EnablePartialOverwriteTracking &&
461       (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + EarlierSize) &&
462        int64_t(LaterOff + LaterSize) >= int64_t(EarlierOff + EarlierSize)))
463     return OW_End;
464 
465   // Finally, we also need to check if the later store overwrites the beginning
466   // of the earlier store.
467   //
468   //                |--earlier--|
469   //      |--   later   --|
470   //
471   // In this case we may want to move the destination address and trim the size
472   // of earlier to avoid generating writes to addresses which will definitely
473   // be overwritten later.
474   if (!EnablePartialOverwriteTracking &&
475       (LaterOff <= EarlierOff && int64_t(LaterOff + LaterSize) > EarlierOff)) {
476     assert(int64_t(LaterOff + LaterSize) < int64_t(EarlierOff + EarlierSize) &&
477            "Expect to be handled as OW_Complete");
478     return OW_Begin;
479   }
480   // Otherwise, they don't completely overlap.
481   return OW_Unknown;
482 }
483 
484 /// Returns true if the memory which is accessed by the second instruction is not
485 /// modified between the first and the second instruction.
486 /// Precondition: Second instruction must be dominated by the first
487 /// instruction.
488 static bool
489 memoryIsNotModifiedBetween(Instruction *FirstI, Instruction *SecondI,
490                            BatchAAResults &AA, const DataLayout &DL,
491                            DominatorTree *DT) {
492   // Do a backwards scan through the CFG from SecondI to FirstI. Look for
493   // instructions which can modify the memory location accessed by SecondI.
494   //
495   // While doing the walk keep track of the address to check. It might be
496   // different in different basic blocks due to PHI translation.
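  // For illustration, a hypothetical snippet: if SecondI stores through
  //   %p = phi i8* [ %a, %bb1 ], [ %b, %bb2 ]
  // then when walking into predecessor %bb1 the address to check becomes %a,
  // and when walking into %bb2 it becomes %b.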
497   using BlockAddressPair = std::pair<BasicBlock *, PHITransAddr>;
498   SmallVector<BlockAddressPair, 16> WorkList;
499   // Keep track of the address we visited each block with. Bail out if we
500   // visit a block with different addresses.
501   DenseMap<BasicBlock *, Value *> Visited;
502 
503   BasicBlock::iterator FirstBBI(FirstI);
504   ++FirstBBI;
505   BasicBlock::iterator SecondBBI(SecondI);
506   BasicBlock *FirstBB = FirstI->getParent();
507   BasicBlock *SecondBB = SecondI->getParent();
508   MemoryLocation MemLoc = MemoryLocation::get(SecondI);
509   auto *MemLocPtr = const_cast<Value *>(MemLoc.Ptr);
510 
511   // Start checking the SecondBB.
512   WorkList.push_back(
513       std::make_pair(SecondBB, PHITransAddr(MemLocPtr, DL, nullptr)));
514   bool isFirstBlock = true;
515 
516   // Check all blocks going backward until we reach the FirstBB.
517   while (!WorkList.empty()) {
518     BlockAddressPair Current = WorkList.pop_back_val();
519     BasicBlock *B = Current.first;
520     PHITransAddr &Addr = Current.second;
521     Value *Ptr = Addr.getAddr();
522 
523     // Ignore instructions before FirstI if this is the FirstBB.
524     BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin());
525 
526     BasicBlock::iterator EI;
527     if (isFirstBlock) {
528       // Ignore instructions after SecondI if this is the first visit of SecondBB.
529       assert(B == SecondBB && "first block is not the store block");
530       EI = SecondBBI;
531       isFirstBlock = false;
532     } else {
533       // It's not SecondBB or (in case of a loop) the second visit of SecondBB.
534       // In this case we also have to look at instructions after SecondI.
535       EI = B->end();
536     }
537     for (; BI != EI; ++BI) {
538       Instruction *I = &*BI;
539       if (I->mayWriteToMemory() && I != SecondI)
540         if (isModSet(AA.getModRefInfo(I, MemLoc.getWithNewPtr(Ptr))))
541           return false;
542     }
543     if (B != FirstBB) {
544       assert(B != &FirstBB->getParent()->getEntryBlock() &&
545           "Should not hit the entry block because SI must be dominated by LI");
546       for (BasicBlock *Pred : predecessors(B)) {
547         PHITransAddr PredAddr = Addr;
548         if (PredAddr.NeedsPHITranslationFromBlock(B)) {
549           if (!PredAddr.IsPotentiallyPHITranslatable())
550             return false;
551           if (PredAddr.PHITranslateValue(B, Pred, DT, false))
552             return false;
553         }
554         Value *TranslatedPtr = PredAddr.getAddr();
555         auto Inserted = Visited.insert(std::make_pair(Pred, TranslatedPtr));
556         if (!Inserted.second) {
557           // We already visited this block before. If it was with a different
558           // address - bail out!
559           if (TranslatedPtr != Inserted.first->second)
560             return false;
561           // ... otherwise just skip it.
562           continue;
563         }
564         WorkList.push_back(std::make_pair(Pred, PredAddr));
565       }
566     }
567   }
568   return true;
569 }
570 
571 static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierStart,
572                          uint64_t &EarlierSize, int64_t LaterStart,
573                          uint64_t LaterSize, bool IsOverwriteEnd) {
574   auto *EarlierIntrinsic = cast<AnyMemIntrinsic>(EarlierWrite);
575   Align PrefAlign = EarlierIntrinsic->getDestAlign().valueOrOne();
576 
577   // We assume that memset/memcpy operates in chunks of the "largest" native
578   // type size and aligned on the same value. That means the optimal start and
579   // size of memset/memcpy should be a multiple of the preferred alignment of
580   // that type, so there is no sense in trying to reduce the store size any
581   // further, since any "extra" stores come for free anyway.
582   // On the other hand, the maximum alignment we can achieve is limited by the
583   // alignment of the initial store.
584 
585   // TODO: Limit maximum alignment by preferred (or abi?) alignment of the
586   // "largest" native type.
587   // Note: What is the proper way to get that value?
588   // Should TargetTransformInfo::getRegisterBitWidth be used or anything else?
589   // PrefAlign = std::min(DL.getPrefTypeAlign(LargestType), PrefAlign);
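  // For illustration, with hypothetical values: shortening the end of a
  // 64-byte memset starting at offset 0 (PrefAlign = 16) that is overwritten
  // from LaterStart = 20 onwards gives Off = offsetToAlignment(20, 16) = 12,
  // so ToRemoveStart = 32 and the memset is trimmed to 32 bytes; bytes
  // [20, 32) stay redundantly written to keep the remaining size 16-byte
  // aligned.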
590 
591   int64_t ToRemoveStart = 0;
592   uint64_t ToRemoveSize = 0;
593   // Compute start and size of the region to remove. Make sure 'PrefAlign' is
594   // maintained on the remaining store.
595   if (IsOverwriteEnd) {
596     // Calculate the required adjustment for 'LaterStart' in order to keep the
597     // remaining store size aligned on 'PrefAlign'.
598     uint64_t Off =
599         offsetToAlignment(uint64_t(LaterStart - EarlierStart), PrefAlign);
600     ToRemoveStart = LaterStart + Off;
601     if (EarlierSize <= uint64_t(ToRemoveStart - EarlierStart))
602       return false;
603     ToRemoveSize = EarlierSize - uint64_t(ToRemoveStart - EarlierStart);
604   } else {
605     ToRemoveStart = EarlierStart;
606     assert(LaterSize >= uint64_t(EarlierStart - LaterStart) &&
607            "Not overlapping accesses?");
608     ToRemoveSize = LaterSize - uint64_t(EarlierStart - LaterStart);
609     // Calculate the required adjustment for 'ToRemoveSize' in order to keep the
610     // start of the remaining store aligned on 'PrefAlign'.
611     uint64_t Off = offsetToAlignment(ToRemoveSize, PrefAlign);
612     if (Off != 0) {
613       if (ToRemoveSize <= (PrefAlign.value() - Off))
614         return false;
615       ToRemoveSize -= PrefAlign.value() - Off;
616     }
617     assert(isAligned(PrefAlign, ToRemoveSize) &&
618            "Should preserve selected alignment");
619   }
620 
621   assert(ToRemoveSize > 0 && "Shouldn't reach here if nothing to remove");
622   assert(EarlierSize > ToRemoveSize && "Can't remove more than original size");
623 
624   uint64_t NewSize = EarlierSize - ToRemoveSize;
625   if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(EarlierWrite)) {
626     // When shortening an atomic memory intrinsic, the newly shortened
627     // length must remain an integer multiple of the element size.
628     const uint32_t ElementSize = AMI->getElementSizeInBytes();
629     if (0 != NewSize % ElementSize)
630       return false;
631   }
632 
633   LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW "
634                     << (IsOverwriteEnd ? "END" : "BEGIN") << ": "
635                     << *EarlierWrite << "\n  KILLER [" << ToRemoveStart << ", "
636                     << int64_t(ToRemoveStart + ToRemoveSize) << ")\n");
637 
638   Value *EarlierWriteLength = EarlierIntrinsic->getLength();
639   Value *TrimmedLength =
640       ConstantInt::get(EarlierWriteLength->getType(), NewSize);
641   EarlierIntrinsic->setLength(TrimmedLength);
642   EarlierIntrinsic->setDestAlignment(PrefAlign);
643 
644   if (!IsOverwriteEnd) {
645     Value *OrigDest = EarlierIntrinsic->getRawDest();
646     Type *Int8PtrTy =
647         Type::getInt8PtrTy(EarlierIntrinsic->getContext(),
648                            OrigDest->getType()->getPointerAddressSpace());
649     Value *Dest = OrigDest;
650     if (OrigDest->getType() != Int8PtrTy)
651       Dest = CastInst::CreatePointerCast(OrigDest, Int8PtrTy, "", EarlierWrite);
652     Value *Indices[1] = {
653         ConstantInt::get(EarlierWriteLength->getType(), ToRemoveSize)};
654     Instruction *NewDestGEP = GetElementPtrInst::CreateInBounds(
655         Type::getInt8Ty(EarlierIntrinsic->getContext()),
656         Dest, Indices, "", EarlierWrite);
657     NewDestGEP->setDebugLoc(EarlierIntrinsic->getDebugLoc());
658     if (NewDestGEP->getType() != OrigDest->getType())
659       NewDestGEP = CastInst::CreatePointerCast(NewDestGEP, OrigDest->getType(),
660                                                "", EarlierWrite);
661     EarlierIntrinsic->setDest(NewDestGEP);
662   }
663 
664   // Finally update start and size of earlier access.
665   if (!IsOverwriteEnd)
666     EarlierStart += ToRemoveSize;
667   EarlierSize = NewSize;
668 
669   return true;
670 }
671 
672 static bool tryToShortenEnd(Instruction *EarlierWrite,
673                             OverlapIntervalsTy &IntervalMap,
674                             int64_t &EarlierStart, uint64_t &EarlierSize) {
675   if (IntervalMap.empty() || !isShortenableAtTheEnd(EarlierWrite))
676     return false;
677 
678   OverlapIntervalsTy::iterator OII = --IntervalMap.end();
679   int64_t LaterStart = OII->second;
680   uint64_t LaterSize = OII->first - LaterStart;
681 
682   assert(OII->first - LaterStart >= 0 && "Size expected to be positive");
683 
684   if (LaterStart > EarlierStart &&
685       // Note: "LaterStart - EarlierStart" is known to be positive due to
686       // preceding check.
687       (uint64_t)(LaterStart - EarlierStart) < EarlierSize &&
688       // Note: "EarlierSize - (uint64_t)(LaterStart - EarlierStart)" is known to
689       // be non-negative due to preceding checks.
690       LaterSize >= EarlierSize - (uint64_t)(LaterStart - EarlierStart)) {
691     if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
692                      LaterSize, true)) {
693       IntervalMap.erase(OII);
694       return true;
695     }
696   }
697   return false;
698 }
699 
700 static bool tryToShortenBegin(Instruction *EarlierWrite,
701                               OverlapIntervalsTy &IntervalMap,
702                               int64_t &EarlierStart, uint64_t &EarlierSize) {
703   if (IntervalMap.empty() || !isShortenableAtTheBeginning(EarlierWrite))
704     return false;
705 
706   OverlapIntervalsTy::iterator OII = IntervalMap.begin();
707   int64_t LaterStart = OII->second;
708   uint64_t LaterSize = OII->first - LaterStart;
709 
710   assert(OII->first - LaterStart >= 0 && "Size expected to be positive");
711 
712   if (LaterStart <= EarlierStart &&
713       // Note: "EarlierStart - LaterStart" is known to be non negative due to
714       // preceding check.
715       LaterSize > (uint64_t)(EarlierStart - LaterStart)) {
716     // Note: "LaterSize - (uint64_t)(EarlierStart - LaterStart)" is known to be
717     // positive due to preceding checks.
718     assert(LaterSize - (uint64_t)(EarlierStart - LaterStart) < EarlierSize &&
719            "Should have been handled as OW_Complete");
720     if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
721                      LaterSize, false)) {
722       IntervalMap.erase(OII);
723       return true;
724     }
725   }
726   return false;
727 }
728 
729 static bool removePartiallyOverlappedStores(const DataLayout &DL,
730                                             InstOverlapIntervalsTy &IOL,
731                                             const TargetLibraryInfo &TLI) {
732   bool Changed = false;
733   for (auto OI : IOL) {
734     Instruction *EarlierWrite = OI.first;
735     MemoryLocation Loc = getLocForWrite(EarlierWrite, TLI);
736     assert(isRemovable(EarlierWrite) && "Expect only removable instruction");
737 
738     const Value *Ptr = Loc.Ptr->stripPointerCasts();
739     int64_t EarlierStart = 0;
740     uint64_t EarlierSize = Loc.Size.getValue();
741     GetPointerBaseWithConstantOffset(Ptr, EarlierStart, DL);
742     OverlapIntervalsTy &IntervalMap = OI.second;
743     Changed |=
744         tryToShortenEnd(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
745     if (IntervalMap.empty())
746       continue;
747     Changed |=
748         tryToShortenBegin(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
749   }
750   return Changed;
751 }
752 
753 static Constant *tryToMergePartialOverlappingStores(
754     StoreInst *Earlier, StoreInst *Later, int64_t InstWriteOffset,
755     int64_t DepWriteOffset, const DataLayout &DL, BatchAAResults &AA,
756     DominatorTree *DT) {
757 
758   if (Earlier && isa<ConstantInt>(Earlier->getValueOperand()) &&
759       DL.typeSizeEqualsStoreSize(Earlier->getValueOperand()->getType()) &&
760       Later && isa<ConstantInt>(Later->getValueOperand()) &&
761       DL.typeSizeEqualsStoreSize(Later->getValueOperand()->getType()) &&
762       memoryIsNotModifiedBetween(Earlier, Later, AA, DL, DT)) {
763     // If the store we find is:
764     //   a) partially overwritten by the later store,
765     //   b) the later store is fully contained in the earlier one,
766     //   c) they both have a constant value, and
767     //   d) neither of the two stores needs padding,
768     // then merge the two stores, replacing the earlier store's value with a
769     // merge of both values.
770     // TODO: Deal with other constant types (vectors, etc), and probably
771     // some mem intrinsics (if needed)
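    // For illustration, with hypothetical little-endian values: if the earlier
    // store writes i32 0x11223344 and the later store writes i8 0xAA at byte
    // offset 1, then BitOffsetDiff = 8, LShiftAmount = 8, the mask selects
    // bits [8, 16), and the merged constant is 0x1122AA44.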
772 
773     APInt EarlierValue =
774         cast<ConstantInt>(Earlier->getValueOperand())->getValue();
775     APInt LaterValue = cast<ConstantInt>(Later->getValueOperand())->getValue();
776     unsigned LaterBits = LaterValue.getBitWidth();
777     assert(EarlierValue.getBitWidth() > LaterValue.getBitWidth());
778     LaterValue = LaterValue.zext(EarlierValue.getBitWidth());
779 
780     // Offset of the smaller store inside the larger store
781     unsigned BitOffsetDiff = (InstWriteOffset - DepWriteOffset) * 8;
782     unsigned LShiftAmount = DL.isBigEndian() ? EarlierValue.getBitWidth() -
783                                                    BitOffsetDiff - LaterBits
784                                              : BitOffsetDiff;
785     APInt Mask = APInt::getBitsSet(EarlierValue.getBitWidth(), LShiftAmount,
786                                    LShiftAmount + LaterBits);
787     // Clear the bits we'll be replacing, then OR with the smaller
788     // store, shifted appropriately.
789     APInt Merged = (EarlierValue & ~Mask) | (LaterValue << LShiftAmount);
790     LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n  Earlier: " << *Earlier
791                       << "\n  Later: " << *Later
792                       << "\n  Merged Value: " << Merged << '\n');
793     return ConstantInt::get(Earlier->getValueOperand()->getType(), Merged);
794   }
795   return nullptr;
796 }
797 
798 namespace {
799 // Returns true if \p I is an intrinsic that does not read or write memory.
800 bool isNoopIntrinsic(Instruction *I) {
801   if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
802     switch (II->getIntrinsicID()) {
803     case Intrinsic::lifetime_start:
804     case Intrinsic::lifetime_end:
805     case Intrinsic::invariant_end:
806     case Intrinsic::launder_invariant_group:
807     case Intrinsic::assume:
808       return true;
809     case Intrinsic::dbg_addr:
810     case Intrinsic::dbg_declare:
811     case Intrinsic::dbg_label:
812     case Intrinsic::dbg_value:
813       llvm_unreachable("Intrinsic should not be modeled in MemorySSA");
814     default:
815       return false;
816     }
817   }
818   return false;
819 }
820 
821 // Check if we can ignore \p D for DSE.
822 bool canSkipDef(MemoryDef *D, bool DefVisibleToCaller) {
823   Instruction *DI = D->getMemoryInst();
824   // Calls that only access inaccessible memory cannot read or write any memory
825   // locations we consider for elimination.
826   if (auto *CB = dyn_cast<CallBase>(DI))
827     if (CB->onlyAccessesInaccessibleMemory())
828       return true;
829 
830   // We can eliminate stores to locations not visible to the caller across
831   // throwing instructions.
832   if (DI->mayThrow() && !DefVisibleToCaller)
833     return true;
834 
835   // We can remove the dead stores, irrespective of the fence and its ordering
836   // (release/acquire/seq_cst). Fences only constrain the ordering of
837   // already visible stores; they do not make a store visible to other
838   // threads. So, skipping over a fence does not change a store from being
839   // dead.
840   if (isa<FenceInst>(DI))
841     return true;
842 
843   // Skip intrinsics that do not really read or modify memory.
844   if (isNoopIntrinsic(D->getMemoryInst()))
845     return true;
846 
847   return false;
848 }
849 
850 struct DSEState {
851   Function &F;
852   AliasAnalysis &AA;
853 
854   /// The single BatchAA instance that is used to cache AA queries. It will
855   /// not be invalidated over the whole run. This is safe, because:
856   /// 1. Only memory writes are removed, so the alias cache for memory
857   ///    locations remains valid.
858   /// 2. No new instructions are added (only instructions removed), so cached
859   ///    information for a deleted value cannot be accessed by a re-used new
860   ///    value pointer.
861   BatchAAResults BatchAA;
862 
863   MemorySSA &MSSA;
864   DominatorTree &DT;
865   PostDominatorTree &PDT;
866   const TargetLibraryInfo &TLI;
867   const DataLayout &DL;
868   const LoopInfo &LI;
869 
870   // Whether the function contains any irreducible control flow, useful for
871   // being able to detect loops accurately.
872   bool ContainsIrreducibleLoops;
873 
874   // All MemoryDefs that potentially could kill other MemDefs.
875   SmallVector<MemoryDef *, 64> MemDefs;
876   // Any MemoryAccesses that should be skipped because they are already deleted.
877   SmallPtrSet<MemoryAccess *, 4> SkipStores;
878   // Keep track of all of the objects that are invisible to the caller before
879   // the function returns.
880   // SmallPtrSet<const Value *, 16> InvisibleToCallerBeforeRet;
881   DenseMap<const Value *, bool> InvisibleToCallerBeforeRet;
882   // Keep track of all of the objects that are invisible to the caller after
883   // the function returns.
884   DenseMap<const Value *, bool> InvisibleToCallerAfterRet;
885   // Keep track of blocks with throwing instructions not modeled in MemorySSA.
886   SmallPtrSet<BasicBlock *, 16> ThrowingBlocks;
887   // Post-order numbers for each basic block. Used to figure out if memory
888   // accesses are executed before another access.
889   DenseMap<BasicBlock *, unsigned> PostOrderNumbers;
890 
891   /// Keep track of instructions (partly) overlapping with killing MemoryDefs per
892   /// basic block.
893   DenseMap<BasicBlock *, InstOverlapIntervalsTy> IOLs;
894 
895   DSEState(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, DominatorTree &DT,
896            PostDominatorTree &PDT, const TargetLibraryInfo &TLI,
897            const LoopInfo &LI)
898       : F(F), AA(AA), BatchAA(AA), MSSA(MSSA), DT(DT), PDT(PDT), TLI(TLI),
899         DL(F.getParent()->getDataLayout()), LI(LI) {}
900 
901   static DSEState get(Function &F, AliasAnalysis &AA, MemorySSA &MSSA,
902                       DominatorTree &DT, PostDominatorTree &PDT,
903                       const TargetLibraryInfo &TLI, const LoopInfo &LI) {
904     DSEState State(F, AA, MSSA, DT, PDT, TLI, LI);
905     // Collect blocks with throwing instructions not modeled in MemorySSA and
906     // alloc-like objects.
907     unsigned PO = 0;
908     for (BasicBlock *BB : post_order(&F)) {
909       State.PostOrderNumbers[BB] = PO++;
910       for (Instruction &I : *BB) {
911         MemoryAccess *MA = MSSA.getMemoryAccess(&I);
912         if (I.mayThrow() && !MA)
913           State.ThrowingBlocks.insert(I.getParent());
914 
915         auto *MD = dyn_cast_or_null<MemoryDef>(MA);
916         if (MD && State.MemDefs.size() < MemorySSADefsPerBlockLimit &&
917             (State.getLocForWriteEx(&I) || State.isMemTerminatorInst(&I)))
918           State.MemDefs.push_back(MD);
919       }
920     }
921 
922     // Treat byval or inalloca arguments the same as Allocas; stores to them are
923     // dead at the end of the function.
924     for (Argument &AI : F.args())
925       if (AI.hasPassPointeeByValueCopyAttr()) {
926         // For byval, the caller doesn't know the address of the allocation.
927         if (AI.hasByValAttr())
928           State.InvisibleToCallerBeforeRet.insert({&AI, true});
929         State.InvisibleToCallerAfterRet.insert({&AI, true});
930       }
931 
932     // Collect whether there is any irreducible control flow in the function.
933     State.ContainsIrreducibleLoops = mayContainIrreducibleControl(F, &LI);
934 
935     return State;
936   }
937 
938   /// Return 'OW_Complete' if a store to the 'Later' location (by \p LaterI
939   /// instruction) completely overwrites a store to the 'Earlier' location
940   /// (by \p EarlierI instruction).
941   /// Return OW_MaybePartial if \p Later does not completely overwrite
942   /// \p Earlier, but they both write to the same underlying object. In that
943   /// case, use isPartialOverwrite to check if \p Later partially overwrites
944   /// \p Earlier. Returns 'OW_Unknown' if nothing can be determined.
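  /// For illustration, with hypothetical stores to the same address: an
  /// earlier 'store i32' followed by a later 'store i64' is OW_Complete,
  /// whereas an earlier 'store i64' followed by a later 'store i32' can at
  /// best be OW_MaybePartial.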
945   OverwriteResult
946   isOverwrite(const Instruction *LaterI, const Instruction *EarlierI,
947               const MemoryLocation &Later, const MemoryLocation &Earlier,
948               int64_t &EarlierOff, int64_t &LaterOff) {
949     // AliasAnalysis does not always account for loops. Limit overwrite checks
950     // to dependencies for which we can guarantee they are independent of any
951     // loops they are in.
952     if (!isGuaranteedLoopIndependent(EarlierI, LaterI, Earlier))
953       return OW_Unknown;
954 
955     // FIXME: Vet that this works for size upper-bounds. Seems unlikely that we'll
956     // get imprecise values here, though (except for unknown sizes).
957     if (!Later.Size.isPrecise() || !Earlier.Size.isPrecise()) {
958       // In case no constant size is known, try to use the IR values for the
959       // number of bytes written and check if they match.
960       const auto *LaterMemI = dyn_cast<MemIntrinsic>(LaterI);
961       const auto *EarlierMemI = dyn_cast<MemIntrinsic>(EarlierI);
962       if (LaterMemI && EarlierMemI) {
963         const Value *LaterV = LaterMemI->getLength();
964         const Value *EarlierV = EarlierMemI->getLength();
965         if (LaterV == EarlierV && BatchAA.isMustAlias(Earlier, Later))
966           return OW_Complete;
967       }
968 
969       // Masked stores have imprecise locations, but we can reason about them
970       // to some extent.
971       return isMaskedStoreOverwrite(LaterI, EarlierI, BatchAA);
972     }
973 
974     const uint64_t LaterSize = Later.Size.getValue();
975     const uint64_t EarlierSize = Earlier.Size.getValue();
976 
977     // Query the alias information
978     AliasResult AAR = BatchAA.alias(Later, Earlier);
979 
980     // If the start pointers are the same, we just have to compare sizes to see if
981     // the later store was larger than the earlier store.
982     if (AAR == AliasResult::MustAlias) {
983       // Make sure that the Later size is >= the Earlier size.
984       if (LaterSize >= EarlierSize)
985         return OW_Complete;
986     }
987 
988     // If we hit a partial alias we may have a full overwrite
989     if (AAR == AliasResult::PartialAlias && AAR.hasOffset()) {
990       int32_t Off = AAR.getOffset();
991       if (Off >= 0 && (uint64_t)Off + EarlierSize <= LaterSize)
992         return OW_Complete;
993     }
994 
995     // Check to see if the later store is to the entire object (either a global,
996     // an alloca, or a byval/inalloca argument).  If so, then it clearly
997     // overwrites any other store to the same object.
998     const Value *P1 = Earlier.Ptr->stripPointerCasts();
999     const Value *P2 = Later.Ptr->stripPointerCasts();
1000     const Value *UO1 = getUnderlyingObject(P1), *UO2 = getUnderlyingObject(P2);
1001 
1002     // If we can't resolve the same pointers to the same object, then we can't
1003     // analyze them at all.
1004     if (UO1 != UO2)
1005       return OW_Unknown;
1006 
1007     // If the "Later" store is to a recognizable object, get its size.
1008     uint64_t ObjectSize = getPointerSize(UO2, DL, TLI, &F);
1009     if (ObjectSize != MemoryLocation::UnknownSize)
1010       if (ObjectSize == LaterSize && ObjectSize >= EarlierSize)
1011         return OW_Complete;
1012 
1013     // Okay, we have stores to two completely different pointers.  Try to
1014     // decompose the pointer into a "base + constant_offset" form.  If the base
1015     // pointers are equal, then we can reason about the two stores.
1016     EarlierOff = 0;
1017     LaterOff = 0;
1018     const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
1019     const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);
1020 
1021     // If the base pointers still differ, we have two completely different stores.
1022     if (BP1 != BP2)
1023       return OW_Unknown;
1024 
1025     // The later access completely overlaps the earlier store if and only if
1026     // both start and end of the earlier one is "inside" the later one:
1027     //    |<->|--earlier--|<->|
1028     //    |-------later-------|
1029     // Accesses may overlap if and only if start of one of them is "inside"
1030     // another one:
1031     //    |<->|--earlier--|<----->|
1032     //    |-------later-------|
1033     //           OR
1034     //    |----- earlier -----|
1035     //    |<->|---later---|<----->|
1036     //
1037     // We have to be careful here as *Off is signed while *.Size is unsigned.
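    // For illustration, with hypothetical values: EarlierOff = 4,
    // EarlierSize = 4, LaterOff = 0, LaterSize = 8 gives
    // (4 - 0) + 4 <= 8, i.e. OW_Complete, whereas LaterSize = 6 only gives
    // 4 < 6, i.e. OW_MaybePartial.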
1038 
1039     // Check if the earlier access starts "not before" the later one.
1040     if (EarlierOff >= LaterOff) {
1041       // If the earlier access ends "not after" the later access then the earlier
1042       // one is completely overwritten by the later one.
1043       if (uint64_t(EarlierOff - LaterOff) + EarlierSize <= LaterSize)
1044         return OW_Complete;
1045       // If start of the earlier access is "before" end of the later access then
1046       // accesses overlap.
1047       else if ((uint64_t)(EarlierOff - LaterOff) < LaterSize)
1048         return OW_MaybePartial;
1049     }
1050     // If start of the later access is "before" end of the earlier access then
1051     // accesses overlap.
1052     else if ((uint64_t)(LaterOff - EarlierOff) < EarlierSize) {
1053       return OW_MaybePartial;
1054     }
1055 
1056     // Can reach here only if accesses are known not to overlap. There is no
1057     // dedicated code to indicate no overlap so signal "unknown".
1058     return OW_Unknown;
1059   }
1060 
1061   bool isInvisibleToCallerAfterRet(const Value *V) {
1062     if (isa<AllocaInst>(V))
1063       return true;
1064     auto I = InvisibleToCallerAfterRet.insert({V, false});
1065     if (I.second) {
1066       if (!isInvisibleToCallerBeforeRet(V)) {
1067         I.first->second = false;
1068       } else {
1069         auto *Inst = dyn_cast<Instruction>(V);
1070         if (Inst && isAllocLikeFn(Inst, &TLI))
1071           I.first->second = !PointerMayBeCaptured(V, true, false);
1072       }
1073     }
1074     return I.first->second;
1075   }
1076 
1077   bool isInvisibleToCallerBeforeRet(const Value *V) {
1078     if (isa<AllocaInst>(V))
1079       return true;
1080     auto I = InvisibleToCallerBeforeRet.insert({V, false});
1081     if (I.second) {
1082       auto *Inst = dyn_cast<Instruction>(V);
1083       if (Inst && isAllocLikeFn(Inst, &TLI))
1084         // NOTE: This could be made more precise by PointerMayBeCapturedBefore
1085         // with the killing MemoryDef. But we refrain from doing so for now to
1086         // limit compile-time and this does not cause any changes to the number
1087         // of stores removed on a large test set in practice.
1088         I.first->second = !PointerMayBeCaptured(V, false, true);
1089     }
1090     return I.first->second;
1091   }
1092 
1093   Optional<MemoryLocation> getLocForWriteEx(Instruction *I) const {
1094     if (!I->mayWriteToMemory())
1095       return None;
1096 
1097     if (auto *MTI = dyn_cast<AnyMemIntrinsic>(I))
1098       return {MemoryLocation::getForDest(MTI)};
1099 
1100     if (auto *CB = dyn_cast<CallBase>(I)) {
1101       // If the function may write to memory we do not know about, bail out.
1102       if (!CB->onlyAccessesArgMemory() &&
1103           !CB->onlyAccessesInaccessibleMemOrArgMem())
1104         return None;
1105 
1106       LibFunc LF;
1107       if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) {
1108         switch (LF) {
1109         case LibFunc_strcpy:
1110         case LibFunc_strncpy:
1111         case LibFunc_strcat:
1112         case LibFunc_strncat:
1113           return {MemoryLocation::getAfter(CB->getArgOperand(0))};
1114         default:
1115           break;
1116         }
1117       }
1118       switch (CB->getIntrinsicID()) {
1119       case Intrinsic::init_trampoline:
1120         return {MemoryLocation::getAfter(CB->getArgOperand(0))};
1121       case Intrinsic::masked_store:
1122         return {MemoryLocation::getForArgument(CB, 1, TLI)};
1123       default:
1124         break;
1125       }
1126       return None;
1127     }
1128 
1129     return MemoryLocation::getOrNone(I);
1130   }
1131 
1132   /// Returns true if \p UseInst completely overwrites \p DefLoc
1133   /// (stored by \p DefInst).
1134   bool isCompleteOverwrite(const MemoryLocation &DefLoc, Instruction *DefInst,
1135                            Instruction *UseInst) {
1136     // UseInst has a MemoryDef associated in MemorySSA. It's possible for a
1137     // MemoryDef to not write to memory, e.g. a volatile load is modeled as a
1138     // MemoryDef.
1139     if (!UseInst->mayWriteToMemory())
1140       return false;
1141 
1142     if (auto *CB = dyn_cast<CallBase>(UseInst))
1143       if (CB->onlyAccessesInaccessibleMemory())
1144         return false;
1145 
1146     int64_t InstWriteOffset, DepWriteOffset;
1147     if (auto CC = getLocForWriteEx(UseInst))
1148       return isOverwrite(UseInst, DefInst, *CC, DefLoc, DepWriteOffset,
1149                          InstWriteOffset) == OW_Complete;
1150     return false;
1151   }
1152 
1153   /// Returns true if \p Def is not read before returning from the function.
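  /// For illustration, a hypothetical case: a store to a local alloca that is
  /// not followed by any load of that alloca before 'ret' has no MemorySSA
  /// use that reads the written location, so this returns true and the store
  /// can be treated as dead at the end of the function.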
1154   bool isWriteAtEndOfFunction(MemoryDef *Def) {
1155     LLVM_DEBUG(dbgs() << "  Check if def " << *Def << " ("
1156                       << *Def->getMemoryInst()
1157                       << ") is at the end the function \n");
1158 
1159     auto MaybeLoc = getLocForWriteEx(Def->getMemoryInst());
1160     if (!MaybeLoc) {
1161       LLVM_DEBUG(dbgs() << "  ... could not get location for write.\n");
1162       return false;
1163     }
1164 
1165     SmallVector<MemoryAccess *, 4> WorkList;
1166     SmallPtrSet<MemoryAccess *, 8> Visited;
1167     auto PushMemUses = [&WorkList, &Visited](MemoryAccess *Acc) {
1168       if (!Visited.insert(Acc).second)
1169         return;
1170       for (Use &U : Acc->uses())
1171         WorkList.push_back(cast<MemoryAccess>(U.getUser()));
1172     };
1173     PushMemUses(Def);
1174     for (unsigned I = 0; I < WorkList.size(); I++) {
1175       if (WorkList.size() >= MemorySSAScanLimit) {
1176         LLVM_DEBUG(dbgs() << "  ... hit exploration limit.\n");
1177         return false;
1178       }
1179 
1180       MemoryAccess *UseAccess = WorkList[I];
1181       // Simply adding the users of MemoryPhi to the worklist is not enough,
1182       // because we might miss read clobbers in different iterations of a loop,
1183       // for example.
1184       // TODO: Add support for phi translation to handle the loop case.
1185       if (isa<MemoryPhi>(UseAccess))
1186         return false;
1187 
1188       // TODO: Checking for aliasing is expensive. Consider reducing the amount
1189       // of times this is called and/or caching it.
1190       Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
1191       if (isReadClobber(*MaybeLoc, UseInst)) {
1192         LLVM_DEBUG(dbgs() << "  ... hit read clobber " << *UseInst << ".\n");
1193         return false;
1194       }
1195 
1196       if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess))
1197         PushMemUses(UseDef);
1198     }
1199     return true;
1200   }
1201 
1202   /// If \p I is a memory terminator like llvm.lifetime.end or free, return a
1203   /// pair with the MemoryLocation terminated by \p I and a boolean flag
1204   /// indicating whether \p I is a free-like call.
1205   Optional<std::pair<MemoryLocation, bool>>
1206   getLocForTerminator(Instruction *I) const {
1207     uint64_t Len;
1208     Value *Ptr;
1209     if (match(I, m_Intrinsic<Intrinsic::lifetime_end>(m_ConstantInt(Len),
1210                                                       m_Value(Ptr))))
1211       return {std::make_pair(MemoryLocation(Ptr, Len), false)};
1212 
1213     if (auto *CB = dyn_cast<CallBase>(I)) {
1214       if (isFreeCall(I, &TLI))
1215         return {std::make_pair(MemoryLocation::getAfter(CB->getArgOperand(0)),
1216                                true)};
1217     }
1218 
1219     return None;
1220   }
1221 
1222   /// Returns true if \p I is a memory terminator instruction like
1223   /// llvm.lifetime.end or free.
1224   bool isMemTerminatorInst(Instruction *I) const {
1225     IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
1226     return (II && II->getIntrinsicID() == Intrinsic::lifetime_end) ||
1227            isFreeCall(I, &TLI);
1228   }
1229 
1230   /// Returns true if \p MaybeTerm is a memory terminator for \p Loc from
1231   /// instruction \p AccessI.
1232   bool isMemTerminator(const MemoryLocation &Loc, Instruction *AccessI,
1233                        Instruction *MaybeTerm) {
1234     Optional<std::pair<MemoryLocation, bool>> MaybeTermLoc =
1235         getLocForTerminator(MaybeTerm);
1236 
1237     if (!MaybeTermLoc)
1238       return false;
1239 
1240     // If the terminator is a free-like call, all accesses to the underlying
1241     // object can be considered terminated.
1242     if (getUnderlyingObject(Loc.Ptr) !=
1243         getUnderlyingObject(MaybeTermLoc->first.Ptr))
1244       return false;
1245 
1246     auto TermLoc = MaybeTermLoc->first;
1247     if (MaybeTermLoc->second) {
1248       const Value *LocUO = getUnderlyingObject(Loc.Ptr);
1249       return BatchAA.isMustAlias(TermLoc.Ptr, LocUO);
1250     }
1251     int64_t InstWriteOffset, DepWriteOffset;
1252     return isOverwrite(MaybeTerm, AccessI, TermLoc, Loc, DepWriteOffset,
1253                        InstWriteOffset) == OW_Complete;
1254   }
1255 
1256   // Returns true if \p Use may read from \p DefLoc.
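       // For example, a load or call that may read \p DefLoc is a read clobber,
       // as is an atomic store with release or stronger ordering (it must not be
       // reordered); monotonic or weaker stores are not.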
1257   bool isReadClobber(const MemoryLocation &DefLoc, Instruction *UseInst) {
1258     if (isNoopIntrinsic(UseInst))
1259       return false;
1260 
1261     // Monotonic or weaker atomic stores can be re-ordered and do not need to be
1262     // treated as read clobbers.
1263     if (auto SI = dyn_cast<StoreInst>(UseInst))
1264       return isStrongerThan(SI->getOrdering(), AtomicOrdering::Monotonic);
1265 
1266     if (!UseInst->mayReadFromMemory())
1267       return false;
1268 
1269     if (auto *CB = dyn_cast<CallBase>(UseInst))
1270       if (CB->onlyAccessesInaccessibleMemory())
1271         return false;
1272 
1273     // NOTE: For calls, the number of stores removed could be slightly improved
1274     // by using AA.callCapturesBefore(UseInst, DefLoc, &DT), but in practice
1275     // that proved too expensive relative to the benefit. For now, avoid the
1276     // more expensive analysis to limit compile-time.
1277     return isRefSet(BatchAA.getModRefInfo(UseInst, DefLoc));
1278   }
1279 
1280   /// Returns true if a dependency between \p Current and \p KillingDef is
1281   /// guaranteed to be loop invariant for the loops that they are in, either
1282   /// because they are known to be in the same block or at the same loop level,
1283   /// or because \p CurrentLoc only references a single MemoryLocation
1284   /// during execution of the containing function.
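       /// For example (illustrative), two stores to %p[i] executed in different
       /// iterations of a loop write to different addresses, even though alias
       /// analysis, which reasons about single SSA values, reports the pointer
       /// value as must-alias with itself.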
1285   bool isGuaranteedLoopIndependent(const Instruction *Current,
1286                                    const Instruction *KillingDef,
1287                                    const MemoryLocation &CurrentLoc) {
1288     // If the dependency is within the same block or loop level (being careful
1289     // of irreducible loops), we know that AA will return a valid result for the
1290     // memory dependency. (Both being at the function level, outside of any
1291     // loop, would also be valid, but we currently disable that to limit compile time.)
1292     if (Current->getParent() == KillingDef->getParent())
1293       return true;
1294     const Loop *CurrentLI = LI.getLoopFor(Current->getParent());
1295     if (!ContainsIrreducibleLoops && CurrentLI &&
1296         CurrentLI == LI.getLoopFor(KillingDef->getParent()))
1297       return true;
1298     // Otherwise check the memory location is invariant to any loops.
1299     return isGuaranteedLoopInvariant(CurrentLoc.Ptr);
1300   }
1301 
1302   /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible
1303   /// loop. In particular, this guarantees that it only references a single
1304   /// MemoryLocation during execution of the containing function.
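       /// For example (illustrative), an alloca or a GEP with all-constant indices
       /// whose base is defined in the entry block qualifies, whereas a GEP whose
       /// index is a loop-carried PHI does not.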
1305   bool isGuaranteedLoopInvariant(const Value *Ptr) {
1306     auto IsGuaranteedLoopInvariantBase = [](const Value *Ptr) {
1307       Ptr = Ptr->stripPointerCasts();
1308       if (auto *I = dyn_cast<Instruction>(Ptr))
1309         return I->getParent()->isEntryBlock();
1310       return true;
1311     };
1312 
1313     Ptr = Ptr->stripPointerCasts();
1314     if (auto *I = dyn_cast<Instruction>(Ptr)) {
1315       if (I->getParent()->isEntryBlock())
1316         return true;
1317     }
1318     if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
1319       return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) &&
1320              GEP->hasAllConstantIndices();
1321     }
1322     return IsGuaranteedLoopInvariantBase(Ptr);
1323   }
1324 
1325   // Find a MemoryDef writing to \p DefLoc and dominating \p StartAccess, with
1326   // no read access between them or on any other path to a function exit block
1327   // if \p DefLoc is not accessible after the function returns. If there is no
1328   // such MemoryDef, return None. The returned value may not (completely)
1329   // overwrite \p DefLoc. Currently we bail out when we encounter an aliasing
1330   // MemoryUse (read).
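       // The search proceeds in two phases: first walk up the MemorySSA def chain
       // from \p StartAccess until a candidate MemoryDef (or a MemoryPhi) is
       // found, then walk the uses of that candidate to make sure no instruction
       // may read the location before it is overwritten (and, for locations
       // visible after the function returns, that it is overwritten along all
       // paths to an exit).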
1331   Optional<MemoryAccess *>
1332   getDomMemoryDef(MemoryDef *KillingDef, MemoryAccess *StartAccess,
1333                   const MemoryLocation &DefLoc, const Value *DefUO,
1334                   unsigned &ScanLimit, unsigned &WalkerStepLimit,
1335                   bool IsMemTerm, unsigned &PartialLimit) {
1336     if (ScanLimit == 0 || WalkerStepLimit == 0) {
1337       LLVM_DEBUG(dbgs() << "\n    ...  hit scan limit\n");
1338       return None;
1339     }
1340 
1341     MemoryAccess *Current = StartAccess;
1342     Instruction *KillingI = KillingDef->getMemoryInst();
1343     LLVM_DEBUG(dbgs() << "  trying to get dominating access\n");
1344 
1345     // Find the next clobbering Mod access for DefLoc, starting at StartAccess.
1346     Optional<MemoryLocation> CurrentLoc;
1347     for (;; Current = cast<MemoryDef>(Current)->getDefiningAccess()) {
1348       LLVM_DEBUG({
1349         dbgs() << "   visiting " << *Current;
1350         if (!MSSA.isLiveOnEntryDef(Current) && isa<MemoryUseOrDef>(Current))
1351           dbgs() << " (" << *cast<MemoryUseOrDef>(Current)->getMemoryInst()
1352                  << ")";
1353         dbgs() << "\n";
1354       });
1355 
1356       // Reached TOP.
1357       if (MSSA.isLiveOnEntryDef(Current)) {
1358         LLVM_DEBUG(dbgs() << "   ...  found LiveOnEntryDef\n");
1359         return None;
1360       }
1361 
1362       // Cost of a step. Accesses in the same block are more likely to be valid
1363       // candidates for elimination, hence consider them cheaper.
1364       unsigned StepCost = KillingDef->getBlock() == Current->getBlock()
1365                               ? MemorySSASameBBStepCost
1366                               : MemorySSAOtherBBStepCost;
1367       if (WalkerStepLimit <= StepCost) {
1368         LLVM_DEBUG(dbgs() << "   ...  hit walker step limit\n");
1369         return None;
1370       }
1371       WalkerStepLimit -= StepCost;
1372 
1373       // Return for MemoryPhis. They cannot be eliminated directly and the
1374       // caller is responsible for traversing them.
1375       if (isa<MemoryPhi>(Current)) {
1376         LLVM_DEBUG(dbgs() << "   ...  found MemoryPhi\n");
1377         return Current;
1378       }
1379 
1380       // Below, check if CurrentDef is a valid candidate to be eliminated by
1381       // KillingDef. If it is not, check the next candidate.
1382       MemoryDef *CurrentDef = cast<MemoryDef>(Current);
1383       Instruction *CurrentI = CurrentDef->getMemoryInst();
1384 
1385       if (canSkipDef(CurrentDef, !isInvisibleToCallerBeforeRet(DefUO)))
1386         continue;
1387 
1388       // Before we try to remove anything, check for any extra throwing
1389       // instructions that block us from DSEing
1390       if (mayThrowBetween(KillingI, CurrentI, DefUO)) {
1391         LLVM_DEBUG(dbgs() << "  ... skip, may throw!\n");
1392         return None;
1393       }
1394 
1395       // Check for anything that looks like it will be a barrier to further
1396       // removal
1397       if (isDSEBarrier(DefUO, CurrentI)) {
1398         LLVM_DEBUG(dbgs() << "  ... skip, barrier\n");
1399         return None;
1400       }
1401 
1402       // If Current is known to be on a path that reads DefLoc or is a read
1403       // clobber, bail out, as the path is not profitable. We skip this check
1404       // for intrinsic calls, because the code knows how to handle memcpy
1405       // intrinsics.
1406       if (!isa<IntrinsicInst>(CurrentI) && isReadClobber(DefLoc, CurrentI))
1407         return None;
1408 
1409       // Quick check if there are direct uses that are read-clobbers.
1410       if (any_of(Current->uses(), [this, &DefLoc, StartAccess](Use &U) {
1411             if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(U.getUser()))
1412               return !MSSA.dominates(StartAccess, UseOrDef) &&
1413                      isReadClobber(DefLoc, UseOrDef->getMemoryInst());
1414             return false;
1415           })) {
1416         LLVM_DEBUG(dbgs() << "   ...  found a read clobber\n");
1417         return None;
1418       }
1419 
1420       // If Current cannot be analyzed or is not removable, check the next
1421       // candidate.
1422       if (!hasAnalyzableMemoryWrite(CurrentI, TLI) || !isRemovable(CurrentI))
1423         continue;
1424 
1425       // If Current does not have an analyzable write location, skip it
1426       CurrentLoc = getLocForWriteEx(CurrentI);
1427       if (!CurrentLoc)
1428         continue;
1429 
1430       // AliasAnalysis does not account for loops. Limit elimination to
1431       // candidates that are guaranteed to always store to the same memory
1432       // location and are not located in different loops.
1433       if (!isGuaranteedLoopIndependent(CurrentI, KillingI, *CurrentLoc)) {
1434         LLVM_DEBUG(dbgs() << "  ... not guaranteed loop independent\n");
1435         WalkerStepLimit -= 1;
1436         continue;
1437       }
1438 
1439       if (IsMemTerm) {
1440         // If the killing def is a memory terminator (e.g. lifetime.end), check
1441         // the next candidate if Current does not write the same
1442         // underlying object as the terminator.
1443         if (!isMemTerminator(*CurrentLoc, CurrentI, KillingI))
1444           continue;
1445       } else {
1446         int64_t InstWriteOffset, DepWriteOffset;
1447         auto OR = isOverwrite(KillingI, CurrentI, DefLoc, *CurrentLoc,
1448                               DepWriteOffset, InstWriteOffset);
1449         // If Current does not write to the same object as KillingDef, check
1450         // the next candidate.
1451         if (OR == OW_Unknown)
1452           continue;
1453         else if (OR == OW_MaybePartial) {
1454           // If KillingDef only partially overwrites Current, check the next
1455           // candidate if the partial step limit is exceeded. This aggressively
1456           // limits the number of candidates for partial store elimination,
1457           // which are less likely to be removable in the end.
1458           if (PartialLimit <= 1) {
1459             WalkerStepLimit -= 1;
1460             continue;
1461           }
1462           PartialLimit -= 1;
1463         }
1464       }
1465       break;
1466     };
1467 
1468     // Accesses to objects accessible after the function returns can only be
1469     // eliminated if the access is killed along all paths to the exit. Collect
1470     // the blocks with killing (i.e. completely overwriting) MemoryDefs and
1471     // check if they cover all paths from EarlierAccess to any function exit.
1472     SmallPtrSet<Instruction *, 16> KillingDefs;
1473     KillingDefs.insert(KillingDef->getMemoryInst());
1474     MemoryAccess *EarlierAccess = Current;
1475     Instruction *EarlierMemInst =
1476         cast<MemoryDef>(EarlierAccess)->getMemoryInst();
1477     LLVM_DEBUG(dbgs() << "  Checking for reads of " << *EarlierAccess << " ("
1478                       << *EarlierMemInst << ")\n");
1479 
1480     SmallSetVector<MemoryAccess *, 32> WorkList;
1481     auto PushMemUses = [&WorkList](MemoryAccess *Acc) {
1482       for (Use &U : Acc->uses())
1483         WorkList.insert(cast<MemoryAccess>(U.getUser()));
1484     };
1485     PushMemUses(EarlierAccess);
1486 
1487     // Optimistically collect all accesses for reads. If we do not find any
1488     // read clobbers, add them to the cache.
1489     SmallPtrSet<MemoryAccess *, 16> KnownNoReads;
1490     if (!EarlierMemInst->mayReadFromMemory())
1491       KnownNoReads.insert(EarlierAccess);
1492     // Check if EarlierDef may be read.
1493     for (unsigned I = 0; I < WorkList.size(); I++) {
1494       MemoryAccess *UseAccess = WorkList[I];
1495 
1496       LLVM_DEBUG(dbgs() << "   " << *UseAccess);
1497       // Bail out if the number of accesses to check exceeds the scan limit.
1498       if (ScanLimit < (WorkList.size() - I)) {
1499         LLVM_DEBUG(dbgs() << "\n    ...  hit scan limit\n");
1500         return None;
1501       }
1502       --ScanLimit;
1503       NumDomMemDefChecks++;
1504       KnownNoReads.insert(UseAccess);
1505 
1506       if (isa<MemoryPhi>(UseAccess)) {
1507         if (any_of(KillingDefs, [this, UseAccess](Instruction *KI) {
1508               return DT.properlyDominates(KI->getParent(),
1509                                           UseAccess->getBlock());
1510             })) {
1511           LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing block\n");
1512           continue;
1513         }
1514         LLVM_DEBUG(dbgs() << "\n    ... adding PHI uses\n");
1515         PushMemUses(UseAccess);
1516         continue;
1517       }
1518 
1519       Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
1520       LLVM_DEBUG(dbgs() << " (" << *UseInst << ")\n");
1521 
1522       if (any_of(KillingDefs, [this, UseInst](Instruction *KI) {
1523             return DT.dominates(KI, UseInst);
1524           })) {
1525         LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing def\n");
1526         continue;
1527       }
1528 
1529       // A memory terminator kills all preceding MemoryDefs and all succeeding
1530       // MemoryAccesses. We do not have to check its users.
1531       if (isMemTerminator(*CurrentLoc, EarlierMemInst, UseInst)) {
1532         LLVM_DEBUG(
1533             dbgs()
1534             << " ... skipping, memterminator invalidates following accesses\n");
1535         continue;
1536       }
1537 
1538       if (isNoopIntrinsic(cast<MemoryUseOrDef>(UseAccess)->getMemoryInst())) {
1539         LLVM_DEBUG(dbgs() << "    ... adding uses of intrinsic\n");
1540         PushMemUses(UseAccess);
1541         continue;
1542       }
1543 
1544       if (UseInst->mayThrow() && !isInvisibleToCallerBeforeRet(DefUO)) {
1545         LLVM_DEBUG(dbgs() << "  ... found throwing instruction\n");
1546         return None;
1547       }
1548 
1549       // Uses which may read the original MemoryDef mean we cannot eliminate the
1550       // original MD. Stop walk.
1551       if (isReadClobber(*CurrentLoc, UseInst)) {
1552         LLVM_DEBUG(dbgs() << "    ... found read clobber\n");
1553         return None;
1554       }
1555 
1556       // If this worklist walks back to the original memory access (and the
1557       // pointer is not guaranteed loop invariant) then we cannot assume that a
1558       // store kills itself.
1559       if (EarlierAccess == UseAccess &&
1560           !isGuaranteedLoopInvariant(CurrentLoc->Ptr)) {
1561         LLVM_DEBUG(dbgs() << "    ... found not loop invariant self access\n");
1562         return None;
1563       }
1564       // Otherwise, for the KillingDef and EarlierAccess we only have to check
1565       // whether they read the memory location.
1566       // TODO: It would probably be better to check for self-reads before
1567       // calling the function.
1568       if (KillingDef == UseAccess || EarlierAccess == UseAccess) {
1569         LLVM_DEBUG(dbgs() << "    ... skipping killing def/dom access\n");
1570         continue;
1571       }
1572 
1573       // Check all uses for MemoryDefs, except for defs completely overwriting
1574       // the original location. Otherwise we have to check uses of *all*
1575       // MemoryDefs we discover, including non-aliasing ones, and might miss
1576       // cases like the following:
1577       //   1 = Def(LoE) ; <----- EarlierDef stores [0,1]
1578       //   2 = Def(1)   ; (2, 1) = NoAlias,   stores [2,3]
1579       //   Use(2)       ; MayAlias 2 *and* 1, loads [0, 3].
1580       //                  (The Use points to the *first* Def it may alias)
1581       //   3 = Def(1)   ; <---- Current  (3, 2) = NoAlias, (3,1) = MayAlias,
1582       //                  stores [0,1]
1583       if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) {
1584         if (isCompleteOverwrite(*CurrentLoc, EarlierMemInst, UseInst)) {
1585           BasicBlock *MaybeKillingBlock = UseInst->getParent();
1586           if (PostOrderNumbers.find(MaybeKillingBlock)->second <
1587               PostOrderNumbers.find(EarlierAccess->getBlock())->second) {
1588             if (!isInvisibleToCallerAfterRet(DefUO)) {
1589               LLVM_DEBUG(dbgs()
1590                          << "    ... found killing def " << *UseInst << "\n");
1591               KillingDefs.insert(UseInst);
1592             }
1593           } else {
1594             LLVM_DEBUG(dbgs()
1595                        << "    ... found preceding def " << *UseInst << "\n");
1596             return None;
1597           }
1598         } else
1599           PushMemUses(UseDef);
1600       }
1601     }
1602 
1603     // For accesses to locations visible after the function returns, make sure
1604     // that the location is killed (=overwritten) along all paths from
1605     // EarlierAccess to the exit.
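         // For example (illustrative CFG): if EarlierAccess is in block A and the
         // killing stores are in its successors B and C, the write in A is dead
         // only if every path from A to a return passes through B or C; if some
         // path reaches a return without passing a killing block, the caller
         // could still observe the value written in A.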
1606     if (!isInvisibleToCallerAfterRet(DefUO)) {
1607       SmallPtrSet<BasicBlock *, 16> KillingBlocks;
1608       for (Instruction *KD : KillingDefs)
1609         KillingBlocks.insert(KD->getParent());
1610       assert(!KillingBlocks.empty() &&
1611              "Expected at least a single killing block");
1612 
1613       // Find the common post-dominator of all killing blocks.
1614       BasicBlock *CommonPred = *KillingBlocks.begin();
1615       for (auto I = std::next(KillingBlocks.begin()), E = KillingBlocks.end();
1616            I != E; I++) {
1617         if (!CommonPred)
1618           break;
1619         CommonPred = PDT.findNearestCommonDominator(CommonPred, *I);
1620       }
1621 
1622       // If CommonPred is in the set of killing blocks, just check if it
1623       // post-dominates EarlierAccess.
1624       if (KillingBlocks.count(CommonPred)) {
1625         if (PDT.dominates(CommonPred, EarlierAccess->getBlock()))
1626           return {EarlierAccess};
1627         return None;
1628       }
1629 
1630       // If the common post-dominator does not post-dominate EarlierAccess,
1631       // there is a path from EarlierAccess to an exit not going through a
1632       // killing block.
1633       if (PDT.dominates(CommonPred, EarlierAccess->getBlock())) {
1634         SetVector<BasicBlock *> WorkList;
1635 
1636         // If CommonPred is null, there are multiple exits from the function.
1637         // They all have to be added to the worklist.
1638         if (CommonPred)
1639           WorkList.insert(CommonPred);
1640         else
1641           for (BasicBlock *R : PDT.roots())
1642             WorkList.insert(R);
1643 
1644         NumCFGTries++;
1645         // Check if all paths starting from an exit node go through one of the
1646         // killing blocks before reaching EarlierAccess.
1647         for (unsigned I = 0; I < WorkList.size(); I++) {
1648           NumCFGChecks++;
1649           BasicBlock *Current = WorkList[I];
1650           if (KillingBlocks.count(Current))
1651             continue;
1652           if (Current == EarlierAccess->getBlock())
1653             return None;
1654 
1655           // EarlierAccess is reachable from the entry, so we don't have to
1656           // explore unreachable blocks further.
1657           if (!DT.isReachableFromEntry(Current))
1658             continue;
1659 
1660           for (BasicBlock *Pred : predecessors(Current))
1661             WorkList.insert(Pred);
1662 
1663           if (WorkList.size() >= MemorySSAPathCheckLimit)
1664             return None;
1665         }
1666         NumCFGSuccess++;
1667         return {EarlierAccess};
1668       }
1669       return None;
1670     }
1671 
1672     // No aliasing MemoryUses of EarlierAccess found, EarlierAccess is
1673     // potentially dead.
1674     return {EarlierAccess};
1675   }
1676 
1677   // Delete dead memory defs
1678   void deleteDeadInstruction(Instruction *SI) {
1679     MemorySSAUpdater Updater(&MSSA);
1680     SmallVector<Instruction *, 32> NowDeadInsts;
1681     NowDeadInsts.push_back(SI);
1682     --NumFastOther;
1683 
1684     while (!NowDeadInsts.empty()) {
1685       Instruction *DeadInst = NowDeadInsts.pop_back_val();
1686       ++NumFastOther;
1687 
1688       // Try to preserve debug information attached to the dead instruction.
1689       salvageDebugInfo(*DeadInst);
1690       salvageKnowledge(DeadInst);
1691 
1692       // Remove the Instruction from MSSA.
1693       if (MemoryAccess *MA = MSSA.getMemoryAccess(DeadInst)) {
1694         if (MemoryDef *MD = dyn_cast<MemoryDef>(MA)) {
1695           SkipStores.insert(MD);
1696         }
1697         Updater.removeMemoryAccess(MA);
1698       }
1699 
1700       auto I = IOLs.find(DeadInst->getParent());
1701       if (I != IOLs.end())
1702         I->second.erase(DeadInst);
1703       // Remove its operands
1704       for (Use &O : DeadInst->operands())
1705         if (Instruction *OpI = dyn_cast<Instruction>(O)) {
1706           O = nullptr;
1707           if (isInstructionTriviallyDead(OpI, &TLI))
1708             NowDeadInsts.push_back(OpI);
1709         }
1710 
1711       DeadInst->eraseFromParent();
1712     }
1713   }
1714 
1715   // Check for any extra throws between SI and NI that block DSE. This only
1716   // checks extra may-throw instructions (those that are not MemoryDefs).
1717   // MemoryDefs that may throw are handled during the walk from one def to the next.
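       // If such an instruction may throw, the caller (or an exception handler)
       // could observe the earlier write before the killing write executes, so
       // the earlier write must be kept unless the underlying object is invisible
       // to the caller before the function returns.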
1718   bool mayThrowBetween(Instruction *SI, Instruction *NI,
1719                        const Value *SILocUnd) {
1720     // First see if we can ignore it by using the fact that SI stores to an
1721     // alloca/alloca-like object that is not visible to the caller during
1722     // execution of the function.
1723     if (SILocUnd && isInvisibleToCallerBeforeRet(SILocUnd))
1724       return false;
1725 
1726     if (SI->getParent() == NI->getParent())
1727       return ThrowingBlocks.count(SI->getParent());
1728     return !ThrowingBlocks.empty();
1729   }
1730 
1731   // Check if \p NI acts as a DSE barrier for \p SI. The following instructions
1732   // act as barriers:
1733   //  * A memory instruction that may throw, if \p SI accesses a non-stack
1734   //    object.
1735   //  * Atomic stores stronger than monotonic.
1736   bool isDSEBarrier(const Value *SILocUnd, Instruction *NI) {
1737     // If NI may throw it acts as a barrier, unless SILocUnd is an alloca or
1738     // alloca-like object that does not escape.
1739     if (NI->mayThrow() && !isInvisibleToCallerBeforeRet(SILocUnd))
1740       return true;
1741 
1742     // If NI is an atomic load/store stronger than monotonic, do not try to
1743     // eliminate/reorder it.
1744     if (NI->isAtomic()) {
1745       if (auto *LI = dyn_cast<LoadInst>(NI))
1746         return isStrongerThanMonotonic(LI->getOrdering());
1747       if (auto *SI = dyn_cast<StoreInst>(NI))
1748         return isStrongerThanMonotonic(SI->getOrdering());
1749       if (auto *ARMW = dyn_cast<AtomicRMWInst>(NI))
1750         return isStrongerThanMonotonic(ARMW->getOrdering());
1751       if (auto *CmpXchg = dyn_cast<AtomicCmpXchgInst>(NI))
1752         return isStrongerThanMonotonic(CmpXchg->getSuccessOrdering()) ||
1753                isStrongerThanMonotonic(CmpXchg->getFailureOrdering());
1754       llvm_unreachable("other instructions should be skipped in MemorySSA");
1755     }
1756     return false;
1757   }
1758 
1759   /// Eliminate writes to objects that are not visible in the caller and are not
1760   /// accessed before returning from the function.
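       /// For example (illustrative IR), in
       ///   %a = alloca i32
       ///   store i32 1, i32* %a
       ///   ret void
       /// the store can be removed: %a is not visible to the caller and the
       /// stored value is never read before the function returns.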
1761   bool eliminateDeadWritesAtEndOfFunction() {
1762     bool MadeChange = false;
1763     LLVM_DEBUG(
1764         dbgs()
1765         << "Trying to eliminate MemoryDefs at the end of the function\n");
1766     for (int I = MemDefs.size() - 1; I >= 0; I--) {
1767       MemoryDef *Def = MemDefs[I];
1768       if (SkipStores.contains(Def) || !isRemovable(Def->getMemoryInst()))
1769         continue;
1770 
1771       Instruction *DefI = Def->getMemoryInst();
1772       SmallVector<const Value *, 4> Pointers;
1773       auto DefLoc = getLocForWriteEx(DefI);
1774       if (!DefLoc)
1775         continue;
1776 
1777       // NOTE: Currently eliminating writes at the end of a function is limited
1778       // to MemoryDefs with a single underlying object, to save compile-time. In
1779       // practice it appears the case with multiple underlying objects is very
1780       // uncommon. If it turns out to be important, we can use
1781       // getUnderlyingObjects here instead.
1782       const Value *UO = getUnderlyingObject(DefLoc->Ptr);
1783       if (!UO || !isInvisibleToCallerAfterRet(UO))
1784         continue;
1785 
1786       if (isWriteAtEndOfFunction(Def)) {
1787         // See through pointer-to-pointer bitcasts
1788         LLVM_DEBUG(dbgs() << "   ... MemoryDef is not accessed until the end "
1789                              "of the function\n");
1790         deleteDeadInstruction(DefI);
1791         ++NumFastStores;
1792         MadeChange = true;
1793       }
1794     }
1795     return MadeChange;
1796   }
1797 
1798   /// \returns true if \p Def is a no-op store, either because it
1799   /// directly stores back a loaded value or stores zero to a calloc'ed object.
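       /// For example (illustrative IR):
       ///   %v = load i32, i32* %p
       ///   store i32 %v, i32* %p
       /// is a no-op as long as no other write to %p happens in between, and so
       /// is storing zero (via store or memset) into memory freshly returned by
       /// calloc that has not been modified since.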
1800   bool storeIsNoop(MemoryDef *Def, const MemoryLocation &DefLoc,
1801                    const Value *DefUO) {
1802     StoreInst *Store = dyn_cast<StoreInst>(Def->getMemoryInst());
1803     MemSetInst *MemSet = dyn_cast<MemSetInst>(Def->getMemoryInst());
1804     Constant *StoredConstant = nullptr;
1805     if (Store)
1806       StoredConstant = dyn_cast<Constant>(Store->getOperand(0));
1807     if (MemSet)
1808       StoredConstant = dyn_cast<Constant>(MemSet->getValue());
1809 
1810     if (StoredConstant && StoredConstant->isNullValue()) {
1811       auto *DefUOInst = dyn_cast<Instruction>(DefUO);
1812       if (DefUOInst && isCallocLikeFn(DefUOInst, &TLI)) {
1813         auto *UnderlyingDef = cast<MemoryDef>(MSSA.getMemoryAccess(DefUOInst));
1814         // If UnderlyingDef is the clobbering access of Def, no instructions
1815         // between them can modify the memory location.
1816         auto *ClobberDef =
1817             MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def);
1818         return UnderlyingDef == ClobberDef;
1819       }
1820     }
1821 
1822     if (!Store)
1823       return false;
1824 
1825     if (auto *LoadI = dyn_cast<LoadInst>(Store->getOperand(0))) {
1826       if (LoadI->getPointerOperand() == Store->getOperand(1)) {
1827         // Get the defining access for the load.
1828         auto *LoadAccess = MSSA.getMemoryAccess(LoadI)->getDefiningAccess();
1829         // Fast path: the defining accesses are the same.
1830         if (LoadAccess == Def->getDefiningAccess())
1831           return true;
1832 
1833         // Look through phi accesses. Recursively scan all phi accesses by
1834         // adding them to a worklist. Bail when we run into a memory def that
1835         // does not match LoadAccess.
1836         SetVector<MemoryAccess *> ToCheck;
1837         MemoryAccess *Current =
1838             MSSA.getWalker()->getClobberingMemoryAccess(Def);
1839         // We don't want to bail when we run into the store memory def. But,
1840         // the phi access may point to it. So, pretend like we've already
1841         // checked it.
1842         ToCheck.insert(Def);
1843         ToCheck.insert(Current);
1844         // Start at current (1) to simulate already having checked Def.
1845         for (unsigned I = 1; I < ToCheck.size(); ++I) {
1846           Current = ToCheck[I];
1847           if (auto PhiAccess = dyn_cast<MemoryPhi>(Current)) {
1848             // Check all the operands.
1849             for (auto &Use : PhiAccess->incoming_values())
1850               ToCheck.insert(cast<MemoryAccess>(&Use));
1851             continue;
1852           }
1853 
1854           // If we found a memory def, bail. This happens when we have an
1855           // unrelated write in between the load and an otherwise no-op store.
1856           assert(isa<MemoryDef>(Current) &&
1857                  "Only MemoryDefs should reach here.");
1858           // TODO: Skip no alias MemoryDefs that have no aliasing reads.
1859           // We are searching for the definition of the store's destination.
1860           // So, if that is the same definition as the load, then this is a
1861           // noop. Otherwise, fail.
1862           if (LoadAccess != Current)
1863             return false;
1864         }
1865         return true;
1866       }
1867     }
1868 
1869     return false;
1870   }
1871 };
1872 
1873 static bool eliminateDeadStores(Function &F, AliasAnalysis &AA, MemorySSA &MSSA,
1874                                 DominatorTree &DT, PostDominatorTree &PDT,
1875                                 const TargetLibraryInfo &TLI,
1876                                 const LoopInfo &LI) {
1877   bool MadeChange = false;
1878 
1879   DSEState State = DSEState::get(F, AA, MSSA, DT, PDT, TLI, LI);
1880   // For each store:
1881   for (unsigned I = 0; I < State.MemDefs.size(); I++) {
1882     MemoryDef *KillingDef = State.MemDefs[I];
1883     if (State.SkipStores.count(KillingDef))
1884       continue;
1885     Instruction *SI = KillingDef->getMemoryInst();
1886 
1887     Optional<MemoryLocation> MaybeSILoc;
1888     if (State.isMemTerminatorInst(SI))
1889       MaybeSILoc = State.getLocForTerminator(SI).map(
1890           [](const std::pair<MemoryLocation, bool> &P) { return P.first; });
1891     else
1892       MaybeSILoc = State.getLocForWriteEx(SI);
1893 
1894     if (!MaybeSILoc) {
1895       LLVM_DEBUG(dbgs() << "Failed to find analyzable write location for "
1896                         << *SI << "\n");
1897       continue;
1898     }
1899     MemoryLocation SILoc = *MaybeSILoc;
1900     assert(SILoc.Ptr && "SILoc should not be null");
1901     const Value *SILocUnd = getUnderlyingObject(SILoc.Ptr);
1902 
1903     MemoryAccess *Current = KillingDef;
1904     LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs killed by "
1905                       << *Current << " (" << *SI << ")\n");
1906 
1907     unsigned ScanLimit = MemorySSAScanLimit;
1908     unsigned WalkerStepLimit = MemorySSAUpwardsStepLimit;
1909     unsigned PartialLimit = MemorySSAPartialStoreLimit;
1910     // Worklist of MemoryAccesses that may be killed by KillingDef.
1911     SetVector<MemoryAccess *> ToCheck;
1912 
1913     if (SILocUnd)
1914       ToCheck.insert(KillingDef->getDefiningAccess());
1915 
1916     bool Shortend = false;
1917     bool IsMemTerm = State.isMemTerminatorInst(SI);
1918     // Check if MemoryAccesses in the worklist are killed by KillingDef.
1919     for (unsigned I = 0; I < ToCheck.size(); I++) {
1920       Current = ToCheck[I];
1921       if (State.SkipStores.count(Current))
1922         continue;
1923 
1924       Optional<MemoryAccess *> Next = State.getDomMemoryDef(
1925           KillingDef, Current, SILoc, SILocUnd, ScanLimit, WalkerStepLimit,
1926           IsMemTerm, PartialLimit);
1927 
1928       if (!Next) {
1929         LLVM_DEBUG(dbgs() << "  finished walk\n");
1930         continue;
1931       }
1932 
1933       MemoryAccess *EarlierAccess = *Next;
1934       LLVM_DEBUG(dbgs() << " Checking if we can kill " << *EarlierAccess);
1935       if (isa<MemoryPhi>(EarlierAccess)) {
1936         LLVM_DEBUG(dbgs() << "\n  ... adding incoming values to worklist\n");
1937         for (Value *V : cast<MemoryPhi>(EarlierAccess)->incoming_values()) {
1938           MemoryAccess *IncomingAccess = cast<MemoryAccess>(V);
1939           BasicBlock *IncomingBlock = IncomingAccess->getBlock();
1940           BasicBlock *PhiBlock = EarlierAccess->getBlock();
1941 
1942           // We only consider incoming MemoryAccesses that come before the
1943           // MemoryPhi. Otherwise we could discover candidates that do not
1944           // strictly dominate our starting def.
1945           if (State.PostOrderNumbers[IncomingBlock] >
1946               State.PostOrderNumbers[PhiBlock])
1947             ToCheck.insert(IncomingAccess);
1948         }
1949         continue;
1950       }
1951       auto *NextDef = cast<MemoryDef>(EarlierAccess);
1952       Instruction *NI = NextDef->getMemoryInst();
1953       LLVM_DEBUG(dbgs() << " (" << *NI << ")\n");
1954       ToCheck.insert(NextDef->getDefiningAccess());
1955       NumGetDomMemoryDefPassed++;
1956 
1957       if (!DebugCounter::shouldExecute(MemorySSACounter))
1958         continue;
1959 
1960       MemoryLocation NILoc = *State.getLocForWriteEx(NI);
1961 
1962       if (IsMemTerm) {
1963         const Value *NIUnd = getUnderlyingObject(NILoc.Ptr);
1964         if (SILocUnd != NIUnd)
1965           continue;
1966         LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: " << *NI
1967                           << "\n  KILLER: " << *SI << '\n');
1968         State.deleteDeadInstruction(NI);
1969         ++NumFastStores;
1970         MadeChange = true;
1971       } else {
1972         // Check if NI overwrites SI.
1973         int64_t InstWriteOffset, DepWriteOffset;
1974         OverwriteResult OR = State.isOverwrite(SI, NI, SILoc, NILoc,
1975                                                DepWriteOffset, InstWriteOffset);
1976         if (OR == OW_MaybePartial) {
1977           auto Iter = State.IOLs.insert(
1978               std::make_pair<BasicBlock *, InstOverlapIntervalsTy>(
1979                   NI->getParent(), InstOverlapIntervalsTy()));
1980           auto &IOL = Iter.first->second;
1981           OR = isPartialOverwrite(SILoc, NILoc, DepWriteOffset, InstWriteOffset,
1982                                   NI, IOL);
1983         }
1984 
1985         if (EnablePartialStoreMerging && OR == OW_PartialEarlierWithFullLater) {
1986           auto *Earlier = dyn_cast<StoreInst>(NI);
1987           auto *Later = dyn_cast<StoreInst>(SI);
1988           // We are re-using tryToMergePartialOverlappingStores, which requires
1989           // Earlier to dominate Later.
1990           // TODO: implement tryToMergePartialOverlappingStores using MemorySSA.
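               // For example (illustrative, assuming a little-endian target):
               //   store i32 0, i32* %p      ; Earlier
               //   store i8 1, i8* %p.byte1  ; Later, writes byte 1 of %p
               // can be merged by rewriting the earlier store to `store i32 256`,
               // after which the later store is redundant and is deleted.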
1991           if (Earlier && Later && DT.dominates(Earlier, Later)) {
1992             if (Constant *Merged = tryToMergePartialOverlappingStores(
1993                     Earlier, Later, InstWriteOffset, DepWriteOffset, State.DL,
1994                     State.BatchAA, &DT)) {
1995 
1996               // Update stored value of earlier store to merged constant.
1997               Earlier->setOperand(0, Merged);
1998               ++NumModifiedStores;
1999               MadeChange = true;
2000 
2001               Shortend = true;
2002               // Remove later store and remove any outstanding overlap intervals
2003               // for the updated store.
2004               State.deleteDeadInstruction(Later);
2005               auto I = State.IOLs.find(Earlier->getParent());
2006               if (I != State.IOLs.end())
2007                 I->second.erase(Earlier);
2008               break;
2009             }
2010           }
2011         }
2012 
2013         if (OR == OW_Complete) {
2014           LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: " << *NI
2015                             << "\n  KILLER: " << *SI << '\n');
2016           State.deleteDeadInstruction(NI);
2017           ++NumFastStores;
2018           MadeChange = true;
2019         }
2020       }
2021     }
2022 
2023     // Check if the store is a no-op.
2024     if (!Shortend && isRemovable(SI) &&
2025         State.storeIsNoop(KillingDef, SILoc, SILocUnd)) {
2026       LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n  DEAD: " << *SI << '\n');
2027       State.deleteDeadInstruction(SI);
2028       NumRedundantStores++;
2029       MadeChange = true;
2030       continue;
2031     }
2032   }
2033 
2034   if (EnablePartialOverwriteTracking)
2035     for (auto &KV : State.IOLs)
2036       MadeChange |= removePartiallyOverlappedStores(State.DL, KV.second, TLI);
2037 
2038   MadeChange |= State.eliminateDeadWritesAtEndOfFunction();
2039   return MadeChange;
2040 }
2041 } // end anonymous namespace
2042 
2043 //===----------------------------------------------------------------------===//
2044 // DSE Pass
2045 //===----------------------------------------------------------------------===//
2046 PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) {
2047   AliasAnalysis &AA = AM.getResult<AAManager>(F);
2048   const TargetLibraryInfo &TLI = AM.getResult<TargetLibraryAnalysis>(F);
2049   DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
2050   MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
2051   PostDominatorTree &PDT = AM.getResult<PostDominatorTreeAnalysis>(F);
2052   LoopInfo &LI = AM.getResult<LoopAnalysis>(F);
2053 
2054   bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, TLI, LI);
2055 
2056 #ifdef LLVM_ENABLE_STATS
2057   if (AreStatisticsEnabled())
2058     for (auto &I : instructions(F))
2059       NumRemainingStores += isa<StoreInst>(&I);
2060 #endif
2061 
2062   if (!Changed)
2063     return PreservedAnalyses::all();
2064 
2065   PreservedAnalyses PA;
2066   PA.preserveSet<CFGAnalyses>();
2067   PA.preserve<MemorySSAAnalysis>();
2068   PA.preserve<LoopAnalysis>();
2069   return PA;
2070 }
2071 
2072 namespace {
2073 
2074 /// A legacy pass for the legacy pass manager that wraps \c DSEPass.
2075 class DSELegacyPass : public FunctionPass {
2076 public:
2077   static char ID; // Pass identification, replacement for typeid
2078 
2079   DSELegacyPass() : FunctionPass(ID) {
2080     initializeDSELegacyPassPass(*PassRegistry::getPassRegistry());
2081   }
2082 
2083   bool runOnFunction(Function &F) override {
2084     if (skipFunction(F))
2085       return false;
2086 
2087     AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2088     DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2089     const TargetLibraryInfo &TLI =
2090         getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
2091     MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
2092     PostDominatorTree &PDT =
2093         getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
2094     LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2095 
2096     bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, TLI, LI);
2097 
2098 #ifdef LLVM_ENABLE_STATS
2099     if (AreStatisticsEnabled())
2100       for (auto &I : instructions(F))
2101         NumRemainingStores += isa<StoreInst>(&I);
2102 #endif
2103 
2104     return Changed;
2105   }
2106 
2107   void getAnalysisUsage(AnalysisUsage &AU) const override {
2108     AU.setPreservesCFG();
2109     AU.addRequired<AAResultsWrapperPass>();
2110     AU.addRequired<TargetLibraryInfoWrapperPass>();
2111     AU.addPreserved<GlobalsAAWrapperPass>();
2112     AU.addRequired<DominatorTreeWrapperPass>();
2113     AU.addPreserved<DominatorTreeWrapperPass>();
2114     AU.addRequired<PostDominatorTreeWrapperPass>();
2115     AU.addRequired<MemorySSAWrapperPass>();
2116     AU.addPreserved<PostDominatorTreeWrapperPass>();
2117     AU.addPreserved<MemorySSAWrapperPass>();
2118     AU.addRequired<LoopInfoWrapperPass>();
2119     AU.addPreserved<LoopInfoWrapperPass>();
2120   }
2121 };
2122 
2123 } // end anonymous namespace
2124 
2125 char DSELegacyPass::ID = 0;
2126 
2127 INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false,
2128                       false)
2129 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
2130 INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
2131 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
2132 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
2133 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
2134 INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
2135 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
2136 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
2137 INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false,
2138                     false)
2139 
2140 FunctionPass *llvm::createDeadStoreEliminationPass() {
2141   return new DSELegacyPass();
2142 }
2143