1 //===- DeadStoreElimination.cpp - Fast Dead Store Elimination -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements a trivial dead store elimination that only considers
10 // basic-block local redundant stores.
11 //
12 // FIXME: This should eventually be extended to be a post-dominator tree
13 // traversal.  Doing so would be pretty trivial.
14 //
15 //===----------------------------------------------------------------------===//
16 
17 #include "llvm/Transforms/Scalar/DeadStoreElimination.h"
18 #include "llvm/ADT/APInt.h"
19 #include "llvm/ADT/DenseMap.h"
20 #include "llvm/ADT/MapVector.h"
21 #include "llvm/ADT/PostOrderIterator.h"
22 #include "llvm/ADT/SetVector.h"
23 #include "llvm/ADT/SmallPtrSet.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/ADT/Statistic.h"
26 #include "llvm/ADT/StringRef.h"
27 #include "llvm/Analysis/AliasAnalysis.h"
28 #include "llvm/Analysis/CaptureTracking.h"
29 #include "llvm/Analysis/GlobalsModRef.h"
30 #include "llvm/Analysis/MemoryBuiltins.h"
31 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
32 #include "llvm/Analysis/MemoryLocation.h"
33 #include "llvm/Analysis/MemorySSA.h"
34 #include "llvm/Analysis/MemorySSAUpdater.h"
35 #include "llvm/Analysis/PostDominators.h"
36 #include "llvm/Analysis/TargetLibraryInfo.h"
37 #include "llvm/Analysis/ValueTracking.h"
38 #include "llvm/IR/Argument.h"
39 #include "llvm/IR/BasicBlock.h"
40 #include "llvm/IR/Constant.h"
41 #include "llvm/IR/Constants.h"
42 #include "llvm/IR/DataLayout.h"
43 #include "llvm/IR/Dominators.h"
44 #include "llvm/IR/Function.h"
45 #include "llvm/IR/InstIterator.h"
46 #include "llvm/IR/InstrTypes.h"
47 #include "llvm/IR/Instruction.h"
48 #include "llvm/IR/Instructions.h"
49 #include "llvm/IR/IntrinsicInst.h"
50 #include "llvm/IR/Intrinsics.h"
51 #include "llvm/IR/LLVMContext.h"
52 #include "llvm/IR/Module.h"
53 #include "llvm/IR/PassManager.h"
54 #include "llvm/IR/PatternMatch.h"
55 #include "llvm/IR/Value.h"
56 #include "llvm/InitializePasses.h"
57 #include "llvm/Pass.h"
58 #include "llvm/Support/Casting.h"
59 #include "llvm/Support/CommandLine.h"
60 #include "llvm/Support/Debug.h"
61 #include "llvm/Support/DebugCounter.h"
62 #include "llvm/Support/ErrorHandling.h"
63 #include "llvm/Support/MathExtras.h"
64 #include "llvm/Support/raw_ostream.h"
65 #include "llvm/Transforms/Scalar.h"
66 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
67 #include "llvm/Transforms/Utils/Local.h"
68 #include <algorithm>
69 #include <cassert>
70 #include <cstddef>
71 #include <cstdint>
72 #include <iterator>
73 #include <map>
74 #include <utility>
75 
76 using namespace llvm;
77 using namespace PatternMatch;
78 
79 #define DEBUG_TYPE "dse"
80 
81 STATISTIC(NumRemainingStores, "Number of stores remaining after DSE");
82 STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
83 STATISTIC(NumFastStores, "Number of stores deleted");
84 STATISTIC(NumFastOther, "Number of other instrs removed");
85 STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
86 STATISTIC(NumModifiedStores, "Number of stores modified");
STATISTIC(NumCFGChecks, "Number of blocks checked when proving a store dead");
STATISTIC(NumCFGTries, "Number of attempts to prove a store dead via the CFG");
STATISTIC(NumCFGSuccess, "Number of stores proven dead via CFG checks");
90 STATISTIC(NumGetDomMemoryDefPassed,
91           "Number of times a valid candidate is returned from getDomMemoryDef");
92 STATISTIC(NumDomMemDefChecks,
          "Number of iterations checking for reads in getDomMemoryDef");
94 
95 DEBUG_COUNTER(MemorySSACounter, "dse-memoryssa",
96               "Controls which MemoryDefs are eliminated.");
97 
98 static cl::opt<bool>
99 EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking",
100   cl::init(true), cl::Hidden,
101   cl::desc("Enable partial-overwrite tracking in DSE"));
102 
103 static cl::opt<bool>
104 EnablePartialStoreMerging("enable-dse-partial-store-merging",
105   cl::init(true), cl::Hidden,
106   cl::desc("Enable partial store merging in DSE"));
107 
108 static cl::opt<bool>
109     EnableMemorySSA("enable-dse-memoryssa", cl::init(true), cl::Hidden,
110                     cl::desc("Use the new MemorySSA-backed DSE."));
111 
112 static cl::opt<unsigned>
113     MemorySSAScanLimit("dse-memoryssa-scanlimit", cl::init(150), cl::Hidden,
114                        cl::desc("The number of memory instructions to scan for "
                                "dead store elimination (default = 150)"));
116 static cl::opt<unsigned> MemorySSAUpwardsStepLimit(
117     "dse-memoryssa-walklimit", cl::init(90), cl::Hidden,
118     cl::desc("The maximum number of steps while walking upwards to find "
119              "MemoryDefs that may be killed (default = 90)"));
120 
121 static cl::opt<unsigned> MemorySSAPartialStoreLimit(
122     "dse-memoryssa-partial-store-limit", cl::init(5), cl::Hidden,
    cl::desc("The maximum number of candidates that only partially overwrite "
             "the killing MemoryDef to consider (default = 5)"));
126 
127 static cl::opt<unsigned> MemorySSADefsPerBlockLimit(
128     "dse-memoryssa-defs-per-block-limit", cl::init(5000), cl::Hidden,
    cl::desc("The number of MemoryDefs we consider as candidates to eliminate "
130              "other stores per basic block (default = 5000)"));
131 
132 static cl::opt<unsigned> MemorySSASameBBStepCost(
133     "dse-memoryssa-samebb-cost", cl::init(1), cl::Hidden,
134     cl::desc(
        "The cost of a step in the same basic block as the killing MemoryDef "
        "(default = 1)"));
137 
138 static cl::opt<unsigned>
139     MemorySSAOtherBBStepCost("dse-memoryssa-otherbb-cost", cl::init(5),
140                              cl::Hidden,
141                              cl::desc("The cost of a step in a different basic "
                                      "block than the killing MemoryDef "
                                      "(default = 5)"));
144 
145 static cl::opt<unsigned> MemorySSAPathCheckLimit(
146     "dse-memoryssa-path-check-limit", cl::init(50), cl::Hidden,
147     cl::desc("The maximum number of blocks to check when trying to prove that "
148              "all paths to an exit go through a killing block (default = 50)"));
149 
150 //===----------------------------------------------------------------------===//
151 // Helper functions
152 //===----------------------------------------------------------------------===//
153 using OverlapIntervalsTy = std::map<int64_t, int64_t>;
154 using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>;
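// Illustrative example (hypothetical values): the interval map uses the
// half-open end offset of an overwritten byte range as the key and the start
// offset as the value (see isPartialOverwrite below), so overwritten ranges
// [0, 8) and [16, 24) of an earlier write would be represented as
//   IOL[DepWrite] == { {8, 0}, {24, 16} }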
155 
156 /// Delete this instruction.  Before we do, go through and zero out all the
157 /// operands of this instruction.  If any of them become dead, delete them and
158 /// the computation tree that feeds them.
159 /// If ValueSet is non-null, remove any deleted instructions from it as well.
160 static void
161 deleteDeadInstruction(Instruction *I, BasicBlock::iterator *BBI,
162                       MemoryDependenceResults &MD, const TargetLibraryInfo &TLI,
163                       InstOverlapIntervalsTy &IOL,
164                       MapVector<Instruction *, bool> &ThrowableInst,
165                       SmallSetVector<const Value *, 16> *ValueSet = nullptr) {
166   SmallVector<Instruction*, 32> NowDeadInsts;
167 
168   NowDeadInsts.push_back(I);
169   --NumFastOther;
170 
171   // Keeping the iterator straight is a pain, so we let this routine tell the
172   // caller what the next instruction is after we're done mucking about.
173   BasicBlock::iterator NewIter = *BBI;
174 
175   // Before we touch this instruction, remove it from memdep!
176   do {
177     Instruction *DeadInst = NowDeadInsts.pop_back_val();
178     // Mark the DeadInst as dead in the list of throwable instructions.
179     auto It = ThrowableInst.find(DeadInst);
180     if (It != ThrowableInst.end())
181       ThrowableInst[It->first] = false;
182     ++NumFastOther;
183 
184     // Try to preserve debug information attached to the dead instruction.
185     salvageDebugInfo(*DeadInst);
186     salvageKnowledge(DeadInst);
187 
188     // This instruction is dead, zap it, in stages.  Start by removing it from
189     // MemDep, which needs to know the operands and needs it to be in the
190     // function.
191     MD.removeInstruction(DeadInst);
192 
193     for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
194       Value *Op = DeadInst->getOperand(op);
195       DeadInst->setOperand(op, nullptr);
196 
197       // If this operand just became dead, add it to the NowDeadInsts list.
198       if (!Op->use_empty()) continue;
199 
200       if (Instruction *OpI = dyn_cast<Instruction>(Op))
201         if (isInstructionTriviallyDead(OpI, &TLI))
202           NowDeadInsts.push_back(OpI);
203     }
204 
205     if (ValueSet) ValueSet->remove(DeadInst);
206     IOL.erase(DeadInst);
207 
208     if (NewIter == DeadInst->getIterator())
209       NewIter = DeadInst->eraseFromParent();
210     else
211       DeadInst->eraseFromParent();
212   } while (!NowDeadInsts.empty());
213   *BBI = NewIter;
214   // Pop dead entries from back of ThrowableInst till we find an alive entry.
215   while (!ThrowableInst.empty() && !ThrowableInst.back().second)
216     ThrowableInst.pop_back();
217 }
218 
219 /// Does this instruction write some memory?  This only returns true for things
220 /// that we can analyze with other helpers below.
221 static bool hasAnalyzableMemoryWrite(Instruction *I,
222                                      const TargetLibraryInfo &TLI) {
223   if (isa<StoreInst>(I))
224     return true;
225   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
226     switch (II->getIntrinsicID()) {
227     default:
228       return false;
229     case Intrinsic::memset:
230     case Intrinsic::memmove:
231     case Intrinsic::memcpy:
232     case Intrinsic::memcpy_inline:
233     case Intrinsic::memcpy_element_unordered_atomic:
234     case Intrinsic::memmove_element_unordered_atomic:
235     case Intrinsic::memset_element_unordered_atomic:
236     case Intrinsic::init_trampoline:
237     case Intrinsic::lifetime_end:
238     case Intrinsic::masked_store:
239       return true;
240     }
241   }
242   if (auto *CB = dyn_cast<CallBase>(I)) {
243     LibFunc LF;
244     if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) {
245       switch (LF) {
246       case LibFunc_strcpy:
247       case LibFunc_strncpy:
248       case LibFunc_strcat:
249       case LibFunc_strncat:
250         return true;
251       default:
252         return false;
253       }
254     }
255   }
256   return false;
257 }
258 
259 /// Return a Location stored to by the specified instruction. If isRemovable
260 /// returns true, this function and getLocForRead completely describe the memory
261 /// operations for this instruction.
262 static MemoryLocation getLocForWrite(Instruction *Inst,
263                                      const TargetLibraryInfo &TLI) {
264   if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
265     return MemoryLocation::get(SI);
266 
267   // memcpy/memmove/memset.
268   if (auto *MI = dyn_cast<AnyMemIntrinsic>(Inst))
269     return MemoryLocation::getForDest(MI);
270 
271   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
272     switch (II->getIntrinsicID()) {
273     default:
274       return MemoryLocation(); // Unhandled intrinsic.
275     case Intrinsic::init_trampoline:
276       return MemoryLocation::getAfter(II->getArgOperand(0));
277     case Intrinsic::masked_store:
278       return MemoryLocation::getForArgument(II, 1, TLI);
279     case Intrinsic::lifetime_end: {
280       uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
281       return MemoryLocation(II->getArgOperand(1), Len);
282     }
283     }
284   }
285   if (auto *CB = dyn_cast<CallBase>(Inst))
286     // All the supported TLI functions so far happen to have dest as their
287     // first argument.
288     return MemoryLocation::getAfter(CB->getArgOperand(0));
289   return MemoryLocation();
290 }
291 
292 /// Return the location read by the specified "hasAnalyzableMemoryWrite"
293 /// instruction if any.
294 static MemoryLocation getLocForRead(Instruction *Inst,
295                                     const TargetLibraryInfo &TLI) {
296   assert(hasAnalyzableMemoryWrite(Inst, TLI) && "Unknown instruction case");
297 
298   // The only instructions that both read and write are the mem transfer
299   // instructions (memcpy/memmove).
300   if (auto *MTI = dyn_cast<AnyMemTransferInst>(Inst))
301     return MemoryLocation::getForSource(MTI);
302   return MemoryLocation();
303 }
304 
/// If the value of this instruction and the memory it writes to are unused,
/// may we delete this instruction?
307 static bool isRemovable(Instruction *I) {
308   // Don't remove volatile/atomic stores.
309   if (StoreInst *SI = dyn_cast<StoreInst>(I))
310     return SI->isUnordered();
311 
312   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
313     switch (II->getIntrinsicID()) {
314     default: llvm_unreachable("doesn't pass 'hasAnalyzableMemoryWrite' predicate");
315     case Intrinsic::lifetime_end:
316       // Never remove dead lifetime_end's, e.g. because it is followed by a
317       // free.
318       return false;
319     case Intrinsic::init_trampoline:
320       // Always safe to remove init_trampoline.
321       return true;
322     case Intrinsic::memset:
323     case Intrinsic::memmove:
324     case Intrinsic::memcpy:
325     case Intrinsic::memcpy_inline:
326       // Don't remove volatile memory intrinsics.
327       return !cast<MemIntrinsic>(II)->isVolatile();
328     case Intrinsic::memcpy_element_unordered_atomic:
329     case Intrinsic::memmove_element_unordered_atomic:
330     case Intrinsic::memset_element_unordered_atomic:
331     case Intrinsic::masked_store:
332       return true;
333     }
334   }
335 
336   // note: only get here for calls with analyzable writes - i.e. libcalls
337   if (auto *CB = dyn_cast<CallBase>(I))
338     return CB->use_empty();
339 
340   return false;
341 }
342 
343 /// Returns true if the end of this instruction can be safely shortened in
344 /// length.
345 static bool isShortenableAtTheEnd(Instruction *I) {
346   // Don't shorten stores for now
347   if (isa<StoreInst>(I))
348     return false;
349 
350   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
351     switch (II->getIntrinsicID()) {
352       default: return false;
353       case Intrinsic::memset:
354       case Intrinsic::memcpy:
355       case Intrinsic::memcpy_element_unordered_atomic:
356       case Intrinsic::memset_element_unordered_atomic:
357         // Do shorten memory intrinsics.
358         // FIXME: Add memmove if it's also safe to transform.
359         return true;
360     }
361   }
362 
  // Don't shorten libcalls for now.
364 
365   return false;
366 }
367 
368 /// Returns true if the beginning of this instruction can be safely shortened
369 /// in length.
370 static bool isShortenableAtTheBeginning(Instruction *I) {
371   // FIXME: Handle only memset for now. Supporting memcpy/memmove should be
372   // easily done by offsetting the source address.
373   return isa<AnyMemSetInst>(I);
374 }
375 
376 /// Return the pointer that is being written to.
377 static Value *getStoredPointerOperand(Instruction *I,
378                                       const TargetLibraryInfo &TLI) {
  // TODO: factor this to reuse getLocForWrite
380   MemoryLocation Loc = getLocForWrite(I, TLI);
381   assert(Loc.Ptr &&
382          "unable to find pointer written for analyzable instruction?");
383   // TODO: most APIs don't expect const Value *
384   return const_cast<Value*>(Loc.Ptr);
385 }
386 
387 static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
388                                const TargetLibraryInfo &TLI,
389                                const Function *F) {
390   uint64_t Size;
391   ObjectSizeOpts Opts;
392   Opts.NullIsUnknownSize = NullPointerIsDefined(F);
393 
394   if (getObjectSize(V, Size, DL, &TLI, Opts))
395     return Size;
396   return MemoryLocation::UnknownSize;
397 }
398 
399 namespace {
400 
401 enum OverwriteResult {
402   OW_Begin,
403   OW_Complete,
404   OW_End,
405   OW_PartialEarlierWithFullLater,
406   OW_MaybePartial,
407   OW_Unknown
408 };
409 
410 } // end anonymous namespace
411 
/// Check if two instructions are masked stores that completely
413 /// overwrite one another. More specifically, \p Later has to
414 /// overwrite \p Earlier.
415 template <typename AATy>
416 static OverwriteResult isMaskedStoreOverwrite(const Instruction *Later,
417                                               const Instruction *Earlier,
418                                               AATy &AA) {
419   const auto *IIL = dyn_cast<IntrinsicInst>(Later);
420   const auto *IIE = dyn_cast<IntrinsicInst>(Earlier);
421   if (IIL == nullptr || IIE == nullptr)
422     return OW_Unknown;
423   if (IIL->getIntrinsicID() != Intrinsic::masked_store ||
424       IIE->getIntrinsicID() != Intrinsic::masked_store)
425     return OW_Unknown;
426   // Pointers.
427   Value *LP = IIL->getArgOperand(1)->stripPointerCasts();
428   Value *EP = IIE->getArgOperand(1)->stripPointerCasts();
429   if (LP != EP && !AA.isMustAlias(LP, EP))
430     return OW_Unknown;
431   // Masks.
  // TODO: check that Later's mask is a superset of Earlier's mask.
433   if (IIL->getArgOperand(3) != IIE->getArgOperand(3))
434     return OW_Unknown;
435   return OW_Complete;
436 }
437 
438 /// Return 'OW_Complete' if a store to the 'Later' location (by \p LaterI
/// instruction) completely overwrites a store to the 'Earlier' location
440 /// (by \p EarlierI instruction).
441 /// Return OW_MaybePartial if \p Later does not completely overwrite
442 /// \p Earlier, but they both write to the same underlying object. In that
443 /// case, use isPartialOverwrite to check if \p Later partially overwrites
444 /// \p Earlier. Returns 'OW_Unknown' if nothing can be determined.
445 template <typename AATy>
446 static OverwriteResult
447 isOverwrite(const Instruction *LaterI, const Instruction *EarlierI,
448             const MemoryLocation &Later, const MemoryLocation &Earlier,
449             const DataLayout &DL, const TargetLibraryInfo &TLI,
450             int64_t &EarlierOff, int64_t &LaterOff, AATy &AA,
451             const Function *F) {
452   // FIXME: Vet that this works for size upper-bounds. Seems unlikely that we'll
453   // get imprecise values here, though (except for unknown sizes).
454   if (!Later.Size.isPrecise() || !Earlier.Size.isPrecise()) {
455     // Masked stores have imprecise locations, but we can reason about them
456     // to some extent.
457     return isMaskedStoreOverwrite(LaterI, EarlierI, AA);
458   }
459 
460   const uint64_t LaterSize = Later.Size.getValue();
461   const uint64_t EarlierSize = Earlier.Size.getValue();
462 
463   const Value *P1 = Earlier.Ptr->stripPointerCasts();
464   const Value *P2 = Later.Ptr->stripPointerCasts();
465 
466   // If the start pointers are the same, we just have to compare sizes to see if
467   // the later store was larger than the earlier store.
468   if (P1 == P2 || AA.isMustAlias(P1, P2)) {
469     // Make sure that the Later size is >= the Earlier size.
470     if (LaterSize >= EarlierSize)
471       return OW_Complete;
472   }
473 
474   // Check to see if the later store is to the entire object (either a global,
475   // an alloca, or a byval/inalloca argument).  If so, then it clearly
476   // overwrites any other store to the same object.
477   const Value *UO1 = getUnderlyingObject(P1), *UO2 = getUnderlyingObject(P2);
478 
479   // If we can't resolve the same pointers to the same object, then we can't
480   // analyze them at all.
481   if (UO1 != UO2)
482     return OW_Unknown;
483 
484   // If the "Later" store is to a recognizable object, get its size.
485   uint64_t ObjectSize = getPointerSize(UO2, DL, TLI, F);
486   if (ObjectSize != MemoryLocation::UnknownSize)
487     if (ObjectSize == LaterSize && ObjectSize >= EarlierSize)
488       return OW_Complete;
489 
490   // Okay, we have stores to two completely different pointers.  Try to
491   // decompose the pointer into a "base + constant_offset" form.  If the base
492   // pointers are equal, then we can reason about the two stores.
493   EarlierOff = 0;
494   LaterOff = 0;
495   const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
496   const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);
497 
498   // If the base pointers still differ, we have two completely different stores.
499   if (BP1 != BP2)
500     return OW_Unknown;
501 
  // The later access completely overlaps the earlier store if and only if
  // both start and end of the earlier one are "inside" the later one:
504   //    |<->|--earlier--|<->|
505   //    |-------later-------|
506   // Accesses may overlap if and only if start of one of them is "inside"
507   // another one:
508   //    |<->|--earlier--|<----->|
509   //    |-------later-------|
510   //           OR
511   //    |----- earlier -----|
512   //    |<->|---later---|<----->|
513   //
514   // We have to be careful here as *Off is signed while *.Size is unsigned.
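  // Worked example (illustrative numbers only): with EarlierOff = 4,
  // EarlierSize = 4 (earlier bytes [4, 8)) and LaterOff = 0, LaterSize = 8
  // (later bytes [0, 8)), we have EarlierOff >= LaterOff and
  // (EarlierOff - LaterOff) + EarlierSize == 8 <= LaterSize, so the earlier
  // store is completely overwritten and OW_Complete is returned below.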
515 
516   // Check if the earlier access starts "not before" the later one.
517   if (EarlierOff >= LaterOff) {
518     // If the earlier access ends "not after" the later access then the earlier
519     // one is completely overwritten by the later one.
520     if (uint64_t(EarlierOff - LaterOff) + EarlierSize <= LaterSize)
521       return OW_Complete;
522     // If start of the earlier access is "before" end of the later access then
523     // accesses overlap.
524     else if ((uint64_t)(EarlierOff - LaterOff) < LaterSize)
525       return OW_MaybePartial;
526   }
527   // If start of the later access is "before" end of the earlier access then
528   // accesses overlap.
529   else if ((uint64_t)(LaterOff - EarlierOff) < EarlierSize) {
530     return OW_MaybePartial;
531   }
532 
533   // Can reach here only if accesses are known not to overlap. There is no
534   // dedicated code to indicate no overlap so signal "unknown".
535   return OW_Unknown;
536 }
537 
538 /// Return 'OW_Complete' if a store to the 'Later' location completely
539 /// overwrites a store to the 'Earlier' location, 'OW_End' if the end of the
540 /// 'Earlier' location is completely overwritten by 'Later', 'OW_Begin' if the
541 /// beginning of the 'Earlier' location is overwritten by 'Later'.
542 /// 'OW_PartialEarlierWithFullLater' means that an earlier (big) store was
/// overwritten by a later (smaller) store which doesn't write outside the big
544 /// store's memory locations. Returns 'OW_Unknown' if nothing can be determined.
545 /// NOTE: This function must only be called if both \p Later and \p Earlier
546 /// write to the same underlying object with valid \p EarlierOff and \p
547 /// LaterOff.
548 static OverwriteResult isPartialOverwrite(const MemoryLocation &Later,
549                                           const MemoryLocation &Earlier,
550                                           int64_t EarlierOff, int64_t LaterOff,
551                                           Instruction *DepWrite,
552                                           InstOverlapIntervalsTy &IOL) {
553   const uint64_t LaterSize = Later.Size.getValue();
554   const uint64_t EarlierSize = Earlier.Size.getValue();
555   // We may now overlap, although the overlap is not complete. There might also
556   // be other incomplete overlaps, and together, they might cover the complete
557   // earlier write.
558   // Note: The correctness of this logic depends on the fact that this function
  // is never called with DepWrite when there are any intervening reads.
560   if (EnablePartialOverwriteTracking &&
561       LaterOff < int64_t(EarlierOff + EarlierSize) &&
562       int64_t(LaterOff + LaterSize) >= EarlierOff) {
563 
564     // Insert our part of the overlap into the map.
565     auto &IM = IOL[DepWrite];
566     LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOff
567                       << ", " << int64_t(EarlierOff + EarlierSize)
568                       << ") Later [" << LaterOff << ", "
569                       << int64_t(LaterOff + LaterSize) << ")\n");
570 
571     // Make sure that we only insert non-overlapping intervals and combine
572     // adjacent intervals. The intervals are stored in the map with the ending
573     // offset as the key (in the half-open sense) and the starting offset as
574     // the value.
575     int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + LaterSize;
576 
577     // Find any intervals ending at, or after, LaterIntStart which start
578     // before LaterIntEnd.
579     auto ILI = IM.lower_bound(LaterIntStart);
580     if (ILI != IM.end() && ILI->second <= LaterIntEnd) {
581       // This existing interval is overlapped with the current store somewhere
582       // in [LaterIntStart, LaterIntEnd]. Merge them by erasing the existing
583       // intervals and adjusting our start and end.
584       LaterIntStart = std::min(LaterIntStart, ILI->second);
585       LaterIntEnd = std::max(LaterIntEnd, ILI->first);
586       ILI = IM.erase(ILI);
587 
588       // Continue erasing and adjusting our end in case other previous
589       // intervals are also overlapped with the current store.
590       //
      // |--- earlier 1 ---|  |--- earlier 2 ---|
      //     |-------- later ---------|
593       //
594       while (ILI != IM.end() && ILI->second <= LaterIntEnd) {
595         assert(ILI->second > LaterIntStart && "Unexpected interval");
596         LaterIntEnd = std::max(LaterIntEnd, ILI->first);
597         ILI = IM.erase(ILI);
598       }
599     }
600 
601     IM[LaterIntEnd] = LaterIntStart;
602 
603     ILI = IM.begin();
604     if (ILI->second <= EarlierOff &&
605         ILI->first >= int64_t(EarlierOff + EarlierSize)) {
606       LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier ["
607                         << EarlierOff << ", "
608                         << int64_t(EarlierOff + EarlierSize)
609                         << ") Composite Later [" << ILI->second << ", "
610                         << ILI->first << ")\n");
611       ++NumCompletePartials;
612       return OW_Complete;
613     }
614   }
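  // Illustrative example of the tracking above (hypothetical offsets): if the
  // earlier write covers [0, 8) and two later stores cover [0, 4) and then
  // [4, 8), the second insertion merges the adjacent intervals into a single
  // entry IM[8] = 0, which spans the whole earlier write, so OW_Complete is
  // returned above.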
615 
616   // Check for an earlier store which writes to all the memory locations that
617   // the later store writes to.
618   if (EnablePartialStoreMerging && LaterOff >= EarlierOff &&
619       int64_t(EarlierOff + EarlierSize) > LaterOff &&
620       uint64_t(LaterOff - EarlierOff) + LaterSize <= EarlierSize) {
    LLVM_DEBUG(dbgs() << "DSE: Partial overwrite of an earlier store ["
622                       << EarlierOff << ", "
623                       << int64_t(EarlierOff + EarlierSize)
624                       << ") by a later store [" << LaterOff << ", "
625                       << int64_t(LaterOff + LaterSize) << ")\n");
626     // TODO: Maybe come up with a better name?
627     return OW_PartialEarlierWithFullLater;
628   }
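  // Illustrative numbers: an earlier store covering bytes [0, 8) and a later
  // store covering [2, 6) satisfy the check above (2 >= 0, 8 > 2 and
  // 2 - 0 + 4 <= 8), so OW_PartialEarlierWithFullLater is returned and the
  // caller may merge the two values via tryToMergePartialOverlappingStores.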
629 
630   // Another interesting case is if the later store overwrites the end of the
631   // earlier store.
632   //
633   //      |--earlier--|
634   //                |--   later   --|
635   //
636   // In this case we may want to trim the size of earlier to avoid generating
637   // writes to addresses which will definitely be overwritten later
638   if (!EnablePartialOverwriteTracking &&
639       (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + EarlierSize) &&
640        int64_t(LaterOff + LaterSize) >= int64_t(EarlierOff + EarlierSize)))
641     return OW_End;
642 
643   // Finally, we also need to check if the later store overwrites the beginning
644   // of the earlier store.
645   //
646   //                |--earlier--|
647   //      |--   later   --|
648   //
649   // In this case we may want to move the destination address and trim the size
650   // of earlier to avoid generating writes to addresses which will definitely
651   // be overwritten later.
652   if (!EnablePartialOverwriteTracking &&
653       (LaterOff <= EarlierOff && int64_t(LaterOff + LaterSize) > EarlierOff)) {
654     assert(int64_t(LaterOff + LaterSize) < int64_t(EarlierOff + EarlierSize) &&
655            "Expect to be handled as OW_Complete");
656     return OW_Begin;
657   }
658   // Otherwise, they don't completely overlap.
659   return OW_Unknown;
660 }
661 
662 /// If 'Inst' might be a self read (i.e. a noop copy of a
663 /// memory region into an identical pointer) then it doesn't actually make its
664 /// input dead in the traditional sense.  Consider this case:
665 ///
666 ///   memmove(A <- B)
667 ///   memmove(A <- A)
668 ///
669 /// In this case, the second store to A does not make the first store to A dead.
670 /// The usual situation isn't an explicit A<-A store like this (which can be
671 /// trivially removed) but a case where two pointers may alias.
672 ///
673 /// This function detects when it is unsafe to remove a dependent instruction
674 /// because the DSE inducing instruction may be a self-read.
675 static bool isPossibleSelfRead(Instruction *Inst,
676                                const MemoryLocation &InstStoreLoc,
677                                Instruction *DepWrite,
678                                const TargetLibraryInfo &TLI,
679                                AliasAnalysis &AA) {
680   // Self reads can only happen for instructions that read memory.  Get the
681   // location read.
682   MemoryLocation InstReadLoc = getLocForRead(Inst, TLI);
683   if (!InstReadLoc.Ptr)
684     return false; // Not a reading instruction.
685 
686   // If the read and written loc obviously don't alias, it isn't a read.
687   if (AA.isNoAlias(InstReadLoc, InstStoreLoc))
688     return false;
689 
690   if (isa<AnyMemCpyInst>(Inst)) {
691     // LLVM's memcpy overlap semantics are not fully fleshed out (see PR11763)
692     // but in practice memcpy(A <- B) either means that A and B are disjoint or
693     // are equal (i.e. there are not partial overlaps).  Given that, if we have:
694     //
695     //   memcpy/memmove(A <- B)  // DepWrite
696     //   memcpy(A <- B)  // Inst
697     //
    // with Inst reading/writing a size >= DepWrite's size, we can reason as
699     // follows:
700     //
701     //   - If A == B then both the copies are no-ops, so the DepWrite can be
702     //     removed.
703     //   - If A != B then A and B are disjoint locations in Inst.  Since
704     //     Inst.size >= DepWrite.size A and B are disjoint in DepWrite too.
705     //     Therefore DepWrite can be removed.
706     MemoryLocation DepReadLoc = getLocForRead(DepWrite, TLI);
707 
708     if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
709       return false;
710   }
711 
712   // If DepWrite doesn't read memory or if we can't prove it is a must alias,
713   // then it can't be considered dead.
714   return true;
715 }
716 
/// Returns true if the memory which is accessed by the second instruction is
/// not modified between the first and the second instruction.
719 /// Precondition: Second instruction must be dominated by the first
720 /// instruction.
721 template <typename AATy>
722 static bool
723 memoryIsNotModifiedBetween(Instruction *FirstI, Instruction *SecondI, AATy &AA,
724                            const DataLayout &DL, DominatorTree *DT) {
725   // Do a backwards scan through the CFG from SecondI to FirstI. Look for
726   // instructions which can modify the memory location accessed by SecondI.
727   //
728   // While doing the walk keep track of the address to check. It might be
729   // different in different basic blocks due to PHI translation.
730   using BlockAddressPair = std::pair<BasicBlock *, PHITransAddr>;
731   SmallVector<BlockAddressPair, 16> WorkList;
732   // Keep track of the address we visited each block with. Bail out if we
733   // visit a block with different addresses.
734   DenseMap<BasicBlock *, Value *> Visited;
735 
736   BasicBlock::iterator FirstBBI(FirstI);
737   ++FirstBBI;
738   BasicBlock::iterator SecondBBI(SecondI);
739   BasicBlock *FirstBB = FirstI->getParent();
740   BasicBlock *SecondBB = SecondI->getParent();
741   MemoryLocation MemLoc = MemoryLocation::get(SecondI);
742   auto *MemLocPtr = const_cast<Value *>(MemLoc.Ptr);
743 
744   // Start checking the SecondBB.
745   WorkList.push_back(
746       std::make_pair(SecondBB, PHITransAddr(MemLocPtr, DL, nullptr)));
747   bool isFirstBlock = true;
748 
749   // Check all blocks going backward until we reach the FirstBB.
750   while (!WorkList.empty()) {
751     BlockAddressPair Current = WorkList.pop_back_val();
752     BasicBlock *B = Current.first;
753     PHITransAddr &Addr = Current.second;
754     Value *Ptr = Addr.getAddr();
755 
756     // Ignore instructions before FirstI if this is the FirstBB.
757     BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin());
758 
759     BasicBlock::iterator EI;
760     if (isFirstBlock) {
761       // Ignore instructions after SecondI if this is the first visit of SecondBB.
762       assert(B == SecondBB && "first block is not the store block");
763       EI = SecondBBI;
764       isFirstBlock = false;
765     } else {
766       // It's not SecondBB or (in case of a loop) the second visit of SecondBB.
767       // In this case we also have to look at instructions after SecondI.
768       EI = B->end();
769     }
770     for (; BI != EI; ++BI) {
771       Instruction *I = &*BI;
772       if (I->mayWriteToMemory() && I != SecondI)
773         if (isModSet(AA.getModRefInfo(I, MemLoc.getWithNewPtr(Ptr))))
774           return false;
775     }
776     if (B != FirstBB) {
777       assert(B != &FirstBB->getParent()->getEntryBlock() &&
          "Should not hit the entry block because SecondI must be dominated "
          "by FirstI");
779       for (auto PredI = pred_begin(B), PE = pred_end(B); PredI != PE; ++PredI) {
780         PHITransAddr PredAddr = Addr;
781         if (PredAddr.NeedsPHITranslationFromBlock(B)) {
782           if (!PredAddr.IsPotentiallyPHITranslatable())
783             return false;
784           if (PredAddr.PHITranslateValue(B, *PredI, DT, false))
785             return false;
786         }
787         Value *TranslatedPtr = PredAddr.getAddr();
788         auto Inserted = Visited.insert(std::make_pair(*PredI, TranslatedPtr));
789         if (!Inserted.second) {
790           // We already visited this block before. If it was with a different
791           // address - bail out!
792           if (TranslatedPtr != Inserted.first->second)
793             return false;
794           // ... otherwise just skip it.
795           continue;
796         }
797         WorkList.push_back(std::make_pair(*PredI, PredAddr));
798       }
799     }
800   }
801   return true;
802 }
803 
804 /// Find all blocks that will unconditionally lead to the block BB and append
805 /// them to F.
806 static void findUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
807                                    BasicBlock *BB, DominatorTree *DT) {
808   for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
809     BasicBlock *Pred = *I;
810     if (Pred == BB) continue;
811     Instruction *PredTI = Pred->getTerminator();
812     if (PredTI->getNumSuccessors() != 1)
813       continue;
814 
815     if (DT->isReachableFromEntry(Pred))
816       Blocks.push_back(Pred);
817   }
818 }
819 
820 /// Handle frees of entire structures whose dependency is a store
821 /// to a field of that structure.
822 static bool handleFree(CallInst *F, AliasAnalysis *AA,
823                        MemoryDependenceResults *MD, DominatorTree *DT,
824                        const TargetLibraryInfo *TLI,
825                        InstOverlapIntervalsTy &IOL,
826                        MapVector<Instruction *, bool> &ThrowableInst) {
827   bool MadeChange = false;
828 
829   MemoryLocation Loc = MemoryLocation::getAfter(F->getOperand(0));
830   SmallVector<BasicBlock *, 16> Blocks;
831   Blocks.push_back(F->getParent());
832 
833   while (!Blocks.empty()) {
834     BasicBlock *BB = Blocks.pop_back_val();
835     Instruction *InstPt = BB->getTerminator();
836     if (BB == F->getParent()) InstPt = F;
837 
838     MemDepResult Dep =
839         MD->getPointerDependencyFrom(Loc, false, InstPt->getIterator(), BB);
840     while (Dep.isDef() || Dep.isClobber()) {
841       Instruction *Dependency = Dep.getInst();
842       if (!hasAnalyzableMemoryWrite(Dependency, *TLI) ||
843           !isRemovable(Dependency))
844         break;
845 
846       Value *DepPointer =
847           getUnderlyingObject(getStoredPointerOperand(Dependency, *TLI));
848 
849       // Check for aliasing.
850       if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
851         break;
852 
853       LLVM_DEBUG(
854           dbgs() << "DSE: Dead Store to soon to be freed memory:\n  DEAD: "
855                  << *Dependency << '\n');
856 
857       // DCE instructions only used to calculate that store.
858       BasicBlock::iterator BBI(Dependency);
859       deleteDeadInstruction(Dependency, &BBI, *MD, *TLI, IOL,
860                             ThrowableInst);
861       ++NumFastStores;
862       MadeChange = true;
863 
864       // Inst's old Dependency is now deleted. Compute the next dependency,
865       // which may also be dead, as in
866       //    s[0] = 0;
867       //    s[1] = 0; // This has just been deleted.
868       //    free(s);
869       Dep = MD->getPointerDependencyFrom(Loc, false, BBI, BB);
870     }
871 
872     if (Dep.isNonLocal())
873       findUnconditionalPreds(Blocks, BB, DT);
874   }
875 
876   return MadeChange;
877 }
878 
879 /// Check to see if the specified location may alias any of the stack objects in
880 /// the DeadStackObjects set. If so, they become live because the location is
881 /// being loaded.
882 static void removeAccessedObjects(const MemoryLocation &LoadedLoc,
883                                   SmallSetVector<const Value *, 16> &DeadStackObjects,
884                                   const DataLayout &DL, AliasAnalysis *AA,
885                                   const TargetLibraryInfo *TLI,
886                                   const Function *F) {
887   const Value *UnderlyingPointer = getUnderlyingObject(LoadedLoc.Ptr);
888 
889   // A constant can't be in the dead pointer set.
890   if (isa<Constant>(UnderlyingPointer))
891     return;
892 
893   // If the kill pointer can be easily reduced to an alloca, don't bother doing
894   // extraneous AA queries.
895   if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
896     DeadStackObjects.remove(UnderlyingPointer);
897     return;
898   }
899 
900   // Remove objects that could alias LoadedLoc.
901   DeadStackObjects.remove_if([&](const Value *I) {
902     // See if the loaded location could alias the stack location.
903     MemoryLocation StackLoc(I, getPointerSize(I, DL, *TLI, F));
904     return !AA->isNoAlias(StackLoc, LoadedLoc);
905   });
906 }
907 
908 /// Remove dead stores to stack-allocated locations in the function end block.
909 /// Ex:
910 /// %A = alloca i32
911 /// ...
912 /// store i32 1, i32* %A
913 /// ret void
914 static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
915                            MemoryDependenceResults *MD,
916                            const TargetLibraryInfo *TLI,
917                            InstOverlapIntervalsTy &IOL,
918                            MapVector<Instruction *, bool> &ThrowableInst) {
919   bool MadeChange = false;
920 
921   // Keep track of all of the stack objects that are dead at the end of the
922   // function.
923   SmallSetVector<const Value*, 16> DeadStackObjects;
924 
925   // Find all of the alloca'd pointers in the entry block.
926   BasicBlock &Entry = BB.getParent()->front();
927   for (Instruction &I : Entry) {
928     if (isa<AllocaInst>(&I))
929       DeadStackObjects.insert(&I);
930 
931     // Okay, so these are dead heap objects, but if the pointer never escapes
    // then it's leaked by this function anyway.
933     else if (isAllocLikeFn(&I, TLI) && !PointerMayBeCaptured(&I, true, true))
934       DeadStackObjects.insert(&I);
935   }
936 
937   // Treat byval or inalloca arguments the same, stores to them are dead at the
938   // end of the function.
939   for (Argument &AI : BB.getParent()->args())
940     if (AI.hasPassPointeeByValueCopyAttr())
941       DeadStackObjects.insert(&AI);
942 
943   const DataLayout &DL = BB.getModule()->getDataLayout();
944 
945   // Scan the basic block backwards
946   for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
947     --BBI;
948 
949     // If we find a store, check to see if it points into a dead stack value.
950     if (hasAnalyzableMemoryWrite(&*BBI, *TLI) && isRemovable(&*BBI)) {
951       // See through pointer-to-pointer bitcasts
952       SmallVector<const Value *, 4> Pointers;
953       getUnderlyingObjects(getStoredPointerOperand(&*BBI, *TLI), Pointers);
954 
955       // Stores to stack values are valid candidates for removal.
956       bool AllDead = true;
957       for (const Value *Pointer : Pointers)
958         if (!DeadStackObjects.count(Pointer)) {
959           AllDead = false;
960           break;
961         }
962 
963       if (AllDead) {
964         Instruction *Dead = &*BBI;
965 
966         LLVM_DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
967                           << *Dead << "\n  Objects: ";
968                    for (SmallVectorImpl<const Value *>::iterator I =
969                             Pointers.begin(),
970                         E = Pointers.end();
971                         I != E; ++I) {
972                      dbgs() << **I;
973                      if (std::next(I) != E)
974                        dbgs() << ", ";
975                    } dbgs()
976                    << '\n');
977 
978         // DCE instructions only used to calculate that store.
979         deleteDeadInstruction(Dead, &BBI, *MD, *TLI, IOL, ThrowableInst,
980                               &DeadStackObjects);
981         ++NumFastStores;
982         MadeChange = true;
983         continue;
984       }
985     }
986 
987     // Remove any dead non-memory-mutating instructions.
988     if (isInstructionTriviallyDead(&*BBI, TLI)) {
989       LLVM_DEBUG(dbgs() << "DSE: Removing trivially dead instruction:\n  DEAD: "
990                         << *&*BBI << '\n');
991       deleteDeadInstruction(&*BBI, &BBI, *MD, *TLI, IOL, ThrowableInst,
992                             &DeadStackObjects);
993       ++NumFastOther;
994       MadeChange = true;
995       continue;
996     }
997 
998     if (isa<AllocaInst>(BBI)) {
999       // Remove allocas from the list of dead stack objects; there can't be
1000       // any references before the definition.
1001       DeadStackObjects.remove(&*BBI);
1002       continue;
1003     }
1004 
1005     if (auto *Call = dyn_cast<CallBase>(&*BBI)) {
1006       // Remove allocation function calls from the list of dead stack objects;
1007       // there can't be any references before the definition.
1008       if (isAllocLikeFn(&*BBI, TLI))
1009         DeadStackObjects.remove(&*BBI);
1010 
1011       // If this call does not access memory, it can't be loading any of our
1012       // pointers.
1013       if (AA->doesNotAccessMemory(Call))
1014         continue;
1015 
1016       // If the call might load from any of our allocas, then any store above
1017       // the call is live.
1018       DeadStackObjects.remove_if([&](const Value *I) {
1019         // See if the call site touches the value.
1020         return isRefSet(AA->getModRefInfo(
1021             Call, I, getPointerSize(I, DL, *TLI, BB.getParent())));
1022       });
1023 
1024       // If all of the allocas were clobbered by the call then we're not going
1025       // to find anything else to process.
1026       if (DeadStackObjects.empty())
1027         break;
1028 
1029       continue;
1030     }
1031 
    // We can remove the dead stores, irrespective of the fence and its
    // ordering (release/acquire/seq_cst). Fences only constrain the ordering
    // of already visible stores; they do not make a store visible to other
    // threads. So, skipping over a fence does not change a store from being
    // dead.
1037     if (isa<FenceInst>(*BBI))
1038       continue;
1039 
1040     MemoryLocation LoadedLoc;
1041 
1042     // If we encounter a use of the pointer, it is no longer considered dead
1043     if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
1044       if (!L->isUnordered()) // Be conservative with atomic/volatile load
1045         break;
1046       LoadedLoc = MemoryLocation::get(L);
1047     } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
1048       LoadedLoc = MemoryLocation::get(V);
1049     } else if (!BBI->mayReadFromMemory()) {
1050       // Instruction doesn't read memory.  Note that stores that weren't removed
1051       // above will hit this case.
1052       continue;
1053     } else {
1054       // Unknown inst; assume it clobbers everything.
1055       break;
1056     }
1057 
1058     // Remove any allocas from the DeadPointer set that are loaded, as this
1059     // makes any stores above the access live.
1060     removeAccessedObjects(LoadedLoc, DeadStackObjects, DL, AA, TLI, BB.getParent());
1061 
1062     // If all of the allocas were clobbered by the access then we're not going
1063     // to find anything else to process.
1064     if (DeadStackObjects.empty())
1065       break;
1066   }
1067 
1068   return MadeChange;
1069 }
1070 
1071 static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierOffset,
1072                          uint64_t &EarlierSize, int64_t LaterOffset,
1073                          uint64_t LaterSize, bool IsOverwriteEnd) {
  // TODO: base this on the target vector size so that if the earlier
  // store was too small to get vector writes anyway then it's likely
  // a good idea to shorten it.
  // Power-of-2 vector writes are probably always a bad idea to optimize,
  // as any store/memset/memcpy is likely using vector instructions, so
  // shortening it to a non-vector size is likely to be slower.
1080   auto *EarlierIntrinsic = cast<AnyMemIntrinsic>(EarlierWrite);
1081   unsigned EarlierWriteAlign = EarlierIntrinsic->getDestAlignment();
1082   if (!IsOverwriteEnd)
1083     LaterOffset = int64_t(LaterOffset + LaterSize);
1084 
1085   if (!(isPowerOf2_64(LaterOffset) && EarlierWriteAlign <= LaterOffset) &&
1086       !((EarlierWriteAlign != 0) && LaterOffset % EarlierWriteAlign == 0))
1087     return false;
1088 
1089   int64_t NewLength = IsOverwriteEnd
1090                           ? LaterOffset - EarlierOffset
1091                           : EarlierSize - (LaterOffset - EarlierOffset);
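  // Illustrative numbers: for an earlier memset covering bytes [0, 32)
  // (EarlierOffset = 0, EarlierSize = 32) whose end is overwritten by a later
  // store of [16, 32), NewLength is 16 - 0 = 16. If instead the beginning
  // [0, 16) is overwritten, LaterOffset has been advanced to 16 above and
  // NewLength is 32 - (16 - 0) = 16, with the destination pointer moved
  // forward by 16 bytes below.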
1092 
1093   if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(EarlierWrite)) {
1094     // When shortening an atomic memory intrinsic, the newly shortened
1095     // length must remain an integer multiple of the element size.
1096     const uint32_t ElementSize = AMI->getElementSizeInBytes();
1097     if (0 != NewLength % ElementSize)
1098       return false;
1099   }
1100 
1101   LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW "
1102                     << (IsOverwriteEnd ? "END" : "BEGIN") << ": "
1103                     << *EarlierWrite << "\n  KILLER (offset " << LaterOffset
1104                     << ", " << EarlierSize << ")\n");
1105 
1106   Value *EarlierWriteLength = EarlierIntrinsic->getLength();
1107   Value *TrimmedLength =
1108       ConstantInt::get(EarlierWriteLength->getType(), NewLength);
1109   EarlierIntrinsic->setLength(TrimmedLength);
1110 
1111   EarlierSize = NewLength;
1112   if (!IsOverwriteEnd) {
1113     int64_t OffsetMoved = (LaterOffset - EarlierOffset);
1114     Value *Indices[1] = {
1115         ConstantInt::get(EarlierWriteLength->getType(), OffsetMoved)};
1116     GetElementPtrInst *NewDestGEP = GetElementPtrInst::CreateInBounds(
1117         EarlierIntrinsic->getRawDest()->getType()->getPointerElementType(),
1118         EarlierIntrinsic->getRawDest(), Indices, "", EarlierWrite);
1119     NewDestGEP->setDebugLoc(EarlierIntrinsic->getDebugLoc());
1120     EarlierIntrinsic->setDest(NewDestGEP);
1121     EarlierOffset = EarlierOffset + OffsetMoved;
1122   }
1123   return true;
1124 }
1125 
1126 static bool tryToShortenEnd(Instruction *EarlierWrite,
1127                             OverlapIntervalsTy &IntervalMap,
1128                             int64_t &EarlierStart, uint64_t &EarlierSize) {
1129   if (IntervalMap.empty() || !isShortenableAtTheEnd(EarlierWrite))
1130     return false;
1131 
1132   OverlapIntervalsTy::iterator OII = --IntervalMap.end();
1133   int64_t LaterStart = OII->second;
1134   uint64_t LaterSize = OII->first - LaterStart;
1135 
1136   assert(OII->first - LaterStart >= 0 && "Size expected to be positive");
1137 
1138   if (LaterStart > EarlierStart &&
1139       // Note: "LaterStart - EarlierStart" is known to be positive due to
1140       // preceding check.
1141       (uint64_t)(LaterStart - EarlierStart) < EarlierSize &&
1142       // Note: "EarlierSize - (uint64_t)(LaterStart - EarlierStart)" is known to
1143       // be non negative due to preceding checks.
1144       LaterSize >= EarlierSize - (uint64_t)(LaterStart - EarlierStart)) {
1145     if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
1146                      LaterSize, true)) {
1147       IntervalMap.erase(OII);
1148       return true;
1149     }
1150   }
1151   return false;
1152 }
1153 
1154 static bool tryToShortenBegin(Instruction *EarlierWrite,
1155                               OverlapIntervalsTy &IntervalMap,
1156                               int64_t &EarlierStart, uint64_t &EarlierSize) {
1157   if (IntervalMap.empty() || !isShortenableAtTheBeginning(EarlierWrite))
1158     return false;
1159 
1160   OverlapIntervalsTy::iterator OII = IntervalMap.begin();
1161   int64_t LaterStart = OII->second;
1162   uint64_t LaterSize = OII->first - LaterStart;
1163 
1164   assert(OII->first - LaterStart >= 0 && "Size expected to be positive");
1165 
1166   if (LaterStart <= EarlierStart &&
1167       // Note: "EarlierStart - LaterStart" is known to be non negative due to
1168       // preceding check.
1169       LaterSize > (uint64_t)(EarlierStart - LaterStart)) {
1170     // Note: "LaterSize - (uint64_t)(EarlierStart - LaterStart)" is known to be
1171     // positive due to preceding checks.
1172     assert(LaterSize - (uint64_t)(EarlierStart - LaterStart) < EarlierSize &&
1173            "Should have been handled as OW_Complete");
1174     if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
1175                      LaterSize, false)) {
1176       IntervalMap.erase(OII);
1177       return true;
1178     }
1179   }
1180   return false;
1181 }
1182 
1183 static bool removePartiallyOverlappedStores(const DataLayout &DL,
1184                                             InstOverlapIntervalsTy &IOL,
1185                                             const TargetLibraryInfo &TLI) {
1186   bool Changed = false;
1187   for (auto OI : IOL) {
1188     Instruction *EarlierWrite = OI.first;
1189     MemoryLocation Loc = getLocForWrite(EarlierWrite, TLI);
1190     assert(isRemovable(EarlierWrite) && "Expect only removable instruction");
1191 
1192     const Value *Ptr = Loc.Ptr->stripPointerCasts();
1193     int64_t EarlierStart = 0;
1194     uint64_t EarlierSize = Loc.Size.getValue();
1195     GetPointerBaseWithConstantOffset(Ptr, EarlierStart, DL);
1196     OverlapIntervalsTy &IntervalMap = OI.second;
1197     Changed |=
1198         tryToShortenEnd(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
1199     if (IntervalMap.empty())
1200       continue;
1201     Changed |=
1202         tryToShortenBegin(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
1203   }
1204   return Changed;
1205 }
1206 
1207 static bool eliminateNoopStore(Instruction *Inst, BasicBlock::iterator &BBI,
1208                                AliasAnalysis *AA, MemoryDependenceResults *MD,
1209                                const DataLayout &DL,
1210                                const TargetLibraryInfo *TLI,
1211                                InstOverlapIntervalsTy &IOL,
1212                                MapVector<Instruction *, bool> &ThrowableInst,
1213                                DominatorTree *DT) {
1214   // Must be a store instruction.
1215   StoreInst *SI = dyn_cast<StoreInst>(Inst);
1216   if (!SI)
1217     return false;
1218 
1219   // If we're storing the same value back to a pointer that we just loaded from,
1220   // then the store can be removed.
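  // For example (illustrative IR), the store below is a no-op as long as %p
  // is not written to between the two instructions:
  //   %v = load i32, i32* %p
  //   store i32 %v, i32* %p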
1221   if (LoadInst *DepLoad = dyn_cast<LoadInst>(SI->getValueOperand())) {
1222     if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
1223         isRemovable(SI) &&
1224         memoryIsNotModifiedBetween(DepLoad, SI, *AA, DL, DT)) {
1225 
1226       LLVM_DEBUG(
1227           dbgs() << "DSE: Remove Store Of Load from same pointer:\n  LOAD: "
1228                  << *DepLoad << "\n  STORE: " << *SI << '\n');
1229 
1230       deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, ThrowableInst);
1231       ++NumRedundantStores;
1232       return true;
1233     }
1234   }
1235 
1236   // Remove null stores into the calloc'ed objects
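  // For example (illustrative IR), the store of zero below is redundant
  // because calloc already returns zero-initialized memory:
  //   %m = call i8* @calloc(i64 1, i64 16)
  //   store i8 0, i8* %m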
1237   Constant *StoredConstant = dyn_cast<Constant>(SI->getValueOperand());
1238   if (StoredConstant && StoredConstant->isNullValue() && isRemovable(SI)) {
1239     Instruction *UnderlyingPointer =
1240         dyn_cast<Instruction>(getUnderlyingObject(SI->getPointerOperand()));
1241 
1242     if (UnderlyingPointer && isCallocLikeFn(UnderlyingPointer, TLI) &&
1243         memoryIsNotModifiedBetween(UnderlyingPointer, SI, *AA, DL, DT)) {
1244       LLVM_DEBUG(
1245           dbgs() << "DSE: Remove null store to the calloc'ed object:\n  DEAD: "
1246                  << *Inst << "\n  OBJECT: " << *UnderlyingPointer << '\n');
1247 
1248       deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, ThrowableInst);
1249       ++NumRedundantStores;
1250       return true;
1251     }
1252   }
1253   return false;
1254 }
1255 
1256 template <typename AATy>
1257 static Constant *tryToMergePartialOverlappingStores(
1258     StoreInst *Earlier, StoreInst *Later, int64_t InstWriteOffset,
1259     int64_t DepWriteOffset, const DataLayout &DL, AATy &AA, DominatorTree *DT) {
1260 
1261   if (Earlier && isa<ConstantInt>(Earlier->getValueOperand()) &&
1262       DL.typeSizeEqualsStoreSize(Earlier->getValueOperand()->getType()) &&
1263       Later && isa<ConstantInt>(Later->getValueOperand()) &&
1264       DL.typeSizeEqualsStoreSize(Later->getValueOperand()->getType()) &&
1265       memoryIsNotModifiedBetween(Earlier, Later, AA, DL, DT)) {
    // The two stores can be merged if:
    //   a) the earlier store is partially overwritten by the later store,
    //   b) the later store is fully contained in the earlier one,
    //   c) both stores have a constant value, and
    //   d) neither of the two stores needs padding.
    // If so, merge the two stores, replacing the earlier store's value with
    // a merge of both values.
1273     // TODO: Deal with other constant types (vectors, etc), and probably
1274     // some mem intrinsics (if needed)
1275 
1276     APInt EarlierValue =
1277         cast<ConstantInt>(Earlier->getValueOperand())->getValue();
1278     APInt LaterValue = cast<ConstantInt>(Later->getValueOperand())->getValue();
1279     unsigned LaterBits = LaterValue.getBitWidth();
1280     assert(EarlierValue.getBitWidth() > LaterValue.getBitWidth());
1281     LaterValue = LaterValue.zext(EarlierValue.getBitWidth());
1282 
1283     // Offset of the smaller store inside the larger store
1284     unsigned BitOffsetDiff = (InstWriteOffset - DepWriteOffset) * 8;
1285     unsigned LShiftAmount = DL.isBigEndian() ? EarlierValue.getBitWidth() -
1286                                                    BitOffsetDiff - LaterBits
1287                                              : BitOffsetDiff;
1288     APInt Mask = APInt::getBitsSet(EarlierValue.getBitWidth(), LShiftAmount,
1289                                    LShiftAmount + LaterBits);
1290     // Clear the bits we'll be replacing, then OR with the smaller
1291     // store, shifted appropriately.
1292     APInt Merged = (EarlierValue & ~Mask) | (LaterValue << LShiftAmount);
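    // For illustration only (hypothetical, little-endian values): an earlier
    // 'store i32 0' overwritten by a later 'store i8 0xAA' at byte offset 1
    // gives BitOffsetDiff = 8, LShiftAmount = 8, Mask = 0x0000FF00 and
    // Merged = 0x0000AA00.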
1293     LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n  Earlier: " << *Earlier
1294                       << "\n  Later: " << *Later
1295                       << "\n  Merged Value: " << Merged << '\n');
1296     return ConstantInt::get(Earlier->getValueOperand()->getType(), Merged);
1297   }
1298   return nullptr;
1299 }
1300 
1301 static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA,
1302                                 MemoryDependenceResults *MD, DominatorTree *DT,
1303                                 const TargetLibraryInfo *TLI) {
1304   const DataLayout &DL = BB.getModule()->getDataLayout();
1305   bool MadeChange = false;
1306 
1307   MapVector<Instruction *, bool> ThrowableInst;
1308 
1309   // A map of interval maps representing partially-overwritten value parts.
1310   InstOverlapIntervalsTy IOL;
1311 
1312   // Do a top-down walk on the BB.
1313   for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
1314     // Handle 'free' calls specially.
1315     if (CallInst *F = isFreeCall(&*BBI, TLI)) {
1316       MadeChange |= handleFree(F, AA, MD, DT, TLI, IOL, ThrowableInst);
1317       // Increment BBI after handleFree has potentially deleted instructions.
1318       // This ensures we maintain a valid iterator.
1319       ++BBI;
1320       continue;
1321     }
1322 
1323     Instruction *Inst = &*BBI++;
1324 
1325     if (Inst->mayThrow()) {
1326       ThrowableInst[Inst] = true;
1327       continue;
1328     }
1329 
1330     // Check to see if Inst writes to memory.  If not, continue.
1331     if (!hasAnalyzableMemoryWrite(Inst, *TLI))
1332       continue;
1333 
    // eliminateNoopStore will update the iterator, if necessary.
1335     if (eliminateNoopStore(Inst, BBI, AA, MD, DL, TLI, IOL,
1336                            ThrowableInst, DT)) {
1337       MadeChange = true;
1338       continue;
1339     }
1340 
1341     // If we find something that writes memory, get its memory dependence.
1342     MemDepResult InstDep = MD->getDependency(Inst);
1343 
1344     // Ignore any store where we can't find a local dependence.
1345     // FIXME: cross-block DSE would be fun. :)
1346     if (!InstDep.isDef() && !InstDep.isClobber())
1347       continue;
1348 
1349     // Figure out what location is being stored to.
1350     MemoryLocation Loc = getLocForWrite(Inst, *TLI);
1351 
1352     // If we didn't get a useful location, fail.
1353     if (!Loc.Ptr)
1354       continue;
1355 
1356     // Loop until we find a store we can eliminate or a load that
1357     // invalidates the analysis. Without an upper bound on the number of
1358     // instructions examined, this analysis can become very time-consuming.
1359     // However, the potential gain diminishes as we process more instructions
1360     // without eliminating any of them. Therefore, we limit the number of
1361     // instructions we look at.
1362     auto Limit = MD->getDefaultBlockScanLimit();
1363     while (InstDep.isDef() || InstDep.isClobber()) {
1364       // Get the memory clobbered by the instruction we depend on.  MemDep will
1365       // skip any instructions that 'Loc' clearly doesn't interact with.  If we
1366       // end up depending on a may- or must-aliased load, then we can't optimize
1367       // away the store and we bail out.  However, if we depend on something
1368       // that overwrites the memory location we *can* potentially optimize it.
1369       //
1370       // Find out what memory location the dependent instruction stores.
1371       Instruction *DepWrite = InstDep.getInst();
1372       if (!hasAnalyzableMemoryWrite(DepWrite, *TLI))
1373         break;
1374       MemoryLocation DepLoc = getLocForWrite(DepWrite, *TLI);
      // If we didn't get a useful location, bail out.
1376       if (!DepLoc.Ptr)
1377         break;
1378 
      // Find the last throwing instruction not yet removed by a call to
      // deleteDeadInstruction.
1381       Instruction *LastThrowing = nullptr;
1382       if (!ThrowableInst.empty())
1383         LastThrowing = ThrowableInst.back().first;
1384 
      // Make sure we don't look past a call which might throw. This is an
      // issue because MemoryDependenceAnalysis works in the wrong direction:
      // it finds instructions which dominate the current instruction, rather
      // than instructions which are post-dominated by the current instruction.
1389       //
1390       // If the underlying object is a non-escaping memory allocation, any store
1391       // to it is dead along the unwind edge. Otherwise, we need to preserve
1392       // the store.
1393       if (LastThrowing && DepWrite->comesBefore(LastThrowing)) {
1394         const Value *Underlying = getUnderlyingObject(DepLoc.Ptr);
1395         bool IsStoreDeadOnUnwind = isa<AllocaInst>(Underlying);
        if (!IsStoreDeadOnUnwind) {
          // We're looking for a call to an allocation function
          // where the allocation doesn't escape before the last
          // throwing instruction; PointerMayBeCaptured is a
          // reasonably fast approximation.
          IsStoreDeadOnUnwind = isAllocLikeFn(Underlying, TLI) &&
              !PointerMayBeCaptured(Underlying, false, true);
        }
1404         if (!IsStoreDeadOnUnwind)
1405           break;
1406       }
1407 
1408       // If we find a write that is a) removable (i.e., non-volatile), b) is
1409       // completely obliterated by the store to 'Loc', and c) which we know that
1410       // 'Inst' doesn't load from, then we can remove it.
1411       // Also try to merge two stores if a later one only touches memory written
1412       // to by the earlier one.
1413       if (isRemovable(DepWrite) &&
1414           !isPossibleSelfRead(Inst, Loc, DepWrite, *TLI, *AA)) {
1415         int64_t InstWriteOffset, DepWriteOffset;
1416         OverwriteResult OR = isOverwrite(Inst, DepWrite, Loc, DepLoc, DL, *TLI,
1417                                          DepWriteOffset, InstWriteOffset, *AA,
1418                                          BB.getParent());
1419         if (OR == OW_MaybePartial)
1420           OR = isPartialOverwrite(Loc, DepLoc, DepWriteOffset, InstWriteOffset,
1421                                   DepWrite, IOL);
1422 
1423         if (OR == OW_Complete) {
1424           LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: " << *DepWrite
1425                             << "\n  KILLER: " << *Inst << '\n');
1426 
1427           // Delete the store and now-dead instructions that feed it.
1428           deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL,
1429                                 ThrowableInst);
1430           ++NumFastStores;
1431           MadeChange = true;
1432 
1433           // We erased DepWrite; start over.
1434           InstDep = MD->getDependency(Inst);
1435           continue;
1436         } else if ((OR == OW_End && isShortenableAtTheEnd(DepWrite)) ||
1437                    ((OR == OW_Begin &&
1438                      isShortenableAtTheBeginning(DepWrite)))) {
1439           assert(!EnablePartialOverwriteTracking && "Do not expect to perform "
1440                                                     "when partial-overwrite "
1441                                                     "tracking is enabled");
1442           // The overwrite result is known, so these must be known, too.
1443           uint64_t EarlierSize = DepLoc.Size.getValue();
1444           uint64_t LaterSize = Loc.Size.getValue();
1445           bool IsOverwriteEnd = (OR == OW_End);
1446           MadeChange |= tryToShorten(DepWrite, DepWriteOffset, EarlierSize,
1447                                     InstWriteOffset, LaterSize, IsOverwriteEnd);
1448         } else if (EnablePartialStoreMerging &&
1449                    OR == OW_PartialEarlierWithFullLater) {
1450           auto *Earlier = dyn_cast<StoreInst>(DepWrite);
1451           auto *Later = dyn_cast<StoreInst>(Inst);
1452           if (Constant *C = tryToMergePartialOverlappingStores(
1453                   Earlier, Later, InstWriteOffset, DepWriteOffset, DL, *AA,
1454                   DT)) {
1455             auto *SI = new StoreInst(
1456                 C, Earlier->getPointerOperand(), false, Earlier->getAlign(),
1457                 Earlier->getOrdering(), Earlier->getSyncScopeID(), DepWrite);
1458 
1459             unsigned MDToKeep[] = {LLVMContext::MD_dbg, LLVMContext::MD_tbaa,
1460                                    LLVMContext::MD_alias_scope,
1461                                    LLVMContext::MD_noalias,
1462                                    LLVMContext::MD_nontemporal};
1463             SI->copyMetadata(*DepWrite, MDToKeep);
1464             ++NumModifiedStores;
1465 
1466             // Delete the old stores and now-dead instructions that feed them.
1467             deleteDeadInstruction(Inst, &BBI, *MD, *TLI, IOL,
1468                                   ThrowableInst);
1469             deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL,
1470                                   ThrowableInst);
1471             MadeChange = true;
1472 
1473             // We erased DepWrite and Inst (Loc); start over.
1474             break;
1475           }
1476         }
1477       }
1478 
1479       // If this is a may-aliased store that is clobbering the store value, we
1480       // can keep searching past it for another must-aliased pointer that stores
1481       // to the same location.  For example, in:
1482       //   store -> P
1483       //   store -> Q
1484       //   store -> P
1485       // we can remove the first store to P even though we don't know if P and Q
1486       // alias.
1487       if (DepWrite == &BB.front()) break;
1488 
1489       // Can't look past this instruction if it might read 'Loc'.
1490       if (isRefSet(AA->getModRefInfo(DepWrite, Loc)))
1491         break;
1492 
1493       InstDep = MD->getPointerDependencyFrom(Loc, /*isLoad=*/ false,
1494                                              DepWrite->getIterator(), &BB,
1495                                              /*QueryInst=*/ nullptr, &Limit);
1496     }
1497   }
1498 
1499   if (EnablePartialOverwriteTracking)
1500     MadeChange |= removePartiallyOverlappedStores(DL, IOL, *TLI);
1501 
1502   // If this block ends in a return, unwind, or unreachable, all allocas are
1503   // dead at its end, which means stores to them are also dead.
1504   if (BB.getTerminator()->getNumSuccessors() == 0)
1505     MadeChange |= handleEndBlock(BB, AA, MD, TLI, IOL, ThrowableInst);
1506 
1507   return MadeChange;
1508 }
1509 
1510 static bool eliminateDeadStores(Function &F, AliasAnalysis *AA,
1511                                 MemoryDependenceResults *MD, DominatorTree *DT,
1512                                 const TargetLibraryInfo *TLI) {
1513   bool MadeChange = false;
1514   for (BasicBlock &BB : F)
1515     // Only check non-dead blocks.  Dead blocks may have strange pointer
1516     // cycles that will confuse alias analysis.
1517     if (DT->isReachableFromEntry(&BB))
1518       MadeChange |= eliminateDeadStores(BB, AA, MD, DT, TLI);
1519 
1520   return MadeChange;
1521 }
1522 
1523 namespace {
1524 //=============================================================================
1525 // MemorySSA backed dead store elimination.
1526 //
1527 // The code below implements dead store elimination using MemorySSA. It uses
1528 // the following general approach: given a MemoryDef, walk upwards to find
1529 // clobbering MemoryDefs that may be killed by the starting def. Then check
1530 // that there are no uses that may read the location of the original MemoryDef
1531 // in between both MemoryDefs. A bit more concretely:
1532 //
1533 // For all MemoryDefs StartDef:
1534 // 1. Get the next dominating clobbering MemoryDef (EarlierAccess) by walking
1535 //    upwards.
1536 // 2. Check that there are no reads between EarlierAccess and the StartDef by
1537 //    checking all uses starting at EarlierAccess and walking until we see
1538 //    StartDef.
1539 // 3. For each found CurrentDef, check that:
1540 //   1. There are no barrier instructions between CurrentDef and StartDef (like
1541 //       throws or stores with ordering constraints).
1542 //   2. StartDef is executed whenever CurrentDef is executed.
1543 //   3. StartDef completely overwrites CurrentDef.
1544 // 4. Erase CurrentDef from the function and MemorySSA.
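//
// For example (illustrative IR), given
//   store i32 0, i32* %p   ; 1 = MemoryDef(liveOnEntry)
//   store i32 1, i32* %p   ; 2 = MemoryDef(1), the starting def
// walking upwards from 2 finds 1 as a candidate; if no MemoryUse in between
// may read %p, the first store is dead and can be erased.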
1545 
// Returns true if \p I is an intrinsic that does not read or write memory.
1547 bool isNoopIntrinsic(Instruction *I) {
1548   if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1549     switch (II->getIntrinsicID()) {
1550     case Intrinsic::lifetime_start:
1551     case Intrinsic::lifetime_end:
1552     case Intrinsic::invariant_end:
1553     case Intrinsic::launder_invariant_group:
1554     case Intrinsic::assume:
1555       return true;
1556     case Intrinsic::dbg_addr:
1557     case Intrinsic::dbg_declare:
1558     case Intrinsic::dbg_label:
1559     case Intrinsic::dbg_value:
1560       llvm_unreachable("Intrinsic should not be modeled in MemorySSA");
1561     default:
1562       return false;
1563     }
1564   }
1565   return false;
1566 }
1567 
1568 // Check if we can ignore \p D for DSE.
1569 bool canSkipDef(MemoryDef *D, bool DefVisibleToCaller) {
1570   Instruction *DI = D->getMemoryInst();
1571   // Calls that only access inaccessible memory cannot read or write any memory
1572   // locations we consider for elimination.
1573   if (auto *CB = dyn_cast<CallBase>(DI))
1574     if (CB->onlyAccessesInaccessibleMemory())
1575       return true;
1576 
1577   // We can eliminate stores to locations not visible to the caller across
1578   // throwing instructions.
1579   if (DI->mayThrow() && !DefVisibleToCaller)
1580     return true;
1581 
  // We can remove dead stores irrespective of the fence and its ordering
  // (release/acquire/seq_cst). Fences only constrain the ordering of already
  // visible stores; they do not make a store visible to other threads. So,
  // skipping over a fence does not change a store from being dead.
1587   if (isa<FenceInst>(DI))
1588     return true;
1589 
1590   // Skip intrinsics that do not really read or modify memory.
1591   if (isNoopIntrinsic(D->getMemoryInst()))
1592     return true;
1593 
1594   return false;
1595 }
1596 
1597 struct DSEState {
1598   Function &F;
1599   AliasAnalysis &AA;
1600 
1601   /// The single BatchAA instance that is used to cache AA queries. It will
1602   /// not be invalidated over the whole run. This is safe, because:
1603   /// 1. Only memory writes are removed, so the alias cache for memory
1604   ///    locations remains valid.
1605   /// 2. No new instructions are added (only instructions removed), so cached
1606   ///    information for a deleted value cannot be accessed by a re-used new
1607   ///    value pointer.
1608   BatchAAResults BatchAA;
1609 
1610   MemorySSA &MSSA;
1611   DominatorTree &DT;
1612   PostDominatorTree &PDT;
1613   const TargetLibraryInfo &TLI;
1614   const DataLayout &DL;
1615 
1616   // All MemoryDefs that potentially could kill other MemDefs.
1617   SmallVector<MemoryDef *, 64> MemDefs;
  // Memory accesses that should be skipped because they have already been
  // deleted.
1619   SmallPtrSet<MemoryAccess *, 4> SkipStores;
1620   // Keep track of all of the objects that are invisible to the caller before
1621   // the function returns.
1623   DenseMap<const Value *, bool> InvisibleToCallerBeforeRet;
1624   // Keep track of all of the objects that are invisible to the caller after
1625   // the function returns.
1626   DenseMap<const Value *, bool> InvisibleToCallerAfterRet;
1627   // Keep track of blocks with throwing instructions not modeled in MemorySSA.
1628   SmallPtrSet<BasicBlock *, 16> ThrowingBlocks;
1629   // Post-order numbers for each basic block. Used to figure out if memory
1630   // accesses are executed before another access.
1631   DenseMap<BasicBlock *, unsigned> PostOrderNumbers;
1632 
1633   /// Keep track of instructions (partly) overlapping with killing MemoryDefs per
1634   /// basic block.
1635   DenseMap<BasicBlock *, InstOverlapIntervalsTy> IOLs;
1636 
1637   DSEState(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, DominatorTree &DT,
1638            PostDominatorTree &PDT, const TargetLibraryInfo &TLI)
1639       : F(F), AA(AA), BatchAA(AA), MSSA(MSSA), DT(DT), PDT(PDT), TLI(TLI),
1640         DL(F.getParent()->getDataLayout()) {}
1641 
1642   static DSEState get(Function &F, AliasAnalysis &AA, MemorySSA &MSSA,
1643                       DominatorTree &DT, PostDominatorTree &PDT,
1644                       const TargetLibraryInfo &TLI) {
1645     DSEState State(F, AA, MSSA, DT, PDT, TLI);
1646     // Collect blocks with throwing instructions not modeled in MemorySSA and
1647     // alloc-like objects.
1648     unsigned PO = 0;
1649     for (BasicBlock *BB : post_order(&F)) {
1650       State.PostOrderNumbers[BB] = PO++;
1651       for (Instruction &I : *BB) {
1652         MemoryAccess *MA = MSSA.getMemoryAccess(&I);
1653         if (I.mayThrow() && !MA)
1654           State.ThrowingBlocks.insert(I.getParent());
1655 
1656         auto *MD = dyn_cast_or_null<MemoryDef>(MA);
1657         if (MD && State.MemDefs.size() < MemorySSADefsPerBlockLimit &&
1658             (State.getLocForWriteEx(&I) || State.isMemTerminatorInst(&I)))
1659           State.MemDefs.push_back(MD);
1660       }
1661     }
1662 
    // Treat byval or inalloca arguments the same as Allocas: stores to them
    // are dead at the end of the function.
1665     for (Argument &AI : F.args())
1666       if (AI.hasPassPointeeByValueCopyAttr()) {
1667         // For byval, the caller doesn't know the address of the allocation.
1668         if (AI.hasByValAttr())
1669           State.InvisibleToCallerBeforeRet.insert({&AI, true});
1670         State.InvisibleToCallerAfterRet.insert({&AI, true});
1671       }
1672 
1673     return State;
1674   }
1675 
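  /// Returns true if \p V is not visible to the caller after the function
  /// returns, e.g. an alloca or a non-escaping alloc-like heap allocation.
  /// The result is cached in InvisibleToCallerAfterRet.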
1676   bool isInvisibleToCallerAfterRet(const Value *V) {
1677     if (isa<AllocaInst>(V))
1678       return true;
1679     auto I = InvisibleToCallerAfterRet.insert({V, false});
1680     if (I.second) {
1681       if (!isInvisibleToCallerBeforeRet(V)) {
1682         I.first->second = false;
1683       } else {
1684         auto *Inst = dyn_cast<Instruction>(V);
1685         if (Inst && isAllocLikeFn(Inst, &TLI))
1686           I.first->second = !PointerMayBeCaptured(V, true, false);
1687       }
1688     }
1689     return I.first->second;
1690   }
1691 
1692   bool isInvisibleToCallerBeforeRet(const Value *V) {
1693     if (isa<AllocaInst>(V))
1694       return true;
1695     auto I = InvisibleToCallerBeforeRet.insert({V, false});
1696     if (I.second) {
1697       auto *Inst = dyn_cast<Instruction>(V);
1698       if (Inst && isAllocLikeFn(Inst, &TLI))
1699         // NOTE: This could be made more precise by PointerMayBeCapturedBefore
1700         // with the killing MemoryDef. But we refrain from doing so for now to
1701         // limit compile-time and this does not cause any changes to the number
1702         // of stores removed on a large test set in practice.
1703         I.first->second = !PointerMayBeCaptured(V, false, true);
1704     }
1705     return I.first->second;
1706   }
1707 
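  /// Returns the MemoryLocation written by \p I, if it can be determined.
  /// Plain stores and memory intrinsics are handled directly; for a few
  /// recognized libcalls (e.g. strcpy) and intrinsics the written location is
  /// conservatively everything from the destination pointer onwards.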
1708   Optional<MemoryLocation> getLocForWriteEx(Instruction *I) const {
1709     if (!I->mayWriteToMemory())
1710       return None;
1711 
1712     if (auto *MTI = dyn_cast<AnyMemIntrinsic>(I))
1713       return {MemoryLocation::getForDest(MTI)};
1714 
1715     if (auto *CB = dyn_cast<CallBase>(I)) {
1716       // If the functions may write to memory we do not know about, bail out.
1717       if (!CB->onlyAccessesArgMemory() &&
1718           !CB->onlyAccessesInaccessibleMemOrArgMem())
1719         return None;
1720 
1721       LibFunc LF;
1722       if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) {
1723         switch (LF) {
1724         case LibFunc_strcpy:
1725         case LibFunc_strncpy:
1726         case LibFunc_strcat:
1727         case LibFunc_strncat:
1728           return {MemoryLocation::getAfter(CB->getArgOperand(0))};
1729         default:
1730           break;
1731         }
1732       }
1733       switch (CB->getIntrinsicID()) {
1734       case Intrinsic::init_trampoline:
1735         return {MemoryLocation::getAfter(CB->getArgOperand(0))};
1736       case Intrinsic::masked_store:
1737         return {MemoryLocation::getForArgument(CB, 1, TLI)};
1738       default:
1739         break;
1740       }
1741       return None;
1742     }
1743 
1744     return MemoryLocation::getOrNone(I);
1745   }
1746 
1747   /// Returns true if \p UseInst completely overwrites \p DefLoc
1748   /// (stored by \p DefInst).
1749   bool isCompleteOverwrite(const MemoryLocation &DefLoc, Instruction *DefInst,
1750                            Instruction *UseInst) {
1751     // UseInst has a MemoryDef associated in MemorySSA. It's possible for a
1752     // MemoryDef to not write to memory, e.g. a volatile load is modeled as a
1753     // MemoryDef.
1754     if (!UseInst->mayWriteToMemory())
1755       return false;
1756 
1757     if (auto *CB = dyn_cast<CallBase>(UseInst))
1758       if (CB->onlyAccessesInaccessibleMemory())
1759         return false;
1760 
1761     int64_t InstWriteOffset, DepWriteOffset;
1762     if (auto CC = getLocForWriteEx(UseInst))
1763       return isOverwrite(UseInst, DefInst, *CC, DefLoc, DL, TLI, DepWriteOffset,
1764                          InstWriteOffset, BatchAA, &F) == OW_Complete;
1765     return false;
1766   }
1767 
1768   /// Returns true if \p Def is not read before returning from the function.
1769   bool isWriteAtEndOfFunction(MemoryDef *Def) {
1770     LLVM_DEBUG(dbgs() << "  Check if def " << *Def << " ("
1771                       << *Def->getMemoryInst()
1772                       << ") is at the end the function \n");
1773 
1774     auto MaybeLoc = getLocForWriteEx(Def->getMemoryInst());
1775     if (!MaybeLoc) {
1776       LLVM_DEBUG(dbgs() << "  ... could not get location for write.\n");
1777       return false;
1778     }
1779 
1780     SmallVector<MemoryAccess *, 4> WorkList;
1781     SmallPtrSet<MemoryAccess *, 8> Visited;
1782     auto PushMemUses = [&WorkList, &Visited](MemoryAccess *Acc) {
1783       if (!Visited.insert(Acc).second)
1784         return;
1785       for (Use &U : Acc->uses())
1786         WorkList.push_back(cast<MemoryAccess>(U.getUser()));
1787     };
1788     PushMemUses(Def);
1789     for (unsigned I = 0; I < WorkList.size(); I++) {
1790       if (WorkList.size() >= MemorySSAScanLimit) {
1791         LLVM_DEBUG(dbgs() << "  ... hit exploration limit.\n");
1792         return false;
1793       }
1794 
1795       MemoryAccess *UseAccess = WorkList[I];
1796       // Simply adding the users of MemoryPhi to the worklist is not enough,
1797       // because we might miss read clobbers in different iterations of a loop,
1798       // for example.
1799       // TODO: Add support for phi translation to handle the loop case.
1800       if (isa<MemoryPhi>(UseAccess))
1801         return false;
1802 
1803       // TODO: Checking for aliasing is expensive. Consider reducing the amount
1804       // of times this is called and/or caching it.
1805       Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
1806       if (isReadClobber(*MaybeLoc, UseInst)) {
1807         LLVM_DEBUG(dbgs() << "  ... hit read clobber " << *UseInst << ".\n");
1808         return false;
1809       }
1810 
1811       if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess))
1812         PushMemUses(UseDef);
1813     }
1814     return true;
1815   }
1816 
  /// If \p I is a memory terminator like llvm.lifetime.end or free, return a
1818   /// pair with the MemoryLocation terminated by \p I and a boolean flag
1819   /// indicating whether \p I is a free-like call.
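  /// For example, 'llvm.lifetime.end(i64 8, i8* %p)' terminates the 8 bytes
  /// at %p, while a call to free terminates everything reachable through its
  /// pointer argument.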
1820   Optional<std::pair<MemoryLocation, bool>>
1821   getLocForTerminator(Instruction *I) const {
1822     uint64_t Len;
1823     Value *Ptr;
1824     if (match(I, m_Intrinsic<Intrinsic::lifetime_end>(m_ConstantInt(Len),
1825                                                       m_Value(Ptr))))
1826       return {std::make_pair(MemoryLocation(Ptr, Len), false)};
1827 
1828     if (auto *CB = dyn_cast<CallBase>(I)) {
1829       if (isFreeCall(I, &TLI))
1830         return {std::make_pair(MemoryLocation::getAfter(CB->getArgOperand(0)),
1831                                true)};
1832     }
1833 
1834     return None;
1835   }
1836 
1837   /// Returns true if \p I is a memory terminator instruction like
1838   /// llvm.lifetime.end or free.
1839   bool isMemTerminatorInst(Instruction *I) const {
1840     IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
1841     return (II && II->getIntrinsicID() == Intrinsic::lifetime_end) ||
1842            isFreeCall(I, &TLI);
1843   }
1844 
1845   /// Returns true if \p MaybeTerm is a memory terminator for \p Loc from
1846   /// instruction \p AccessI.
1847   bool isMemTerminator(const MemoryLocation &Loc, Instruction *AccessI,
1848                        Instruction *MaybeTerm) {
1849     Optional<std::pair<MemoryLocation, bool>> MaybeTermLoc =
1850         getLocForTerminator(MaybeTerm);
1851 
1852     if (!MaybeTermLoc)
1853       return false;
1854 
1855     // If the terminator is a free-like call, all accesses to the underlying
1856     // object can be considered terminated.
1857     if (getUnderlyingObject(Loc.Ptr) !=
1858         getUnderlyingObject(MaybeTermLoc->first.Ptr))
1859       return false;
1860 
1861     auto TermLoc = MaybeTermLoc->first;
1862     if (MaybeTermLoc->second) {
1863       const Value *LocUO = getUnderlyingObject(Loc.Ptr);
1864       return BatchAA.isMustAlias(TermLoc.Ptr, LocUO);
1865     }
1866     int64_t InstWriteOffset, DepWriteOffset;
1867     return isOverwrite(MaybeTerm, AccessI, TermLoc, Loc, DL, TLI,
1868                        DepWriteOffset, InstWriteOffset, BatchAA,
1869                        &F) == OW_Complete;
1870   }
1871 
1872   // Returns true if \p Use may read from \p DefLoc.
1873   bool isReadClobber(const MemoryLocation &DefLoc, Instruction *UseInst) {
1874     if (isNoopIntrinsic(UseInst))
1875       return false;
1876 
    // Monotonic or weaker atomic stores can be re-ordered and do not need to
    // be treated as read clobbers.
1879     if (auto SI = dyn_cast<StoreInst>(UseInst))
1880       return isStrongerThan(SI->getOrdering(), AtomicOrdering::Monotonic);
1881 
1882     if (!UseInst->mayReadFromMemory())
1883       return false;
1884 
1885     if (auto *CB = dyn_cast<CallBase>(UseInst))
1886       if (CB->onlyAccessesInaccessibleMemory())
1887         return false;
1888 
1889     // NOTE: For calls, the number of stores removed could be slightly improved
1890     // by using AA.callCapturesBefore(UseInst, DefLoc, &DT), but that showed to
1891     // be expensive compared to the benefits in practice. For now, avoid more
1892     // expensive analysis to limit compile-time.
1893     return isRefSet(BatchAA.getModRefInfo(UseInst, DefLoc));
1894   }
1895 
1896   /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible
1897   /// loop. In particular, this guarantees that it only references a single
1898   /// MemoryLocation during execution of the containing function.
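  /// For example, an alloca, or a GEP with all-constant indices based on an
  /// alloca, always refers to the same location, while a GEP whose index is a
  /// loop-carried value may refer to a different element on every iteration.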
1899   bool IsGuaranteedLoopInvariant(Value *Ptr) {
1900     auto IsGuaranteedLoopInvariantBase = [this](Value *Ptr) {
1901       Ptr = Ptr->stripPointerCasts();
1902       if (auto *I = dyn_cast<Instruction>(Ptr)) {
1903         if (isa<AllocaInst>(Ptr))
1904           return true;
1905 
1906         if (isAllocLikeFn(I, &TLI))
1907           return true;
1908 
1909         return false;
1910       }
1911       return true;
1912     };
1913 
1914     Ptr = Ptr->stripPointerCasts();
1915     if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
1916       return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) &&
1917              GEP->hasAllConstantIndices();
1918     }
1919     return IsGuaranteedLoopInvariantBase(Ptr);
1920   }
1921 
1922   // Find a MemoryDef writing to \p DefLoc and dominating \p StartAccess, with
1923   // no read access between them or on any other path to a function exit block
1924   // if \p DefLoc is not accessible after the function returns. If there is no
1925   // such MemoryDef, return None. The returned value may not (completely)
1926   // overwrite \p DefLoc. Currently we bail out when we encounter an aliasing
1927   // MemoryUse (read).
1928   Optional<MemoryAccess *>
1929   getDomMemoryDef(MemoryDef *KillingDef, MemoryAccess *StartAccess,
1930                   const MemoryLocation &DefLoc, const Value *DefUO,
1931                   unsigned &ScanLimit, unsigned &WalkerStepLimit,
1932                   bool IsMemTerm, unsigned &PartialLimit) {
1933     if (ScanLimit == 0 || WalkerStepLimit == 0) {
1934       LLVM_DEBUG(dbgs() << "\n    ...  hit scan limit\n");
1935       return None;
1936     }
1937 
1938     MemoryAccess *Current = StartAccess;
1939     Instruction *KillingI = KillingDef->getMemoryInst();
1940     bool StepAgain;
1941     LLVM_DEBUG(dbgs() << "  trying to get dominating access\n");
1942 
1943     // Find the next clobbering Mod access for DefLoc, starting at StartAccess.
1944     Optional<MemoryLocation> CurrentLoc;
1945     do {
1946       StepAgain = false;
1947       LLVM_DEBUG({
1948         dbgs() << "   visiting " << *Current;
1949         if (!MSSA.isLiveOnEntryDef(Current) && isa<MemoryUseOrDef>(Current))
1950           dbgs() << " (" << *cast<MemoryUseOrDef>(Current)->getMemoryInst()
1951                  << ")";
1952         dbgs() << "\n";
1953       });
1954 
1955       // Reached TOP.
1956       if (MSSA.isLiveOnEntryDef(Current)) {
1957         LLVM_DEBUG(dbgs() << "   ...  found LiveOnEntryDef\n");
1958         return None;
1959       }
1960 
1961       // Cost of a step. Accesses in the same block are more likely to be valid
1962       // candidates for elimination, hence consider them cheaper.
1963       unsigned StepCost = KillingDef->getBlock() == Current->getBlock()
1964                               ? MemorySSASameBBStepCost
1965                               : MemorySSAOtherBBStepCost;
1966       if (WalkerStepLimit <= StepCost) {
1967         LLVM_DEBUG(dbgs() << "   ...  hit walker step limit\n");
1968         return None;
1969       }
1970       WalkerStepLimit -= StepCost;
1971 
1972       // Return for MemoryPhis. They cannot be eliminated directly and the
1973       // caller is responsible for traversing them.
1974       if (isa<MemoryPhi>(Current)) {
1975         LLVM_DEBUG(dbgs() << "   ...  found MemoryPhi\n");
1976         return Current;
1977       }
1978 
1979       // Below, check if CurrentDef is a valid candidate to be eliminated by
1980       // KillingDef. If it is not, check the next candidate.
1981       MemoryDef *CurrentDef = cast<MemoryDef>(Current);
1982       Instruction *CurrentI = CurrentDef->getMemoryInst();
1983 
1984       if (canSkipDef(CurrentDef, !isInvisibleToCallerBeforeRet(DefUO))) {
1985         StepAgain = true;
1986         Current = CurrentDef->getDefiningAccess();
1987         continue;
1988       }
1989 
1990       // Before we try to remove anything, check for any extra throwing
1991       // instructions that block us from DSEing
1992       if (mayThrowBetween(KillingI, CurrentI, DefUO)) {
1993         LLVM_DEBUG(dbgs() << "  ... skip, may throw!\n");
1994         return None;
1995       }
1996 
1997       // Check for anything that looks like it will be a barrier to further
1998       // removal
1999       if (isDSEBarrier(DefUO, CurrentI)) {
2000         LLVM_DEBUG(dbgs() << "  ... skip, barrier\n");
2001         return None;
2002       }
2003 
2004       // If Current is known to be on path that reads DefLoc or is a read
2005       // clobber, bail out, as the path is not profitable. We skip this check
2006       // for intrinsic calls, because the code knows how to handle memcpy
2007       // intrinsics.
2008       if (!isa<IntrinsicInst>(CurrentI) && isReadClobber(DefLoc, CurrentI))
2009         return None;
2010 
2011       // Quick check if there are direct uses that are read-clobbers.
2012       if (any_of(Current->uses(), [this, &DefLoc, StartAccess](Use &U) {
2013             if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(U.getUser()))
2014               return !MSSA.dominates(StartAccess, UseOrDef) &&
2015                      isReadClobber(DefLoc, UseOrDef->getMemoryInst());
2016             return false;
2017           })) {
2018         LLVM_DEBUG(dbgs() << "   ...  found a read clobber\n");
2019         return None;
2020       }
2021 
2022       // If Current cannot be analyzed or is not removable, check the next
2023       // candidate.
2024       if (!hasAnalyzableMemoryWrite(CurrentI, TLI) || !isRemovable(CurrentI)) {
2025         StepAgain = true;
2026         Current = CurrentDef->getDefiningAccess();
2027         continue;
2028       }
2029 
2030       // If Current does not have an analyzable write location, skip it
2031       CurrentLoc = getLocForWriteEx(CurrentI);
2032       if (!CurrentLoc) {
2033         StepAgain = true;
2034         Current = CurrentDef->getDefiningAccess();
2035         continue;
2036       }
2037 
2038       // AliasAnalysis does not account for loops. Limit elimination to
2039       // candidates for which we can guarantee they always store to the same
2040       // memory location and not multiple locations in a loop.
2041       if (Current->getBlock() != KillingDef->getBlock() &&
2042           !IsGuaranteedLoopInvariant(const_cast<Value *>(CurrentLoc->Ptr))) {
2043         StepAgain = true;
2044         Current = CurrentDef->getDefiningAccess();
2045         WalkerStepLimit -= 1;
2046         continue;
2047       }
2048 
2049       if (IsMemTerm) {
        // If the killing def is a memory terminator (e.g. lifetime.end), check
        // the next candidate if Current does not write the same underlying
        // object as the terminator.
2053         if (!isMemTerminator(*CurrentLoc, CurrentI, KillingI)) {
2054           StepAgain = true;
2055           Current = CurrentDef->getDefiningAccess();
2056         }
2057         continue;
2058       } else {
2059         int64_t InstWriteOffset, DepWriteOffset;
2060         auto OR = isOverwrite(KillingI, CurrentI, DefLoc, *CurrentLoc, DL, TLI,
2061                               DepWriteOffset, InstWriteOffset, BatchAA, &F);
2062         // If Current does not write to the same object as KillingDef, check
2063         // the next candidate.
2064         if (OR == OW_Unknown) {
2065           StepAgain = true;
2066           Current = CurrentDef->getDefiningAccess();
2067         } else if (OR == OW_MaybePartial) {
2068           // If KillingDef only partially overwrites Current, check the next
2069           // candidate if the partial step limit is exceeded. This aggressively
2070           // limits the number of candidates for partial store elimination,
2071           // which are less likely to be removable in the end.
2072           if (PartialLimit <= 1) {
2073             StepAgain = true;
2074             Current = CurrentDef->getDefiningAccess();
2075             WalkerStepLimit -= 1;
2076             continue;
2077           }
2078           PartialLimit -= 1;
2079         }
2080       }
2081     } while (StepAgain);
2082 
    // Accesses to objects accessible after the function returns can only be
    // eliminated if the access is killed along all paths to the exit. Collect
    // the blocks with killing (i.e. completely overwriting) MemoryDefs and
    // check if they cover all paths from EarlierAccess to any function exit.
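    // For example, a store to a global that is overwritten on only one branch
    // of a diamond is still visible at function exits reached through the
    // other branch and must be kept.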
2087     SmallPtrSet<Instruction *, 16> KillingDefs;
2088     KillingDefs.insert(KillingDef->getMemoryInst());
2089     MemoryAccess *EarlierAccess = Current;
2090     Instruction *EarlierMemInst =
2091         cast<MemoryDef>(EarlierAccess)->getMemoryInst();
2092     LLVM_DEBUG(dbgs() << "  Checking for reads of " << *EarlierAccess << " ("
2093                       << *EarlierMemInst << ")\n");
2094 
2095     SmallSetVector<MemoryAccess *, 32> WorkList;
2096     auto PushMemUses = [&WorkList](MemoryAccess *Acc) {
2097       for (Use &U : Acc->uses())
2098         WorkList.insert(cast<MemoryAccess>(U.getUser()));
2099     };
2100     PushMemUses(EarlierAccess);
2101 
2102     // Optimistically collect all accesses for reads. If we do not find any
2103     // read clobbers, add them to the cache.
2104     SmallPtrSet<MemoryAccess *, 16> KnownNoReads;
2105     if (!EarlierMemInst->mayReadFromMemory())
2106       KnownNoReads.insert(EarlierAccess);
2107     // Check if EarlierDef may be read.
2108     for (unsigned I = 0; I < WorkList.size(); I++) {
2109       MemoryAccess *UseAccess = WorkList[I];
2110 
2111       LLVM_DEBUG(dbgs() << "   " << *UseAccess);
2112       // Bail out if the number of accesses to check exceeds the scan limit.
2113       if (ScanLimit < (WorkList.size() - I)) {
2114         LLVM_DEBUG(dbgs() << "\n    ...  hit scan limit\n");
2115         return None;
2116       }
2117       --ScanLimit;
2118       NumDomMemDefChecks++;
2119       KnownNoReads.insert(UseAccess);
2120 
2121       if (isa<MemoryPhi>(UseAccess)) {
2122         if (any_of(KillingDefs, [this, UseAccess](Instruction *KI) {
2123               return DT.properlyDominates(KI->getParent(),
2124                                           UseAccess->getBlock());
2125             })) {
2126           LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing block\n");
2127           continue;
2128         }
2129         LLVM_DEBUG(dbgs() << "\n    ... adding PHI uses\n");
2130         PushMemUses(UseAccess);
2131         continue;
2132       }
2133 
2134       Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
2135       LLVM_DEBUG(dbgs() << " (" << *UseInst << ")\n");
2136 
2137       if (any_of(KillingDefs, [this, UseInst](Instruction *KI) {
2138             return DT.dominates(KI, UseInst);
2139           })) {
2140         LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing def\n");
2141         continue;
2142       }
2143 
      // A memory terminator kills all preceding MemoryDefs and all succeeding
      // MemoryAccesses. We do not have to check its users.
2146       if (isMemTerminator(*CurrentLoc, EarlierMemInst, UseInst)) {
2147         LLVM_DEBUG(
2148             dbgs()
2149             << " ... skipping, memterminator invalidates following accesses\n");
2150         continue;
2151       }
2152 
2153       if (isNoopIntrinsic(cast<MemoryUseOrDef>(UseAccess)->getMemoryInst())) {
2154         LLVM_DEBUG(dbgs() << "    ... adding uses of intrinsic\n");
2155         PushMemUses(UseAccess);
2156         continue;
2157       }
2158 
2159       if (UseInst->mayThrow() && !isInvisibleToCallerBeforeRet(DefUO)) {
2160         LLVM_DEBUG(dbgs() << "  ... found throwing instruction\n");
2161         return None;
2162       }
2163 
2164       // Uses which may read the original MemoryDef mean we cannot eliminate the
2165       // original MD. Stop walk.
2166       if (isReadClobber(*CurrentLoc, UseInst)) {
2167         LLVM_DEBUG(dbgs() << "    ... found read clobber\n");
2168         return None;
2169       }
2170 
2171       // For the KillingDef and EarlierAccess we only have to check if it reads
2172       // the memory location.
2173       // TODO: It would probably be better to check for self-reads before
2174       // calling the function.
2175       if (KillingDef == UseAccess || EarlierAccess == UseAccess) {
2176         LLVM_DEBUG(dbgs() << "    ... skipping killing def/dom access\n");
2177         continue;
2178       }
2179 
2180       // Check all uses for MemoryDefs, except for defs completely overwriting
2181       // the original location. Otherwise we have to check uses of *all*
2182       // MemoryDefs we discover, including non-aliasing ones. Otherwise we might
2183       // miss cases like the following
2184       //   1 = Def(LoE) ; <----- EarlierDef stores [0,1]
2185       //   2 = Def(1)   ; (2, 1) = NoAlias,   stores [2,3]
2186       //   Use(2)       ; MayAlias 2 *and* 1, loads [0, 3].
2187       //                  (The Use points to the *first* Def it may alias)
2188       //   3 = Def(1)   ; <---- Current  (3, 2) = NoAlias, (3,1) = MayAlias,
2189       //                  stores [0,1]
2190       if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) {
2191         if (isCompleteOverwrite(*CurrentLoc, EarlierMemInst, UseInst)) {
2192           if (!isInvisibleToCallerAfterRet(DefUO) &&
2193               UseAccess != EarlierAccess) {
2194             BasicBlock *MaybeKillingBlock = UseInst->getParent();
2195             if (PostOrderNumbers.find(MaybeKillingBlock)->second <
2196                 PostOrderNumbers.find(EarlierAccess->getBlock())->second) {
2197 
2198               LLVM_DEBUG(dbgs()
2199                          << "    ... found killing def " << *UseInst << "\n");
2200               KillingDefs.insert(UseInst);
2201             }
2202           }
2203         } else
2204           PushMemUses(UseDef);
2205       }
2206     }
2207 
2208     // For accesses to locations visible after the function returns, make sure
2209     // that the location is killed (=overwritten) along all paths from
2210     // EarlierAccess to the exit.
2211     if (!isInvisibleToCallerAfterRet(DefUO)) {
2212       SmallPtrSet<BasicBlock *, 16> KillingBlocks;
2213       for (Instruction *KD : KillingDefs)
2214         KillingBlocks.insert(KD->getParent());
2215       assert(!KillingBlocks.empty() &&
2216              "Expected at least a single killing block");
2217 
2218       // Find the common post-dominator of all killing blocks.
2219       BasicBlock *CommonPred = *KillingBlocks.begin();
2220       for (auto I = std::next(KillingBlocks.begin()), E = KillingBlocks.end();
2221            I != E; I++) {
2222         if (!CommonPred)
2223           break;
2224         CommonPred = PDT.findNearestCommonDominator(CommonPred, *I);
2225       }
2226 
2227       // If CommonPred is in the set of killing blocks, just check if it
2228       // post-dominates EarlierAccess.
2229       if (KillingBlocks.count(CommonPred)) {
2230         if (PDT.dominates(CommonPred, EarlierAccess->getBlock()))
2231           return {EarlierAccess};
2232         return None;
2233       }
2234 
2235       // If the common post-dominator does not post-dominate EarlierAccess,
2236       // there is a path from EarlierAccess to an exit not going through a
2237       // killing block.
2238       if (PDT.dominates(CommonPred, EarlierAccess->getBlock())) {
2239         SetVector<BasicBlock *> WorkList;
2240 
2241         // If CommonPred is null, there are multiple exits from the function.
2242         // They all have to be added to the worklist.
2243         if (CommonPred)
2244           WorkList.insert(CommonPred);
2245         else
2246           for (BasicBlock *R : PDT.roots())
2247             WorkList.insert(R);
2248 
2249         NumCFGTries++;
2250         // Check if all paths starting from an exit node go through one of the
2251         // killing blocks before reaching EarlierAccess.
2252         for (unsigned I = 0; I < WorkList.size(); I++) {
2253           NumCFGChecks++;
2254           BasicBlock *Current = WorkList[I];
2255           if (KillingBlocks.count(Current))
2256             continue;
2257           if (Current == EarlierAccess->getBlock())
2258             return None;
2259 
2260           // EarlierAccess is reachable from the entry, so we don't have to
2261           // explore unreachable blocks further.
2262           if (!DT.isReachableFromEntry(Current))
2263             continue;
2264 
2265           for (BasicBlock *Pred : predecessors(Current))
2266             WorkList.insert(Pred);
2267 
2268           if (WorkList.size() >= MemorySSAPathCheckLimit)
2269             return None;
2270         }
2271         NumCFGSuccess++;
2272         return {EarlierAccess};
2273       }
2274       return None;
2275     }
2276 
2277     // No aliasing MemoryUses of EarlierAccess found, EarlierAccess is
2278     // potentially dead.
2279     return {EarlierAccess};
2280   }
2281 
  // Delete dead memory defs and any operands that become trivially dead,
  // keeping MemorySSA and the overlap-interval maps in sync.
2283   void deleteDeadInstruction(Instruction *SI) {
2284     MemorySSAUpdater Updater(&MSSA);
2285     SmallVector<Instruction *, 32> NowDeadInsts;
2286     NowDeadInsts.push_back(SI);
2287     --NumFastOther;
2288 
2289     while (!NowDeadInsts.empty()) {
2290       Instruction *DeadInst = NowDeadInsts.pop_back_val();
2291       ++NumFastOther;
2292 
2293       // Try to preserve debug information attached to the dead instruction.
2294       salvageDebugInfo(*DeadInst);
2295       salvageKnowledge(DeadInst);
2296 
2297       // Remove the Instruction from MSSA.
2298       if (MemoryAccess *MA = MSSA.getMemoryAccess(DeadInst)) {
2299         if (MemoryDef *MD = dyn_cast<MemoryDef>(MA)) {
2300           SkipStores.insert(MD);
2301         }
2302         Updater.removeMemoryAccess(MA);
2303       }
2304 
2305       auto I = IOLs.find(DeadInst->getParent());
2306       if (I != IOLs.end())
2307         I->second.erase(DeadInst);
2308       // Remove its operands
2309       for (Use &O : DeadInst->operands())
2310         if (Instruction *OpI = dyn_cast<Instruction>(O)) {
2311           O = nullptr;
2312           if (isInstructionTriviallyDead(OpI, &TLI))
2313             NowDeadInsts.push_back(OpI);
2314         }
2315 
2316       DeadInst->eraseFromParent();
2317     }
2318   }
2319 
2320   // Check for any extra throws between SI and NI that block DSE.  This only
2321   // checks extra maythrows (those that aren't MemoryDef's). MemoryDef that may
2322   // throw are handled during the walk from one def to the next.
2323   bool mayThrowBetween(Instruction *SI, Instruction *NI,
2324                        const Value *SILocUnd) {
    // First see if we can ignore it by using the fact that SI stores to an
    // alloca/alloca-like object that is not visible to the caller during
    // execution of the function.
2328     if (SILocUnd && isInvisibleToCallerBeforeRet(SILocUnd))
2329       return false;
2330 
2331     if (SI->getParent() == NI->getParent())
2332       return ThrowingBlocks.count(SI->getParent());
2333     return !ThrowingBlocks.empty();
2334   }
2335 
2336   // Check if \p NI acts as a DSE barrier for \p SI. The following instructions
2337   // act as barriers:
  //  * A memory instruction that may throw, if \p SI accesses a non-stack
  //    object.
  //  * Atomic stores stronger than monotonic.
2341   bool isDSEBarrier(const Value *SILocUnd, Instruction *NI) {
    // If NI may throw, it acts as a barrier, unless SI accesses an
    // alloca/alloca-like object that does not escape.
2344     if (NI->mayThrow() && !isInvisibleToCallerBeforeRet(SILocUnd))
2345       return true;
2346 
2347     // If NI is an atomic load/store stronger than monotonic, do not try to
2348     // eliminate/reorder it.
2349     if (NI->isAtomic()) {
2350       if (auto *LI = dyn_cast<LoadInst>(NI))
2351         return isStrongerThanMonotonic(LI->getOrdering());
2352       if (auto *SI = dyn_cast<StoreInst>(NI))
2353         return isStrongerThanMonotonic(SI->getOrdering());
2354       if (auto *ARMW = dyn_cast<AtomicRMWInst>(NI))
2355         return isStrongerThanMonotonic(ARMW->getOrdering());
2356       if (auto *CmpXchg = dyn_cast<AtomicCmpXchgInst>(NI))
2357         return isStrongerThanMonotonic(CmpXchg->getSuccessOrdering()) ||
2358                isStrongerThanMonotonic(CmpXchg->getFailureOrdering());
2359       llvm_unreachable("other instructions should be skipped in MemorySSA");
2360     }
2361     return false;
2362   }
2363 
2364   /// Eliminate writes to objects that are not visible in the caller and are not
2365   /// accessed before returning from the function.
2366   bool eliminateDeadWritesAtEndOfFunction() {
2367     bool MadeChange = false;
2368     LLVM_DEBUG(
2369         dbgs()
2370         << "Trying to eliminate MemoryDefs at the end of the function\n");
2371     for (int I = MemDefs.size() - 1; I >= 0; I--) {
2372       MemoryDef *Def = MemDefs[I];
2373       if (SkipStores.contains(Def) || !isRemovable(Def->getMemoryInst()))
2374         continue;
2375 
2376       Instruction *DefI = Def->getMemoryInst();
2377       SmallVector<const Value *, 4> Pointers;
2378       auto DefLoc = getLocForWriteEx(DefI);
2379       if (!DefLoc)
2380         continue;
2381 
2382       // NOTE: Currently eliminating writes at the end of a function is limited
2383       // to MemoryDefs with a single underlying object, to save compile-time. In
2384       // practice it appears the case with multiple underlying objects is very
2385       // uncommon. If it turns out to be important, we can use
2386       // getUnderlyingObjects here instead.
2387       const Value *UO = getUnderlyingObject(DefLoc->Ptr);
2388       if (!UO || !isInvisibleToCallerAfterRet(UO))
2389         continue;
2390 
2391       if (isWriteAtEndOfFunction(Def)) {
2392         // See through pointer-to-pointer bitcasts
2393         LLVM_DEBUG(dbgs() << "   ... MemoryDef is not accessed until the end "
2394                              "of the function\n");
2395         deleteDeadInstruction(DefI);
2396         ++NumFastStores;
2397         MadeChange = true;
2398       }
2399     }
2400     return MadeChange;
2401   }
2402 
  /// \returns true if \p Def is a no-op store, either because it directly
  /// stores back a loaded value or stores zero to a calloc'ed object.
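  /// For example (illustrative IR):
  ///   %v = load i32, i32* %p
  ///   store i32 %v, i32* %p
  /// is a no-op if no other write to %p can occur between the load and the
  /// store.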
2405   bool storeIsNoop(MemoryDef *Def, const MemoryLocation &DefLoc,
2406                    const Value *DefUO) {
2407     StoreInst *Store = dyn_cast<StoreInst>(Def->getMemoryInst());
2408     if (!Store)
2409       return false;
2410 
2411     if (auto *LoadI = dyn_cast<LoadInst>(Store->getOperand(0))) {
2412       if (LoadI->getPointerOperand() == Store->getOperand(1)) {
2413         // Get the defining access for the load.
2414         auto *LoadAccess = MSSA.getMemoryAccess(LoadI)->getDefiningAccess();
2415         // Fast path: the defining accesses are the same.
2416         if (LoadAccess == Def->getDefiningAccess())
2417           return true;
2418 
2419         // Look through phi accesses. Recursively scan all phi accesses by
2420         // adding them to a worklist. Bail when we run into a memory def that
2421         // does not match LoadAccess.
2422         SetVector<MemoryAccess *> ToCheck;
2423         MemoryAccess *Current =
2424             MSSA.getWalker()->getClobberingMemoryAccess(Def);
2425         // We don't want to bail when we run into the store memory def. But,
2426         // the phi access may point to it. So, pretend like we've already
2427         // checked it.
2428         ToCheck.insert(Def);
2429         ToCheck.insert(Current);
2430         // Start at current (1) to simulate already having checked Def.
2431         for (unsigned I = 1; I < ToCheck.size(); ++I) {
2432           Current = ToCheck[I];
2433           if (auto PhiAccess = dyn_cast<MemoryPhi>(Current)) {
2434             // Check all the operands.
2435             for (auto &Use : PhiAccess->incoming_values())
2436               ToCheck.insert(cast<MemoryAccess>(&Use));
2437             continue;
2438           }
2439 
2440           // If we found a memory def, bail. This happens when we have an
2441           // unrelated write in between an otherwise noop store.
2442           assert(isa<MemoryDef>(Current) &&
2443                  "Only MemoryDefs should reach here.");
2444           // TODO: Skip no alias MemoryDefs that have no aliasing reads.
2445           // We are searching for the definition of the store's destination.
2446           // So, if that is the same definition as the load, then this is a
2447           // noop. Otherwise, fail.
2448           if (LoadAccess != Current)
2449             return false;
2450         }
2451         return true;
2452       }
2453     }
2454 
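    // Storing zero to memory freshly returned by calloc is also a no-op,
    // provided no other write to the location can occur in between (checked
    // below via the clobbering access).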
2455     Constant *StoredConstant = dyn_cast<Constant>(Store->getOperand(0));
2456     if (StoredConstant && StoredConstant->isNullValue()) {
2457       auto *DefUOInst = dyn_cast<Instruction>(DefUO);
2458       if (DefUOInst && isCallocLikeFn(DefUOInst, &TLI)) {
2459         auto *UnderlyingDef = cast<MemoryDef>(MSSA.getMemoryAccess(DefUOInst));
2460         // If UnderlyingDef is the clobbering access of Def, no instructions
2461         // between them can modify the memory location.
2462         auto *ClobberDef =
2463             MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def);
2464         return UnderlyingDef == ClobberDef;
2465       }
2466     }
2467     return false;
2468   }
2469 };
2470 
2471 bool eliminateDeadStoresMemorySSA(Function &F, AliasAnalysis &AA,
2472                                   MemorySSA &MSSA, DominatorTree &DT,
2473                                   PostDominatorTree &PDT,
2474                                   const TargetLibraryInfo &TLI) {
2475   bool MadeChange = false;
2476 
2477   DSEState State = DSEState::get(F, AA, MSSA, DT, PDT, TLI);
2478   // For each store:
2479   for (unsigned I = 0; I < State.MemDefs.size(); I++) {
2480     MemoryDef *KillingDef = State.MemDefs[I];
2481     if (State.SkipStores.count(KillingDef))
2482       continue;
2483     Instruction *SI = KillingDef->getMemoryInst();
2484 
2485     Optional<MemoryLocation> MaybeSILoc;
2486     if (State.isMemTerminatorInst(SI))
2487       MaybeSILoc = State.getLocForTerminator(SI).map(
2488           [](const std::pair<MemoryLocation, bool> &P) { return P.first; });
2489     else
2490       MaybeSILoc = State.getLocForWriteEx(SI);
2491 
2492     if (!MaybeSILoc) {
2493       LLVM_DEBUG(dbgs() << "Failed to find analyzable write location for "
2494                         << *SI << "\n");
2495       continue;
2496     }
2497     MemoryLocation SILoc = *MaybeSILoc;
2498     assert(SILoc.Ptr && "SILoc should not be null");
2499     const Value *SILocUnd = getUnderlyingObject(SILoc.Ptr);
2500 
2501     MemoryAccess *Current = KillingDef;
2502     LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs killed by "
2503                       << *KillingDef << " (" << *SI << ")\n");
2504 
2505     unsigned ScanLimit = MemorySSAScanLimit;
2506     unsigned WalkerStepLimit = MemorySSAUpwardsStepLimit;
2507     unsigned PartialLimit = MemorySSAPartialStoreLimit;
2508     // Worklist of MemoryAccesses that may be killed by KillingDef.
2509     SetVector<MemoryAccess *> ToCheck;
2510 
2511     if (SILocUnd)
2512       ToCheck.insert(KillingDef->getDefiningAccess());
2513 
2514     bool Shortend = false;
2515     bool IsMemTerm = State.isMemTerminatorInst(SI);
2516     // Check if MemoryAccesses in the worklist are killed by KillingDef.
2517     for (unsigned I = 0; I < ToCheck.size(); I++) {
2518       Current = ToCheck[I];
2519       if (State.SkipStores.count(Current))
2520         continue;
2521 
2522       Optional<MemoryAccess *> Next = State.getDomMemoryDef(
2523           KillingDef, Current, SILoc, SILocUnd, ScanLimit, WalkerStepLimit,
2524           IsMemTerm, PartialLimit);
2525 
2526       if (!Next) {
2527         LLVM_DEBUG(dbgs() << "  finished walk\n");
2528         continue;
2529       }
2530 
2531       MemoryAccess *EarlierAccess = *Next;
2532       LLVM_DEBUG(dbgs() << " Checking if we can kill " << *EarlierAccess);
2533       if (isa<MemoryPhi>(EarlierAccess)) {
2534         LLVM_DEBUG(dbgs() << "\n  ... adding incoming values to worklist\n");
2535         for (Value *V : cast<MemoryPhi>(EarlierAccess)->incoming_values()) {
2536           MemoryAccess *IncomingAccess = cast<MemoryAccess>(V);
2537           BasicBlock *IncomingBlock = IncomingAccess->getBlock();
2538           BasicBlock *PhiBlock = EarlierAccess->getBlock();
2539 
2540           // We only consider incoming MemoryAccesses that come before the
2541           // MemoryPhi. Otherwise we could discover candidates that do not
2542           // strictly dominate our starting def.
2543           if (State.PostOrderNumbers[IncomingBlock] >
2544               State.PostOrderNumbers[PhiBlock])
2545             ToCheck.insert(IncomingAccess);
2546         }
2547         continue;
2548       }
      auto *NextDef = cast<MemoryDef>(EarlierAccess);
      Instruction *NI = NextDef->getMemoryInst();
      LLVM_DEBUG(dbgs() << " (" << *NI << ")\n");
      ToCheck.insert(NextDef->getDefiningAccess());
      NumGetDomMemoryDefPassed++;

      if (!DebugCounter::shouldExecute(MemorySSACounter))
        continue;

      MemoryLocation NILoc = *State.getLocForWriteEx(NI);

      if (IsMemTerm) {
        const Value *NIUnd = getUnderlyingObject(NILoc.Ptr);
        if (SILocUnd != NIUnd)
          continue;
        LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: " << *NI
                          << "\n  KILLER: " << *SI << '\n');
        State.deleteDeadInstruction(NI);
        ++NumFastStores;
        MadeChange = true;
      } else {
        // Check if SI (the later, killing write) overwrites NI (the earlier
        // write).
        int64_t InstWriteOffset, DepWriteOffset;
        OverwriteResult OR =
            isOverwrite(SI, NI, SILoc, NILoc, State.DL, TLI, DepWriteOffset,
                        InstWriteOffset, State.BatchAA, &F);
        if (OR == OW_MaybePartial) {
          auto Iter = State.IOLs.insert(
              std::make_pair<BasicBlock *, InstOverlapIntervalsTy>(
                  NI->getParent(), InstOverlapIntervalsTy()));
          auto &IOL = Iter.first->second;
          OR = isPartialOverwrite(SILoc, NILoc, DepWriteOffset, InstWriteOffset,
                                  NI, IOL);
        }
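        // The interval map records which bytes of NI are covered by later
        // writes. Sketch only (sizes and offsets hypothetical): if NI is an
        // 8-byte store and two later stores cover bytes [0,4) and [4,8), the
        // recorded intervals together cover all of NI, so it can eventually
        // be reported as completely overwritten.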

        if (EnablePartialStoreMerging && OR == OW_PartialEarlierWithFullLater) {
          auto *Earlier = dyn_cast<StoreInst>(NI);
          auto *Later = dyn_cast<StoreInst>(SI);
          // We are re-using tryToMergePartialOverlappingStores, which requires
          // Earlier to dominate Later.
          // TODO: implement tryToMergePartialOverlappingStores using MemorySSA.
          if (Earlier && Later && DT.dominates(Earlier, Later)) {
            if (Constant *Merged = tryToMergePartialOverlappingStores(
                    Earlier, Later, InstWriteOffset, DepWriteOffset, State.DL,
                    State.BatchAA, &DT)) {

              // Update stored value of earlier store to merged constant.
              Earlier->setOperand(0, Merged);
              ++NumModifiedStores;
              MadeChange = true;

              Shortened = true;
              // Remove the later store and any outstanding overlap intervals
              // for the updated store.
              State.deleteDeadInstruction(Later);
              auto I = State.IOLs.find(Earlier->getParent());
              if (I != State.IOLs.end())
                I->second.erase(Earlier);
              break;
            }
          }
        }
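        // Hedged sketch of the merge (constants are hypothetical): with an
        // earlier "store i32 0, i32* %p" and a later constant i8 store into
        // one of its bytes, the later value is folded into the earlier
        // constant, the earlier store is updated in place, and the later
        // store is deleted.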

        if (OR == OW_Complete) {
          LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: " << *NI
                            << "\n  KILLER: " << *SI << '\n');
          State.deleteDeadInstruction(NI);
          ++NumFastStores;
          MadeChange = true;
        }
      }
    }

    // Check if the store is a no-op.
    if (!Shortened && isRemovable(SI) &&
        State.storeIsNoop(KillingDef, SILoc, SILocUnd)) {
      LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n  DEAD: " << *SI << '\n');
      State.deleteDeadInstruction(SI);
      NumRedundantStores++;
      MadeChange = true;
      continue;
    }
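    // A no-op store writes back a value the location already holds.
    // Illustrative sketch (names hypothetical):
    //   %v = load i32, i32* %p
    //   store i32 %v, i32* %p   ; removable, memory does not change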
  }

  if (EnablePartialOverwriteTracking)
    for (auto &KV : State.IOLs)
      MadeChange |= removePartiallyOverlappedStores(State.DL, KV.second, TLI);

  MadeChange |= State.eliminateDeadWritesAtEndOfFunction();
  return MadeChange;
}
} // end anonymous namespace

//===----------------------------------------------------------------------===//
// DSE Pass
//===----------------------------------------------------------------------===//
PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) {
  AliasAnalysis &AA = AM.getResult<AAManager>(F);
  const TargetLibraryInfo &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);

  bool Changed = false;
  if (EnableMemorySSA) {
    MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
    PostDominatorTree &PDT = AM.getResult<PostDominatorTreeAnalysis>(F);

    Changed = eliminateDeadStoresMemorySSA(F, AA, MSSA, DT, PDT, TLI);
  } else {
    MemoryDependenceResults &MD = AM.getResult<MemoryDependenceAnalysis>(F);

    Changed = eliminateDeadStores(F, &AA, &MD, &DT, &TLI);
  }

#ifdef LLVM_ENABLE_STATS
  if (AreStatisticsEnabled())
    for (auto &I : instructions(F))
      NumRemainingStores += isa<StoreInst>(&I);
#endif

  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  if (EnableMemorySSA)
    PA.preserve<MemorySSAAnalysis>();
  else
    PA.preserve<MemoryDependenceAnalysis>();
  return PA;
}
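// A minimal usage sketch for the new pass manager (the analysis-manager setup
// is assumed to follow the usual PassBuilder boilerplate and is omitted):
//   FunctionPassManager FPM;
//   FPM.addPass(DSEPass());
//   FPM.run(F, FAM);
// From the command line, the pass can be exercised with "opt -passes=dse -S".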

namespace {

/// A legacy pass for the legacy pass manager that wraps \c DSEPass.
class DSELegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  DSELegacyPass() : FunctionPass(ID) {
    initializeDSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
    DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    const TargetLibraryInfo &TLI =
        getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);

    bool Changed = false;
    if (EnableMemorySSA) {
      MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
      PostDominatorTree &PDT =
          getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();

      Changed = eliminateDeadStoresMemorySSA(F, AA, MSSA, DT, PDT, TLI);
    } else {
      MemoryDependenceResults &MD =
          getAnalysis<MemoryDependenceWrapperPass>().getMemDep();

      Changed = eliminateDeadStores(F, &AA, &MD, &DT, &TLI);
    }

#ifdef LLVM_ENABLE_STATS
    if (AreStatisticsEnabled())
      for (auto &I : instructions(F))
        NumRemainingStores += isa<StoreInst>(&I);
#endif

    return Changed;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();

    if (EnableMemorySSA) {
      AU.addRequired<PostDominatorTreeWrapperPass>();
      AU.addRequired<MemorySSAWrapperPass>();
      AU.addPreserved<PostDominatorTreeWrapperPass>();
      AU.addPreserved<MemorySSAWrapperPass>();
    } else {
      AU.addRequired<MemoryDependenceWrapperPass>();
      AU.addPreserved<MemoryDependenceWrapperPass>();
    }
  }
};

} // end anonymous namespace

char DSELegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false,
                    false)

FunctionPass *llvm::createDeadStoreEliminationPass() {
  return new DSELegacyPass();
}
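// A minimal usage sketch for the legacy pass manager (module setup omitted):
//   legacy::PassManager PM;
//   PM.add(createDeadStoreEliminationPass());
//   PM.run(M);
// or, via the legacy opt pipeline, "opt -dse -S".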