1 //===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the MemorySSA class.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "llvm/Analysis/MemorySSA.h"
14 #include "llvm/ADT/DenseMap.h"
15 #include "llvm/ADT/DenseMapInfo.h"
16 #include "llvm/ADT/DenseSet.h"
17 #include "llvm/ADT/DepthFirstIterator.h"
18 #include "llvm/ADT/Hashing.h"
19 #include "llvm/ADT/None.h"
20 #include "llvm/ADT/Optional.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/StringExtras.h"
25 #include "llvm/ADT/iterator.h"
26 #include "llvm/ADT/iterator_range.h"
27 #include "llvm/Analysis/AliasAnalysis.h"
28 #include "llvm/Analysis/CFGPrinter.h"
29 #include "llvm/Analysis/IteratedDominanceFrontier.h"
30 #include "llvm/Analysis/MemoryLocation.h"
31 #include "llvm/Config/llvm-config.h"
32 #include "llvm/IR/AssemblyAnnotationWriter.h"
33 #include "llvm/IR/BasicBlock.h"
34 #include "llvm/IR/Dominators.h"
35 #include "llvm/IR/Function.h"
36 #include "llvm/IR/Instruction.h"
37 #include "llvm/IR/Instructions.h"
38 #include "llvm/IR/IntrinsicInst.h"
39 #include "llvm/IR/Intrinsics.h"
40 #include "llvm/IR/LLVMContext.h"
41 #include "llvm/IR/PassManager.h"
42 #include "llvm/IR/Use.h"
43 #include "llvm/InitializePasses.h"
44 #include "llvm/Pass.h"
45 #include "llvm/Support/AtomicOrdering.h"
46 #include "llvm/Support/Casting.h"
47 #include "llvm/Support/CommandLine.h"
48 #include "llvm/Support/Compiler.h"
49 #include "llvm/Support/Debug.h"
50 #include "llvm/Support/ErrorHandling.h"
51 #include "llvm/Support/FormattedStream.h"
52 #include "llvm/Support/raw_ostream.h"
53 #include <algorithm>
54 #include <cassert>
55 #include <cstdlib>
56 #include <iterator>
57 #include <memory>
58 #include <utility>
59
60 using namespace llvm;
61
62 #define DEBUG_TYPE "memoryssa"
63
64 static cl::opt<std::string>
65 DotCFGMSSA("dot-cfg-mssa",
66 cl::value_desc("file name for generated dot file"),
67 cl::desc("file name for generated dot file"), cl::init(""));
68
69 INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
70 true)
71 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
72 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
73 INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
74 true)
75
76 INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
77 "Memory SSA Printer", false, false)
78 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
79 INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
80 "Memory SSA Printer", false, false)
81
82 static cl::opt<unsigned> MaxCheckLimit(
83 "memssa-check-limit", cl::Hidden, cl::init(100),
84 cl::desc("The maximum number of stores/phis MemorySSA"
85 "will consider trying to walk past (default = 100)"));
86
87 // Always verify MemorySSA if expensive checking is enabled.
88 #ifdef EXPENSIVE_CHECKS
89 bool llvm::VerifyMemorySSA = true;
90 #else
91 bool llvm::VerifyMemorySSA = false;
92 #endif
93 /// Enables MemorySSA as a dependency for loop passes in the legacy pass manager.
94 cl::opt<bool> llvm::EnableMSSALoopDependency(
95 "enable-mssa-loop-dependency", cl::Hidden, cl::init(true),
96 cl::desc("Enable MemorySSA dependency for loop pass manager"));
97
98 static cl::opt<bool, true>
99 VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA),
100 cl::Hidden, cl::desc("Enable verification of MemorySSA."));
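// For reference, this flag is typically exercised from the command line with
// something like (illustrative invocation, adjust to your setup):
//   opt -disable-output -passes='print<memoryssa>' -verify-memoryssa input.ll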
101
102 namespace llvm {
103
104 /// An assembly annotator class to print Memory SSA information in
105 /// comments.
106 class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
107 friend class MemorySSA;
108
109 const MemorySSA *MSSA;
110
111 public:
112 MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}
113
114 void emitBasicBlockStartAnnot(const BasicBlock *BB,
115 formatted_raw_ostream &OS) override {
116 if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
117 OS << "; " << *MA << "\n";
118 }
119
120 void emitInstructionAnnot(const Instruction *I,
121 formatted_raw_ostream &OS) override {
122 if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
123 OS << "; " << *MA << "\n";
124 }
125 };
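// As a sketch of what this annotator produces, a block containing a store and
// a load of the same pointer is printed roughly as (access numbering below is
// illustrative):
//
//   ; 1 = MemoryDef(liveOnEntry)
//   store i32 0, i32* %p
//   ; MemoryUse(1)
//   %v = load i32, i32* %p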
126
127 } // end namespace llvm
128
129 namespace {
130
131 /// Our current alias analysis API differentiates heavily between calls and
132 /// non-calls, and functions called on one usually assert on the other.
133 /// This class encapsulates the distinction to simplify other code that wants
134 /// "Memory affecting instructions and related data" to use as a key.
135 /// For example, this class is used as a densemap key in the use optimizer.
136 class MemoryLocOrCall {
137 public:
138 bool IsCall = false;
139
140 MemoryLocOrCall(MemoryUseOrDef *MUD)
141 : MemoryLocOrCall(MUD->getMemoryInst()) {}
142 MemoryLocOrCall(const MemoryUseOrDef *MUD)
143 : MemoryLocOrCall(MUD->getMemoryInst()) {}
144
145 MemoryLocOrCall(Instruction *Inst) {
146 if (auto *C = dyn_cast<CallBase>(Inst)) {
147 IsCall = true;
148 Call = C;
149 } else {
150 IsCall = false;
151 // There is no such thing as a MemoryLocation for a fence inst, and it is
152 // unique in that regard.
153 if (!isa<FenceInst>(Inst))
154 Loc = MemoryLocation::get(Inst);
155 }
156 }
157
158 explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}
159
160 const CallBase *getCall() const {
161 assert(IsCall);
162 return Call;
163 }
164
165 MemoryLocation getLoc() const {
166 assert(!IsCall);
167 return Loc;
168 }
169
170 bool operator==(const MemoryLocOrCall &Other) const {
171 if (IsCall != Other.IsCall)
172 return false;
173
174 if (!IsCall)
175 return Loc == Other.Loc;
176
177 if (Call->getCalledOperand() != Other.Call->getCalledOperand())
178 return false;
179
180 return Call->arg_size() == Other.Call->arg_size() &&
181 std::equal(Call->arg_begin(), Call->arg_end(),
182 Other.Call->arg_begin());
183 }
184
185 private:
186 union {
187 const CallBase *Call;
188 MemoryLocation Loc;
189 };
190 };
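// Illustration of the key semantics above: two call sites compare equal only
// when they have the same called operand and identical argument lists, e.g.
// two occurrences of `call void @f(i32* %p)` produce the same key, whereas
// `call void @f(i32* %q)` does not. Non-call instructions are keyed purely by
// their MemoryLocation.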
191
192 } // end anonymous namespace
193
194 namespace llvm {
195
196 template <> struct DenseMapInfo<MemoryLocOrCall> {
197 static inline MemoryLocOrCall getEmptyKey() {
198 return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
199 }
200
201 static inline MemoryLocOrCall getTombstoneKey() {
202 return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
203 }
204
205 static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
206 if (!MLOC.IsCall)
207 return hash_combine(
208 MLOC.IsCall,
209 DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));
210
211 hash_code hash =
212 hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
213 MLOC.getCall()->getCalledOperand()));
214
215 for (const Value *Arg : MLOC.getCall()->args())
216 hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
217 return hash;
218 }
219
220 static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
221 return LHS == RHS;
222 }
223 };
224
225 } // end namespace llvm
226
227 /// This does one-way checks to see if Use could theoretically be hoisted above
228 /// MayClobber. This will not check the other way around.
229 ///
230 /// This assumes that, for the purposes of MemorySSA, Use comes directly after
231 /// MayClobber, with no potentially clobbering operations in between them.
232 /// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
233 static bool areLoadsReorderable(const LoadInst *Use,
234 const LoadInst *MayClobber) {
235 bool VolatileUse = Use->isVolatile();
236 bool VolatileClobber = MayClobber->isVolatile();
237 // Volatile operations may never be reordered with other volatile operations.
238 if (VolatileUse && VolatileClobber)
239 return false;
240 // Otherwise, volatile doesn't matter here. From the language reference:
241 // 'optimizers may change the order of volatile operations relative to
242 // non-volatile operations.'
243
244 // If a load is seq_cst, it cannot be moved above other loads. If its ordering
245 // is weaker, it can be moved above other loads. We just need to be sure that
246 // MayClobber isn't an acquire load, because loads can't be moved above
247 // acquire loads.
248 //
249 // Note that this explicitly *does* allow the free reordering of monotonic (or
250 // weaker) loads of the same address.
251 bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
252 bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
253 AtomicOrdering::Acquire);
254 return !(SeqCstUse || MayClobberIsAcquire);
255 }
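// For example (sketch): given
//   %a = load atomic i32, i32* @g acquire    ; MayClobber
//   %b = load i32, i32* @g                   ; Use
// the acquire ordering on MayClobber makes this return false, whereas two
// monotonic (or plain) loads of the same address would return true and be
// freely reorderable.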
256
257 namespace {
258
259 struct ClobberAlias {
260 bool IsClobber;
261 Optional<AliasResult> AR;
262 };
263
264 } // end anonymous namespace
265
266 // Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
267 // ignored if IsClobber = false.
268 template <typename AliasAnalysisType>
269 static ClobberAlias
270 instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
271 const Instruction *UseInst, AliasAnalysisType &AA) {
272 Instruction *DefInst = MD->getMemoryInst();
273 assert(DefInst && "Defining instruction not actually an instruction");
274 Optional<AliasResult> AR;
275
276 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
277 // These intrinsics will show up as affecting memory, but they are just
278 // markers, mostly.
279 //
280 // FIXME: We probably don't actually want MemorySSA to model these at all
281 // (including creating MemoryAccesses for them): we just end up inventing
282 // clobbers where they don't really exist at all. Please see D43269 for
283 // context.
284 switch (II->getIntrinsicID()) {
285 case Intrinsic::invariant_start:
286 case Intrinsic::invariant_end:
287 case Intrinsic::assume:
288 case Intrinsic::experimental_noalias_scope_decl:
289 return {false, AliasResult(AliasResult::NoAlias)};
290 case Intrinsic::dbg_addr:
291 case Intrinsic::dbg_declare:
292 case Intrinsic::dbg_label:
293 case Intrinsic::dbg_value:
294 llvm_unreachable("debuginfo shouldn't have associated defs!");
295 default:
296 break;
297 }
298 }
299
300 if (auto *CB = dyn_cast_or_null<CallBase>(UseInst)) {
301 ModRefInfo I = AA.getModRefInfo(DefInst, CB);
302 AR = isMustSet(I) ? AliasResult::MustAlias : AliasResult::MayAlias;
303 return {isModOrRefSet(I), AR};
304 }
305
306 if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
307 if (auto *UseLoad = dyn_cast_or_null<LoadInst>(UseInst))
308 return {!areLoadsReorderable(UseLoad, DefLoad),
309 AliasResult(AliasResult::MayAlias)};
310
311 ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
312 AR = isMustSet(I) ? AliasResult::MustAlias : AliasResult::MayAlias;
313 return {isModSet(I), AR};
314 }
315
316 template <typename AliasAnalysisType>
317 static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
318 const MemoryUseOrDef *MU,
319 const MemoryLocOrCall &UseMLOC,
320 AliasAnalysisType &AA) {
321 // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
322 // to exist while MemoryLocOrCall is pushed through places.
323 if (UseMLOC.IsCall)
324 return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
325 AA);
326 return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
327 AA);
328 }
329
330 // Return true when MD may alias MU, return false otherwise.
331 bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
332 AliasAnalysis &AA) {
333 return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber;
334 }
335
336 namespace {
337
338 struct UpwardsMemoryQuery {
339 // True if our original query started off as a call
340 bool IsCall = false;
341 // The pointer location we started the query with. This will be empty if
342 // IsCall is true.
343 MemoryLocation StartingLoc;
344 // This is the instruction we were querying about.
345 const Instruction *Inst = nullptr;
346 // The MemoryAccess we actually got called with, used to test local domination
347 const MemoryAccess *OriginalAccess = nullptr;
348 Optional<AliasResult> AR = AliasResult(AliasResult::MayAlias);
349 bool SkipSelfAccess = false;
350
351 UpwardsMemoryQuery() = default;
352
353 UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
354 : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
355 if (!IsCall)
356 StartingLoc = MemoryLocation::get(Inst);
357 }
358 };
359
360 } // end anonymous namespace
361
362 template <typename AliasAnalysisType>
363 static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
364 const Instruction *I) {
365 // If the memory can't be changed, then loads of the memory can't be
366 // clobbered.
367 if (auto *LI = dyn_cast<LoadInst>(I))
368 return I->hasMetadata(LLVMContext::MD_invariant_load) ||
369 AA.pointsToConstantMemory(MemoryLocation::get(LI));
370 return false;
371 }
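// For example (sketch), a load marked invariant is trivially optimizable:
//   %v = load i32, i32* %p, !invariant.load !0
// as is any load whose location AA can prove points to constant memory (e.g.
// a load from a constant global).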
372
373 /// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
374 /// in between `Start` and `ClobberAt` can clobber `Start`.
375 ///
376 /// This is meant to be as simple and self-contained as possible. Because it
377 /// uses no cache, etc., it can be relatively expensive.
378 ///
379 /// \param Start The MemoryAccess that we want to walk from.
380 /// \param ClobberAt A clobber for Start.
381 /// \param StartLoc The MemoryLocation for Start.
382 /// \param MSSA The MemorySSA instance that Start and ClobberAt belong to.
383 /// \param Query The UpwardsMemoryQuery we used for our search.
384 /// \param AA The AliasAnalysis we used for our search.
385 /// \param AllowImpreciseClobber Always false, unless we do relaxed verify.
386
387 template <typename AliasAnalysisType>
388 LLVM_ATTRIBUTE_UNUSED static void
389 checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
390 const MemoryLocation &StartLoc, const MemorySSA &MSSA,
391 const UpwardsMemoryQuery &Query, AliasAnalysisType &AA,
392 bool AllowImpreciseClobber = false) {
393 assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");
394
395 if (MSSA.isLiveOnEntryDef(Start)) {
396 assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
397 "liveOnEntry must clobber itself");
398 return;
399 }
400
401 bool FoundClobber = false;
402 DenseSet<ConstMemoryAccessPair> VisitedPhis;
403 SmallVector<ConstMemoryAccessPair, 8> Worklist;
404 Worklist.emplace_back(Start, StartLoc);
405 // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
406 // is found, complain.
407 while (!Worklist.empty()) {
408 auto MAP = Worklist.pop_back_val();
409 // All we care about is that nothing from Start to ClobberAt clobbers Start.
410 // We learn nothing from revisiting nodes.
411 if (!VisitedPhis.insert(MAP).second)
412 continue;
413
414 for (const auto *MA : def_chain(MAP.first)) {
415 if (MA == ClobberAt) {
416 if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
417 // instructionClobbersQuery isn't free, so don't use `|=`,
418 // since it won't let us short-circuit.
419 //
420 // Also, note that this can't be hoisted out of the `Worklist` loop,
421 // since MD may only act as a clobber for 1 of N MemoryLocations.
422 FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
423 if (!FoundClobber) {
424 ClobberAlias CA =
425 instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
426 if (CA.IsClobber) {
427 FoundClobber = true;
428 // Not used: CA.AR;
429 }
430 }
431 }
432 break;
433 }
434
435 // We should never hit liveOnEntry, unless it's the clobber.
436 assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");
437
438 if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
439 // If Start is a Def, skip self.
440 if (MD == Start)
441 continue;
442
443 assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)
444 .IsClobber &&
445 "Found clobber before reaching ClobberAt!");
446 continue;
447 }
448
449 if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
450 (void)MU;
451 assert (MU == Start &&
452 "Can only find use in def chain if Start is a use");
453 continue;
454 }
455
456 assert(isa<MemoryPhi>(MA));
457
458 // Add reachable phi predecessors
459 for (auto ItB = upward_defs_begin(
460 {const_cast<MemoryAccess *>(MA), MAP.second},
461 MSSA.getDomTree()),
462 ItE = upward_defs_end();
463 ItB != ItE; ++ItB)
464 if (MSSA.getDomTree().isReachableFromEntry(ItB.getPhiArgBlock()))
465 Worklist.emplace_back(*ItB);
466 }
467 }
468
469 // If the verify is done following an optimization, it's possible that
470 // ClobberAt was a conservative clobbering, that we can now infer is not a
471 // true clobbering access. Don't fail the verify if that's the case.
472 // We do have accesses that claim they're optimized, but could be optimized
473 // further. Updating all these can be expensive, so allow it for now (FIXME).
474 if (AllowImpreciseClobber)
475 return;
476
477 // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
478 // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
479 assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
480 "ClobberAt never acted as a clobber");
481 }
482
483 namespace {
484
485 /// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
486 /// in one class.
487 template <class AliasAnalysisType> class ClobberWalker {
488 /// Save a few bytes by using unsigned instead of size_t.
489 using ListIndex = unsigned;
490
491 /// Represents a span of contiguous MemoryDefs, potentially ending in a
492 /// MemoryPhi.
493 struct DefPath {
494 MemoryLocation Loc;
495 // Note that, because we always walk in reverse, Last will always dominate
496 // First. Also note that First and Last are inclusive.
497 MemoryAccess *First;
498 MemoryAccess *Last;
499 Optional<ListIndex> Previous;
500
501 DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
502 Optional<ListIndex> Previous)
503 : Loc(Loc), First(First), Last(Last), Previous(Previous) {}
504
505 DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
506 Optional<ListIndex> Previous)
507 : DefPath(Loc, Init, Init, Previous) {}
508 };
509
510 const MemorySSA &MSSA;
511 AliasAnalysisType &AA;
512 DominatorTree &DT;
513 UpwardsMemoryQuery *Query;
514 unsigned *UpwardWalkLimit;
515
516 // Phi optimization bookkeeping:
517 // List of DefPath to process during the current phi optimization walk.
518 SmallVector<DefPath, 32> Paths;
519 // List of visited <Access, Location> pairs; we can skip paths already
520 // visited with the same memory location.
521 DenseSet<ConstMemoryAccessPair> VisitedPhis;
522 // Record if phi translation has been performed during the current phi
523 // optimization walk, as merging alias results after phi translation can
524 // yield incorrect results. Context in PR46156.
525 bool PerformedPhiTranslation = false;
526
527 /// Find the nearest def or phi that `From` can legally be optimized to.
528 const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
529 assert(From->getNumOperands() && "Phi with no operands?");
530
531 BasicBlock *BB = From->getBlock();
532 MemoryAccess *Result = MSSA.getLiveOnEntryDef();
533 DomTreeNode *Node = DT.getNode(BB);
534 while ((Node = Node->getIDom())) {
535 auto *Defs = MSSA.getBlockDefs(Node->getBlock());
536 if (Defs)
537 return &*Defs->rbegin();
538 }
539 return Result;
540 }
541
542 /// Result of calling walkToPhiOrClobber.
543 struct UpwardsWalkResult {
544 /// The "Result" of the walk. Either a clobber, the last thing we walked, or
545 /// both. Includes alias info when a clobber is found.
546 MemoryAccess *Result;
547 bool IsKnownClobber;
548 Optional<AliasResult> AR;
549 };
550
551 /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
552 /// This will update Desc.Last as it walks. It will (optionally) also stop at
553 /// StopAt.
554 ///
555 /// This does not test for whether StopAt is a clobber
556 UpwardsWalkResult
557 walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
558 const MemoryAccess *SkipStopAt = nullptr) const {
559 assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
560 assert(UpwardWalkLimit && "Need a valid walk limit");
561 bool LimitAlreadyReached = false;
562 // (*UpwardWalkLimit) may be 0 here, due to the loop in tryOptimizePhi. Set
563 // it to 1. This will not do any alias() calls. It either returns in the
564 // first iteration in the loop below, or is set back to 0 if all def chains
565 // are free of MemoryDefs.
566 if (!*UpwardWalkLimit) {
567 *UpwardWalkLimit = 1;
568 LimitAlreadyReached = true;
569 }
570
571 for (MemoryAccess *Current : def_chain(Desc.Last)) {
572 Desc.Last = Current;
573 if (Current == StopAt || Current == SkipStopAt)
574 return {Current, false, AliasResult(AliasResult::MayAlias)};
575
576 if (auto *MD = dyn_cast<MemoryDef>(Current)) {
577 if (MSSA.isLiveOnEntryDef(MD))
578 return {MD, true, AliasResult(AliasResult::MustAlias)};
579
580 if (!--*UpwardWalkLimit)
581 return {Current, true, AliasResult(AliasResult::MayAlias)};
582
583 ClobberAlias CA =
584 instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
585 if (CA.IsClobber)
586 return {MD, true, CA.AR};
587 }
588 }
589
590 if (LimitAlreadyReached)
591 *UpwardWalkLimit = 0;
592
593 assert(isa<MemoryPhi>(Desc.Last) &&
594 "Ended at a non-clobber that's not a phi?");
595 return {Desc.Last, false, AliasResult(AliasResult::MayAlias)};
596 }
597
598 void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
599 ListIndex PriorNode) {
600 auto UpwardDefsBegin = upward_defs_begin({Phi, Paths[PriorNode].Loc}, DT,
601 &PerformedPhiTranslation);
602 auto UpwardDefs = make_range(UpwardDefsBegin, upward_defs_end());
603 for (const MemoryAccessPair &P : UpwardDefs) {
604 PausedSearches.push_back(Paths.size());
605 Paths.emplace_back(P.second, P.first, PriorNode);
606 }
607 }
608
609 /// Represents a search that terminated after finding a clobber. This clobber
610 /// may or may not be present in the path of defs from LastNode..SearchStart,
611 /// since it may have been retrieved from cache.
612 struct TerminatedPath {
613 MemoryAccess *Clobber;
614 ListIndex LastNode;
615 };
616
617 /// Get an access that keeps us from optimizing to the given phi.
618 ///
619 /// PausedSearches is an array of indices into the Paths array. Its incoming
620 /// value is the indices of searches that stopped at the last phi optimization
621 /// target. It's left in an unspecified state.
622 ///
623 /// If this returns None, NewPaused is a vector of searches that terminated
624 /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
625 Optional<TerminatedPath>
626 getBlockingAccess(const MemoryAccess *StopWhere,
627 SmallVectorImpl<ListIndex> &PausedSearches,
628 SmallVectorImpl<ListIndex> &NewPaused,
629 SmallVectorImpl<TerminatedPath> &Terminated) {
630 assert(!PausedSearches.empty() && "No searches to continue?");
631
632 // BFS vs DFS really doesn't make a difference here, so just do a DFS with
633 // PausedSearches as our stack.
634 while (!PausedSearches.empty()) {
635 ListIndex PathIndex = PausedSearches.pop_back_val();
636 DefPath &Node = Paths[PathIndex];
637
638 // If we've already visited this path with this MemoryLocation, we don't
639 // need to do so again.
640 //
641 // NOTE: That we just drop these paths on the ground makes caching
642 // behavior sporadic. e.g. given a diamond:
643 // A
644 // B C
645 // D
646 //
647 // ...If we walk D, B, A, C, we'll only cache the result of phi
648 // optimization for A, B, and D; C will be skipped because it dies here.
649 // This arguably isn't the worst thing ever, since:
650 // - We generally query things in a top-down order, so if we got below D
651 // without needing cache entries for {C, MemLoc}, then chances are
652 // that those cache entries would end up ultimately unused.
653 // - We still cache things for A, so C only needs to walk up a bit.
654 // If this behavior becomes problematic, we can fix without a ton of extra
655 // work.
656 if (!VisitedPhis.insert({Node.Last, Node.Loc}).second) {
657 if (PerformedPhiTranslation) {
658 // If visiting this path performed Phi translation, don't continue,
659 // since it may not be correct to merge results from two paths if one
660 // relies on the phi translation.
661 TerminatedPath Term{Node.Last, PathIndex};
662 return Term;
663 }
664 continue;
665 }
666
667 const MemoryAccess *SkipStopWhere = nullptr;
668 if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
669 assert(isa<MemoryDef>(Query->OriginalAccess));
670 SkipStopWhere = Query->OriginalAccess;
671 }
672
673 UpwardsWalkResult Res = walkToPhiOrClobber(Node,
674 /*StopAt=*/StopWhere,
675 /*SkipStopAt=*/SkipStopWhere);
676 if (Res.IsKnownClobber) {
677 assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);
678
679 // If this wasn't a cache hit, we hit a clobber when walking. That's a
680 // failure.
681 TerminatedPath Term{Res.Result, PathIndex};
682 if (!MSSA.dominates(Res.Result, StopWhere))
683 return Term;
684
685 // Otherwise, it's a valid thing to potentially optimize to.
686 Terminated.push_back(Term);
687 continue;
688 }
689
690 if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
691 // We've hit our target. Save this path off for if we want to continue
692 // walking. If we are in the mode of skipping the OriginalAccess, and
693 // we've reached back to the OriginalAccess, do not save path, we've
694 // just looped back to self.
695 if (Res.Result != SkipStopWhere)
696 NewPaused.push_back(PathIndex);
697 continue;
698 }
699
700 assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
701 addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
702 }
703
704 return None;
705 }
706
707 template <typename T, typename Walker>
708 struct generic_def_path_iterator
709 : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
710 std::forward_iterator_tag, T *> {
711 generic_def_path_iterator() {}
712 generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}
713
714 T &operator*() const { return curNode(); }
715
716 generic_def_path_iterator &operator++() {
717 N = curNode().Previous;
718 return *this;
719 }
720
721 bool operator==(const generic_def_path_iterator &O) const {
722 if (N.hasValue() != O.N.hasValue())
723 return false;
724 return !N.hasValue() || *N == *O.N;
725 }
726
727 private:
728 T &curNode() const { return W->Paths[*N]; }
729
730 Walker *W = nullptr;
731 Optional<ListIndex> N = None;
732 };
733
734 using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
735 using const_def_path_iterator =
736 generic_def_path_iterator<const DefPath, const ClobberWalker>;
737
738 iterator_range<def_path_iterator> def_path(ListIndex From) {
739 return make_range(def_path_iterator(this, From), def_path_iterator());
740 }
741
742 iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
743 return make_range(const_def_path_iterator(this, From),
744 const_def_path_iterator());
745 }
746
747 struct OptznResult {
748 /// The path that contains our result.
749 TerminatedPath PrimaryClobber;
750 /// The paths that we can legally cache back from, but that aren't
751 /// necessarily the result of the Phi optimization.
752 SmallVector<TerminatedPath, 4> OtherClobbers;
753 };
754
755 ListIndex defPathIndex(const DefPath &N) const {
756 // The assert looks nicer if we don't need to do &N
757 const DefPath *NP = &N;
758 assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
759 "Out of bounds DefPath!");
760 return NP - &Paths.front();
761 }
762
763 /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
764 /// that act as legal clobbers. Note that this won't return *all* clobbers.
765 ///
766 /// Phi optimization algorithm tl;dr:
767 /// - Find the earliest def/phi, A, we can optimize to
768 /// - Find if all paths from the starting memory access ultimately reach A
769 /// - If not, optimization isn't possible.
770 /// - Otherwise, walk from A to another clobber or phi, A'.
771 /// - If A' is a def, we're done.
772 /// - If A' is a phi, try to optimize it.
773 ///
774 /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
775 /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
776 OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
777 const MemoryLocation &Loc) {
778 assert(Paths.empty() && VisitedPhis.empty() && !PerformedPhiTranslation &&
779 "Reset the optimization state.");
780
781 Paths.emplace_back(Loc, Start, Phi, None);
782 // Stores how many "valid" optimization nodes we had prior to calling
783 // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
784 auto PriorPathsSize = Paths.size();
785
786 SmallVector<ListIndex, 16> PausedSearches;
787 SmallVector<ListIndex, 8> NewPaused;
788 SmallVector<TerminatedPath, 4> TerminatedPaths;
789
790 addSearches(Phi, PausedSearches, 0);
791
792 // Moves the TerminatedPath with the "most dominated" Clobber to the end of
793 // Paths.
794 auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
795 assert(!Paths.empty() && "Need a path to move");
796 auto Dom = Paths.begin();
797 for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
798 if (!MSSA.dominates(I->Clobber, Dom->Clobber))
799 Dom = I;
800 auto Last = Paths.end() - 1;
801 if (Last != Dom)
802 std::iter_swap(Last, Dom);
803 };
804
805 MemoryPhi *Current = Phi;
806 while (true) {
807 assert(!MSSA.isLiveOnEntryDef(Current) &&
808 "liveOnEntry wasn't treated as a clobber?");
809
810 const auto *Target = getWalkTarget(Current);
811 // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
812 // optimization for the prior phi.
813 assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
814 return MSSA.dominates(P.Clobber, Target);
815 }));
816
817 // FIXME: This is broken, because the Blocker may be reported to be
818 // liveOnEntry, and we'll happily wait for that to disappear (read: never)
819 // For the moment, this is fine, since we do nothing with blocker info.
820 if (Optional<TerminatedPath> Blocker = getBlockingAccess(
821 Target, PausedSearches, NewPaused, TerminatedPaths)) {
822
823 // Find the node we started at. We can't search based on N->Last, since
824 // we may have gone around a loop with a different MemoryLocation.
825 auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
826 return defPathIndex(N) < PriorPathsSize;
827 });
828 assert(Iter != def_path_iterator());
829
830 DefPath &CurNode = *Iter;
831 assert(CurNode.Last == Current);
832
833 // Two things:
834 // A. We can't reliably cache all of NewPaused back. Consider a case
835 // where we have two paths in NewPaused; one of which can't optimize
836 // above this phi, whereas the other can. If we cache the second path
837 // back, we'll end up with suboptimal cache entries. We can handle
838 // cases like this a bit better when we either try to find all
839 // clobbers that block phi optimization, or when our cache starts
840 // supporting unfinished searches.
841 // B. We can't reliably cache TerminatedPaths back here without doing
842 // extra checks; consider a case like:
843 //       T
844 //      / \
845 //     D   C
846 //      \ /
847 //       S
848 // Where T is our target, C is a node with a clobber on it, D is a
849 // diamond (with a clobber *only* on the left or right node, N), and
850 // S is our start. Say we walk to D, through the node opposite N
851 // (read: ignoring the clobber), and see a cache entry in the top
852 // node of D. That cache entry gets put into TerminatedPaths. We then
853 // walk up to C (N is later in our worklist), find the clobber, and
854 // quit. If we append TerminatedPaths to OtherClobbers, we'll cache
855 // the bottom part of D to the cached clobber, ignoring the clobber
856 // in N. Again, this problem goes away if we start tracking all
857 // blockers for a given phi optimization.
858 TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
859 return {Result, {}};
860 }
861
862 // If there's nothing left to search, then all paths led to valid clobbers
863 // that we got from our cache; pick the nearest to the start, and allow
864 // the rest to be cached back.
865 if (NewPaused.empty()) {
866 MoveDominatedPathToEnd(TerminatedPaths);
867 TerminatedPath Result = TerminatedPaths.pop_back_val();
868 return {Result, std::move(TerminatedPaths)};
869 }
870
871 MemoryAccess *DefChainEnd = nullptr;
872 SmallVector<TerminatedPath, 4> Clobbers;
873 for (ListIndex Paused : NewPaused) {
874 UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
875 if (WR.IsKnownClobber)
876 Clobbers.push_back({WR.Result, Paused});
877 else
878 // Micro-opt: If we hit the end of the chain, save it.
879 DefChainEnd = WR.Result;
880 }
881
882 if (!TerminatedPaths.empty()) {
883 // If we couldn't find the dominating phi/liveOnEntry in the above loop,
884 // do it now.
885 if (!DefChainEnd)
886 for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
887 DefChainEnd = MA;
888 assert(DefChainEnd && "Failed to find dominating phi/liveOnEntry");
889
890 // If any of the terminated paths don't dominate the phi we'll try to
891 // optimize, we need to figure out what they are and quit.
892 const BasicBlock *ChainBB = DefChainEnd->getBlock();
893 for (const TerminatedPath &TP : TerminatedPaths) {
894 // Because we know that DefChainEnd is as "high" as we can go, we
895 // don't need local dominance checks; BB dominance is sufficient.
896 if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
897 Clobbers.push_back(TP);
898 }
899 }
900
901 // If we have clobbers in the def chain, find the one closest to Current
902 // and quit.
903 if (!Clobbers.empty()) {
904 MoveDominatedPathToEnd(Clobbers);
905 TerminatedPath Result = Clobbers.pop_back_val();
906 return {Result, std::move(Clobbers)};
907 }
908
909 assert(all_of(NewPaused,
910 [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));
911
912 // Because liveOnEntry is a clobber, this must be a phi.
913 auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);
914
915 PriorPathsSize = Paths.size();
916 PausedSearches.clear();
917 for (ListIndex I : NewPaused)
918 addSearches(DefChainPhi, PausedSearches, I);
919 NewPaused.clear();
920
921 Current = DefChainPhi;
922 }
923 }
924
925 void verifyOptResult(const OptznResult &R) const {
926 assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
927 return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
928 }));
929 }
930
931 void resetPhiOptznState() {
932 Paths.clear();
933 VisitedPhis.clear();
934 PerformedPhiTranslation = false;
935 }
936
937 public:
938 ClobberWalker(const MemorySSA &MSSA, AliasAnalysisType &AA, DominatorTree &DT)
939 : MSSA(MSSA), AA(AA), DT(DT) {}
940
941 AliasAnalysisType *getAA() { return &AA; }
942 /// Finds the nearest clobber for the given query, optimizing phis if
943 /// possible.
944 MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q,
945 unsigned &UpWalkLimit) {
946 Query = &Q;
947 UpwardWalkLimit = &UpWalkLimit;
948 // Starting limit must be > 0.
949 if (!UpWalkLimit)
950 UpWalkLimit++;
951
952 MemoryAccess *Current = Start;
953 // This walker pretends uses don't exist. If we're handed one, silently grab
954 // its def. (This has the nice side-effect of ensuring we never cache uses)
955 if (auto *MU = dyn_cast<MemoryUse>(Start))
956 Current = MU->getDefiningAccess();
957
958 DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
959 // Fast path for the overly-common case (no crazy phi optimization
960 // necessary)
961 UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
962 MemoryAccess *Result;
963 if (WalkResult.IsKnownClobber) {
964 Result = WalkResult.Result;
965 Q.AR = WalkResult.AR;
966 } else {
967 OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
968 Current, Q.StartingLoc);
969 verifyOptResult(OptRes);
970 resetPhiOptznState();
971 Result = OptRes.PrimaryClobber.Clobber;
972 }
973
974 #ifdef EXPENSIVE_CHECKS
975 if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0)
976 checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
977 #endif
978 return Result;
979 }
980 };
981
982 struct RenamePassData {
983 DomTreeNode *DTN;
984 DomTreeNode::const_iterator ChildIt;
985 MemoryAccess *IncomingVal;
986
987 RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
988 MemoryAccess *M)
989 : DTN(D), ChildIt(It), IncomingVal(M) {}
990
991 void swap(RenamePassData &RHS) {
992 std::swap(DTN, RHS.DTN);
993 std::swap(ChildIt, RHS.ChildIt);
994 std::swap(IncomingVal, RHS.IncomingVal);
995 }
996 };
997
998 } // end anonymous namespace
999
1000 namespace llvm {
1001
1002 template <class AliasAnalysisType> class MemorySSA::ClobberWalkerBase {
1003 ClobberWalker<AliasAnalysisType> Walker;
1004 MemorySSA *MSSA;
1005
1006 public:
1007 ClobberWalkerBase(MemorySSA *M, AliasAnalysisType *A, DominatorTree *D)
1008 : Walker(*M, *A, *D), MSSA(M) {}
1009
1010 MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
1011 const MemoryLocation &,
1012 unsigned &);
1013 // Third argument (bool) defines whether the clobber search should skip the
1014 // original queried access. If true, there will be a follow-up query searching
1015 // for a clobber access past "self". Note that the Optimized access is not
1016 // updated if a new clobber is found by this SkipSelf search. If this
1017 // additional query becomes heavily used we may decide to cache the result.
1018 // Walker instantiations will decide how to set the SkipSelf bool.
1019 MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, unsigned &, bool);
1020 };
1021
1022 /// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
1023 /// longer does caching on its own, but the name has been retained for the
1024 /// moment.
1025 template <class AliasAnalysisType>
1026 class MemorySSA::CachingWalker final : public MemorySSAWalker {
1027 ClobberWalkerBase<AliasAnalysisType> *Walker;
1028
1029 public:
1030 CachingWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
1031 : MemorySSAWalker(M), Walker(W) {}
1032 ~CachingWalker() override = default;
1033
1034 using MemorySSAWalker::getClobberingMemoryAccess;
1035
1036 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
1037 return Walker->getClobberingMemoryAccessBase(MA, UWL, false);
1038 }
1039 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1040 const MemoryLocation &Loc,
1041 unsigned &UWL) {
1042 return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
1043 }
1044
1045 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
1046 unsigned UpwardWalkLimit = MaxCheckLimit;
1047 return getClobberingMemoryAccess(MA, UpwardWalkLimit);
1048 }
1049 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1050 const MemoryLocation &Loc) override {
1051 unsigned UpwardWalkLimit = MaxCheckLimit;
1052 return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
1053 }
1054
1055 void invalidateInfo(MemoryAccess *MA) override {
1056 if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1057 MUD->resetOptimized();
1058 }
1059 };
1060
1061 template <class AliasAnalysisType>
1062 class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
1063 ClobberWalkerBase<AliasAnalysisType> *Walker;
1064
1065 public:
1066 SkipSelfWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
1067 : MemorySSAWalker(M), Walker(W) {}
1068 ~SkipSelfWalker() override = default;
1069
1070 using MemorySSAWalker::getClobberingMemoryAccess;
1071
1072 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
1073 return Walker->getClobberingMemoryAccessBase(MA, UWL, true);
1074 }
1075 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1076 const MemoryLocation &Loc,
1077 unsigned &UWL) {
1078 return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
1079 }
1080
1081 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
1082 unsigned UpwardWalkLimit = MaxCheckLimit;
1083 return getClobberingMemoryAccess(MA, UpwardWalkLimit);
1084 }
1085 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1086 const MemoryLocation &Loc) override {
1087 unsigned UpwardWalkLimit = MaxCheckLimit;
1088 return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
1089 }
1090
1091 void invalidateInfo(MemoryAccess *MA) override {
1092 if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1093 MUD->resetOptimized();
1094 }
1095 };
1096
1097 } // end namespace llvm
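// A minimal usage sketch for these walkers from client code (the identifiers
// `MSSA` and `I` below are assumptions for illustration):
//
//   MemorySSAWalker *W = MSSA.getWalker();
//   if (MemoryUseOrDef *MA = MSSA.getMemoryAccess(I))
//     MemoryAccess *Clobber = W->getClobberingMemoryAccess(MA);
//
// The skip-self variant (MSSA.getSkipSelfWalker()) answers the same query but
// searches past the original access itself.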
1098
1099 void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
1100 bool RenameAllUses) {
1101 // Pass through values to our successors
1102 for (const BasicBlock *S : successors(BB)) {
1103 auto It = PerBlockAccesses.find(S);
1104 // Rename the phi nodes in our successor block
1105 if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
1106 continue;
1107 AccessList *Accesses = It->second.get();
1108 auto *Phi = cast<MemoryPhi>(&Accesses->front());
1109 if (RenameAllUses) {
1110 bool ReplacementDone = false;
1111 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
1112 if (Phi->getIncomingBlock(I) == BB) {
1113 Phi->setIncomingValue(I, IncomingVal);
1114 ReplacementDone = true;
1115 }
1116 (void) ReplacementDone;
1117 assert(ReplacementDone && "Incomplete phi during partial rename");
1118 } else
1119 Phi->addIncoming(IncomingVal, BB);
1120 }
1121 }
1122
1123 /// Rename a single basic block into MemorySSA form.
1124 /// Uses the standard SSA renaming algorithm.
1125 /// \returns The new incoming value.
1126 MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
1127 bool RenameAllUses) {
1128 auto It = PerBlockAccesses.find(BB);
1129 // Skip most processing if the list is empty.
1130 if (It != PerBlockAccesses.end()) {
1131 AccessList *Accesses = It->second.get();
1132 for (MemoryAccess &L : *Accesses) {
1133 if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
1134 if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
1135 MUD->setDefiningAccess(IncomingVal);
1136 if (isa<MemoryDef>(&L))
1137 IncomingVal = &L;
1138 } else {
1139 IncomingVal = &L;
1140 }
1141 }
1142 }
1143 return IncomingVal;
1144 }
1145
1146 /// This is the standard SSA renaming algorithm.
1147 ///
1148 /// We walk the dominator tree in preorder, renaming accesses, and then filling
1149 /// in phi nodes in our successors.
1150 void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
1151 SmallPtrSetImpl<BasicBlock *> &Visited,
1152 bool SkipVisited, bool RenameAllUses) {
1153 assert(Root && "Trying to rename accesses in an unreachable block");
1154
1155 SmallVector<RenamePassData, 32> WorkStack;
1156 // Skip everything if we already renamed this block and we are skipping.
1157 // Note: You can't sink this into the if, because we need it to occur
1158 // regardless of whether we skip blocks or not.
1159 bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
1160 if (SkipVisited && AlreadyVisited)
1161 return;
1162
1163 IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
1164 renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
1165 WorkStack.push_back({Root, Root->begin(), IncomingVal});
1166
1167 while (!WorkStack.empty()) {
1168 DomTreeNode *Node = WorkStack.back().DTN;
1169 DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
1170 IncomingVal = WorkStack.back().IncomingVal;
1171
1172 if (ChildIt == Node->end()) {
1173 WorkStack.pop_back();
1174 } else {
1175 DomTreeNode *Child = *ChildIt;
1176 ++WorkStack.back().ChildIt;
1177 BasicBlock *BB = Child->getBlock();
1178 // Note: You can't sink this into the if, because we need it to occur
1179 // regardless of whether we skip blocks or not.
1180 AlreadyVisited = !Visited.insert(BB).second;
1181 if (SkipVisited && AlreadyVisited) {
1182 // We already visited this during our renaming, which can happen when
1183 // being asked to rename multiple blocks. Figure out the incoming val,
1184 // which is the last def.
1185 // Incoming value can only change if there is a block def, and in that
1186 // case, it's the last block def in the list.
1187 if (auto *BlockDefs = getWritableBlockDefs(BB))
1188 IncomingVal = &*BlockDefs->rbegin();
1189 } else
1190 IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
1191 renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
1192 WorkStack.push_back({Child, Child->begin(), IncomingVal});
1193 }
1194 }
1195 }
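// Renaming example (sketch): for a diamond CFG entry -> {left, right} -> merge
// where only `left` contains a store, renaming leaves the MemoryPhi in `merge`
// looking roughly like
//   2 = MemoryPhi({left, 1}, {right, liveOnEntry})
// where `1` is the MemoryDef created for the store in `left`.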
1196
1197 /// This handles unreachable block accesses by deleting phi nodes in
1198 /// unreachable blocks, and marking all other unreachable MemoryAccess's as
1199 /// being uses of the live on entry definition.
1200 void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
1201 assert(!DT->isReachableFromEntry(BB) &&
1202 "Reachable block found while handling unreachable blocks");
1203
1204 // Make sure phi nodes in our reachable successors end up with a
1205 // LiveOnEntryDef for our incoming edge, even though our block is forward
1206 // unreachable. We could just disconnect these blocks from the CFG fully,
1207 // but we do not right now.
1208 for (const BasicBlock *S : successors(BB)) {
1209 if (!DT->isReachableFromEntry(S))
1210 continue;
1211 auto It = PerBlockAccesses.find(S);
1212 // Rename the phi nodes in our successor block
1213 if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
1214 continue;
1215 AccessList *Accesses = It->second.get();
1216 auto *Phi = cast<MemoryPhi>(&Accesses->front());
1217 Phi->addIncoming(LiveOnEntryDef.get(), BB);
1218 }
1219
1220 auto It = PerBlockAccesses.find(BB);
1221 if (It == PerBlockAccesses.end())
1222 return;
1223
1224 auto &Accesses = It->second;
1225 for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
1226 auto Next = std::next(AI);
1227 // If we have a phi, just remove it. We are going to replace all
1228 // users with live on entry.
1229 if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
1230 UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
1231 else
1232 Accesses->erase(AI);
1233 AI = Next;
1234 }
1235 }
1236
1237 MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
1238 : AA(nullptr), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
1239 SkipWalker(nullptr), NextID(0) {
1240 // Build MemorySSA using a batch alias analysis. This reuses the internal
1241 // state that AA collects during an alias()/getModRefInfo() call. This is
1242 // safe because there are no CFG changes while building MemorySSA and can
1243 // significantly reduce the time spent by the compiler in AA, because we will
1244 // make queries about all the instructions in the Function.
1245 assert(AA && "No alias analysis?");
1246 BatchAAResults BatchAA(*AA);
1247 buildMemorySSA(BatchAA);
1248 // Intentionally leave AA as nullptr while building so we don't accidentally
1249 // use the non-batch AliasAnalysis.
1250 this->AA = AA;
1251 // Also create the walker here.
1252 getWalker();
1253 }
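// Construction sketch (assumed client code): under the new pass manager this
// object is normally obtained via
//   MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
// and under the legacy pass manager via MemorySSAWrapperPass::getMSSA().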
1254
1255 MemorySSA::~MemorySSA() {
1256 // Drop all our references
1257 for (const auto &Pair : PerBlockAccesses)
1258 for (MemoryAccess &MA : *Pair.second)
1259 MA.dropAllReferences();
1260 }
1261
1262 MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
1263 auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));
1264
1265 if (Res.second)
1266 Res.first->second = std::make_unique<AccessList>();
1267 return Res.first->second.get();
1268 }
1269
1270 MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
1271 auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));
1272
1273 if (Res.second)
1274 Res.first->second = std::make_unique<DefsList>();
1275 return Res.first->second.get();
1276 }
1277
1278 namespace llvm {
1279
1280 /// This class is a batch walker of all MemoryUse's in the program, and points
1281 /// their defining access at the thing that actually clobbers them. Because it
1282 /// is a batch walker that touches everything, it does not operate like the
1283 /// other walkers. This walker is basically performing a top-down SSA renaming
1284 /// pass, where the version stack is used as the cache. This enables it to be
1285 /// significantly more time and memory efficient than using the regular walker,
1286 /// which is walking bottom-up.
1287 class MemorySSA::OptimizeUses {
1288 public:
1289 OptimizeUses(MemorySSA *MSSA, CachingWalker<BatchAAResults> *Walker,
1290 BatchAAResults *BAA, DominatorTree *DT)
1291 : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}
1292
1293 void optimizeUses();
1294
1295 private:
1296 /// This represents where a given MemoryLocation is in the stack.
1297 struct MemlocStackInfo {
1298 // This essentially is keeping track of versions of the stack. Whenever
1299 // the stack changes due to pushes or pops, these versions increase.
1300 unsigned long StackEpoch;
1301 unsigned long PopEpoch;
1302 // This is the lower bound of places on the stack to check. It is equal to
1303 // the place the last stack walk ended.
1304 // Note: Correctness depends on this being initialized to 0, which
1305 // DenseMap does.
1306 unsigned long LowerBound;
1307 const BasicBlock *LowerBoundBlock;
1308 // This is where the last walk for this memory location ended.
1309 unsigned long LastKill;
1310 bool LastKillValid;
1311 Optional<AliasResult> AR;
1312 };
1313
1314 void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
1315 SmallVectorImpl<MemoryAccess *> &,
1316 DenseMap<MemoryLocOrCall, MemlocStackInfo> &);
1317
1318 MemorySSA *MSSA;
1319 CachingWalker<BatchAAResults> *Walker;
1320 BatchAAResults *AA;
1321 DominatorTree *DT;
1322 };
1323
1324 } // end namespace llvm
1325
1326 /// Optimize the uses in a given block. This is basically the SSA renaming
1327 /// algorithm, with one caveat: We are able to use a single stack for all
1328 /// MemoryUses. This is because the set of *possible* reaching MemoryDefs is
1329 /// the same for every MemoryUse. The *actual* clobbering MemoryDef is just
1330 /// going to be some position in that stack of possible ones.
1331 ///
1332 /// For each MemoryLocation, we track the stack positions it still needs
1333 /// to check and where its last walk ended, because we only want to check the
1334 /// things that changed since last time. The same MemoryLocation should
1335 /// get clobbered by the same store (getModRefInfo does not use invariantness or
1336 /// things like this, and if they start, we can modify MemoryLocOrCall to
1337 /// include relevant data)
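/// For example (sketch): if the version stack for the current block is
///   [liveOnEntry, 1 = MemoryDef(store to %a), 2 = MemoryDef(store to %b)]
/// then a MemoryUse of %a walks down from the top of the stack, skips the
/// store to %b, and gets its defining access optimized to the MemoryDef at
/// position 1, assuming AA reports no aliasing between %a and %b.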
1338 void MemorySSA::OptimizeUses::optimizeUsesInBlock(
1339 const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
1340 SmallVectorImpl<MemoryAccess *> &VersionStack,
1341 DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {
1342
1343 /// If no accesses, nothing to do.
1344 MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
1345 if (Accesses == nullptr)
1346 return;
1347
1348 // Pop everything that doesn't dominate the current block off the stack,
1349 // increment the PopEpoch to account for this.
1350 while (true) {
1351 assert(
1352 !VersionStack.empty() &&
1353 "Version stack should have liveOnEntry sentinel dominating everything");
1354 BasicBlock *BackBlock = VersionStack.back()->getBlock();
1355 if (DT->dominates(BackBlock, BB))
1356 break;
1357 while (VersionStack.back()->getBlock() == BackBlock)
1358 VersionStack.pop_back();
1359 ++PopEpoch;
1360 }
1361
1362 for (MemoryAccess &MA : *Accesses) {
1363 auto *MU = dyn_cast<MemoryUse>(&MA);
1364 if (!MU) {
1365 VersionStack.push_back(&MA);
1366 ++StackEpoch;
1367 continue;
1368 }
1369
1370 if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
1371 MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
1372 continue;
1373 }
1374
1375 MemoryLocOrCall UseMLOC(MU);
1376 auto &LocInfo = LocStackInfo[UseMLOC];
1377 // If the pop epoch changed, it means we've removed stuff from top of
1378 // stack due to changing blocks. We may have to reset the lower bound or
1379 // last kill info.
1380 if (LocInfo.PopEpoch != PopEpoch) {
1381 LocInfo.PopEpoch = PopEpoch;
1382 LocInfo.StackEpoch = StackEpoch;
1383 // If the lower bound was in something that no longer dominates us, we
1384 // have to reset it.
1385 // We can't simply track stack size, because the stack may have had
1386 // pushes/pops in the meantime.
1387 // XXX: This is non-optimal, but is only slower in cases with heavily
1388 // branching dominator trees. To get the optimal number of queries we would
1389 // have to make LowerBound and LastKill a per-loc stack, and pop it until
1390 // the top of that stack dominates us. This does not seem worth it ATM.
1391 // A much cheaper optimization would be to always explore the deepest
1392 // branch of the dominator tree first. This will guarantee this resets on
1393 // the smallest set of blocks.
1394 if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
1395 !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
1396 // Reset the lower bound of things to check.
1397 // TODO: Some day we should be able to reset to last kill, rather than
1398 // 0.
1399 LocInfo.LowerBound = 0;
1400 LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
1401 LocInfo.LastKillValid = false;
1402 }
1403 } else if (LocInfo.StackEpoch != StackEpoch) {
1404 // If all that has changed is the StackEpoch, we only have to check the
1405 // new things on the stack, because we've checked everything before. In
1406 // this case, the lower bound of things to check remains the same.
1407 LocInfo.PopEpoch = PopEpoch;
1408 LocInfo.StackEpoch = StackEpoch;
1409 }
1410 if (!LocInfo.LastKillValid) {
1411 LocInfo.LastKill = VersionStack.size() - 1;
1412 LocInfo.LastKillValid = true;
1413 LocInfo.AR = AliasResult::MayAlias;
1414 }
1415
1416 // At this point, we should have corrected LastKill and LowerBound to be
1417 // in bounds.
1418 assert(LocInfo.LowerBound < VersionStack.size() &&
1419 "Lower bound out of range");
1420 assert(LocInfo.LastKill < VersionStack.size() &&
1421 "Last kill info out of range");
1422 // In any case, the new upper bound is the top of the stack.
1423 unsigned long UpperBound = VersionStack.size() - 1;
1424
1425 if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
1426 LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
1427 << *(MU->getMemoryInst()) << ")"
1428 << " because there are "
1429 << UpperBound - LocInfo.LowerBound
1430 << " stores to disambiguate\n");
1431 // Because we did not walk, LastKill is no longer valid, as this may
1432 // have been a kill.
1433 LocInfo.LastKillValid = false;
1434 continue;
1435 }
1436 bool FoundClobberResult = false;
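// Reuse MaxCheckLimit to bound any upward walks the caching walker performs
// when we hit a MemoryPhi below.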
1437 unsigned UpwardWalkLimit = MaxCheckLimit;
1438 while (UpperBound > LocInfo.LowerBound) {
1439 if (isa<MemoryPhi>(VersionStack[UpperBound])) {
1440 // For phis, use the walker, see where we ended up, go there
1441 MemoryAccess *Result =
1442 Walker->getClobberingMemoryAccess(MU, UpwardWalkLimit);
1443 // We are guaranteed to find it or something is wrong
1444 while (VersionStack[UpperBound] != Result) {
1445 assert(UpperBound != 0);
1446 --UpperBound;
1447 }
1448 FoundClobberResult = true;
1449 break;
1450 }
1451
1452 MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
1453 ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA);
1454 if (CA.IsClobber) {
1455 FoundClobberResult = true;
1456 LocInfo.AR = CA.AR;
1457 break;
1458 }
1459 --UpperBound;
1460 }
1461
1462 // Note: Phis always have AliasResult AR set to MayAlias ATM.
1463
1464 // At the end of this loop, UpperBound is either a clobber, or the lower bound.
1465 // PHI walking may cause it to be < LowerBound, and in fact, < LastKill.
1466 if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
1467 // The access we stopped at is now the last kill for this location.
1468 if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound]))
1469 LocInfo.AR = None;
1470 MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR);
1471 LocInfo.LastKill = UpperBound;
1472 } else {
1473 // Otherwise, we checked all the new ones, and now we know we can get to
1474 // LastKill.
1475 MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR);
1476 }
1477 LocInfo.LowerBound = VersionStack.size() - 1;
1478 LocInfo.LowerBoundBlock = BB;
1479 }
1480 }
1481
1482 /// Optimize uses to point to their actual clobbering definitions.
1483 void MemorySSA::OptimizeUses::optimizeUses() {
1484 SmallVector<MemoryAccess *, 16> VersionStack;
1485 DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
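// Seed the version stack with the live-on-entry def so there is always a
// dominating sentinel on the stack.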
1486 VersionStack.push_back(MSSA->getLiveOnEntryDef());
1487
1488 unsigned long StackEpoch = 1;
1489 unsigned long PopEpoch = 1;
1490 // We perform a non-recursive top-down dominator tree walk.
1491 for (const auto *DomNode : depth_first(DT->getRootNode()))
1492 optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
1493 LocStackInfo);
1494 }
1495
1496 void MemorySSA::placePHINodes(
1497 const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
1498 // Determine where our MemoryPhi's should go
1499 ForwardIDFCalculator IDFs(*DT);
1500 IDFs.setDefiningBlocks(DefiningBlocks);
1501 SmallVector<BasicBlock *, 32> IDFBlocks;
1502 IDFs.calculate(IDFBlocks);
1503
1504 // Now place MemoryPhi nodes.
1505 for (auto &BB : IDFBlocks)
1506 createMemoryPhi(BB);
1507 }
1508
1509 void MemorySSA::buildMemorySSA(BatchAAResults &BAA) {
1510 // We create an access to represent "live on entry", for things like
1511 // arguments or users of globals, where the memory they use is defined before
1512 // the beginning of the function. We do not actually insert it into the IR.
1513 // We do not define a live on exit for the immediate uses, and thus our
1514 // semantics do *not* imply that something with no immediate uses can simply
1515 // be removed.
1516 BasicBlock &StartingPoint = F.getEntryBlock();
1517 LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
1518 &StartingPoint, NextID++));
1519
1520 // We maintain lists of memory accesses per block, trading memory for time.
1521 // The alternative would be to look up the memory access for every possible
1522 // instruction in the stream.
1523 SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
1524 // Go through each block, figure out where defs occur, and chain together all
1525 // the accesses.
1526 for (BasicBlock &B : F) {
1527 bool InsertIntoDef = false;
1528 AccessList *Accesses = nullptr;
1529 DefsList *Defs = nullptr;
1530 for (Instruction &I : B) {
1531 MemoryUseOrDef *MUD = createNewAccess(&I, &BAA);
1532 if (!MUD)
1533 continue;
1534
1535 if (!Accesses)
1536 Accesses = getOrCreateAccessList(&B);
1537 Accesses->push_back(MUD);
1538 if (isa<MemoryDef>(MUD)) {
1539 InsertIntoDef = true;
1540 if (!Defs)
1541 Defs = getOrCreateDefsList(&B);
1542 Defs->push_back(*MUD);
1543 }
1544 }
1545 if (InsertIntoDef)
1546 DefiningBlocks.insert(&B);
1547 }
1548 placePHINodes(DefiningBlocks);
1549
1550 // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
1551 // filled in with all blocks.
1552 SmallPtrSet<BasicBlock *, 16> Visited;
1553 renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);
1554
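// Eagerly optimize the MemoryUses with a batch walker, pointing each use at
// its clobbering access where the walk limits allow.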
1555 ClobberWalkerBase<BatchAAResults> WalkerBase(this, &BAA, DT);
1556 CachingWalker<BatchAAResults> WalkerLocal(this, &WalkerBase);
1557 OptimizeUses(this, &WalkerLocal, &BAA, DT).optimizeUses();
1558
1559 // Mark the uses in unreachable blocks as live on entry, so that they go
1560 // somewhere.
1561 for (auto &BB : F)
1562 if (!Visited.count(&BB))
1563 markUnreachableAsLiveOnEntry(&BB);
1564 }
1565
1566 MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }
1567
1568 MemorySSA::CachingWalker<AliasAnalysis> *MemorySSA::getWalkerImpl() {
1569 if (Walker)
1570 return Walker.get();
1571
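// The ClobberWalkerBase is shared with the skip-self walker, so only create
// it once.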
1572 if (!WalkerBase)
1573 WalkerBase =
1574 std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);
1575
1576 Walker =
1577 std::make_unique<CachingWalker<AliasAnalysis>>(this, WalkerBase.get());
1578 return Walker.get();
1579 }
1580
1581 MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
1582 if (SkipWalker)
1583 return SkipWalker.get();
1584
1585 if (!WalkerBase)
1586 WalkerBase =
1587 std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);
1588
1589 SkipWalker =
1590 std::make_unique<SkipSelfWalker<AliasAnalysis>>(this, WalkerBase.get());
1591 return SkipWalker.get();
1592 }
1593
1594
1595 // This is a helper function used by the creation routines. It places NewAccess
1596 // into the access and defs lists for a given basic block, at the given
1597 // insertion point.
1598 void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
1599 const BasicBlock *BB,
1600 InsertionPlace Point) {
1601 auto *Accesses = getOrCreateAccessList(BB);
1602 if (Point == Beginning) {
1603 // If it's a phi node, it goes first; otherwise, it goes after any phi
1604 // nodes.
1605 if (isa<MemoryPhi>(NewAccess)) {
1606 Accesses->push_front(NewAccess);
1607 auto *Defs = getOrCreateDefsList(BB);
1608 Defs->push_front(*NewAccess);
1609 } else {
1610 auto AI = find_if_not(
1611 *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1612 Accesses->insert(AI, NewAccess);
1613 if (!isa<MemoryUse>(NewAccess)) {
1614 auto *Defs = getOrCreateDefsList(BB);
1615 auto DI = find_if_not(
1616 *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1617 Defs->insert(DI, *NewAccess);
1618 }
1619 }
1620 } else {
1621 Accesses->push_back(NewAccess);
1622 if (!isa<MemoryUse>(NewAccess)) {
1623 auto *Defs = getOrCreateDefsList(BB);
1624 Defs->push_back(*NewAccess);
1625 }
1626 }
1627 BlockNumberingValid.erase(BB);
1628 }
1629
1630 void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
1631 AccessList::iterator InsertPt) {
1632 auto *Accesses = getWritableBlockAccesses(BB);
1633 bool WasEnd = InsertPt == Accesses->end();
1634 Accesses->insert(AccessList::iterator(InsertPt), What);
1635 if (!isa<MemoryUse>(What)) {
1636 auto *Defs = getOrCreateDefsList(BB);
1637 // If we got asked to insert at the end, we have an easy job, just shove it
1638 // at the end. If we got asked to insert before an existing def, we also get
1639 // an iterator. If we got asked to insert before a use, we have to hunt for
1640 // the next def.
1641 if (WasEnd) {
1642 Defs->push_back(*What);
1643 } else if (isa<MemoryDef>(InsertPt)) {
1644 Defs->insert(InsertPt->getDefsIterator(), *What);
1645 } else {
1646 while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
1647 ++InsertPt;
1648 // Either we found a def, or we are inserting at the end
1649 if (InsertPt == Accesses->end())
1650 Defs->push_back(*What);
1651 else
1652 Defs->insert(InsertPt->getDefsIterator(), *What);
1653 }
1654 }
1655 BlockNumberingValid.erase(BB);
1656 }
1657
1658 void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) {
1659 // Keep it in the lookup tables but remove it from the lists.
1660 removeFromLists(What, false);
1661
1662 // Note that moving should implicitly invalidate the optimized state of a
1663 // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a
1664 // MemoryDef.
1665 if (auto *MD = dyn_cast<MemoryDef>(What))
1666 MD->resetOptimized();
1667 What->setBlock(BB);
1668 }
1669
1670 // Move What before Where in the IR. The end result is that What will belong to
1671 // the right lists and have the right Block set, but will not otherwise be
1672 // correct. It will not have the right defining access, and if it is a def,
1673 // things below it will not properly be updated.
1674 void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1675 AccessList::iterator Where) {
1676 prepareForMoveTo(What, BB);
1677 insertIntoListsBefore(What, BB, Where);
1678 }
1679
1680 void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
1681 InsertionPlace Point) {
1682 if (isa<MemoryPhi>(What)) {
1683 assert(Point == Beginning &&
1684 "Can only move a Phi at the beginning of the block");
1685 // Update lookup table entry
1686 ValueToMemoryAccess.erase(What->getBlock());
1687 bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
1688 (void)Inserted;
1689 assert(Inserted && "Cannot move a Phi to a block that already has one");
1690 }
1691
1692 prepareForMoveTo(What, BB);
1693 insertIntoListsForBlock(What, BB, Point);
1694 }
1695
1696 MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
1697 assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
1698 MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
1699 // Phis are always placed at the front of the block.
1700 insertIntoListsForBlock(Phi, BB, Beginning);
1701 ValueToMemoryAccess[BB] = Phi;
1702 return Phi;
1703 }
1704
1705 MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
1706 MemoryAccess *Definition,
1707 const MemoryUseOrDef *Template,
1708 bool CreationMustSucceed) {
1709 assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
1710 MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template);
1711 if (CreationMustSucceed)
1712 assert(NewAccess != nullptr && "Tried to create a memory access for a "
1713 "non-memory touching instruction");
1714 if (NewAccess) {
1715 assert((!Definition || !isa<MemoryUse>(Definition)) &&
1716 "A use cannot be a defining access");
1717 NewAccess->setDefiningAccess(Definition);
1718 }
1719 return NewAccess;
1720 }
1721
1722 // Return true if the instruction has ordering constraints.
1723 // Note specifically that this only considers stores and loads
1724 // because others are still considered ModRef by getModRefInfo.
1725 static inline bool isOrdered(const Instruction *I) {
1726 if (auto *SI = dyn_cast<StoreInst>(I)) {
1727 if (!SI->isUnordered())
1728 return true;
1729 } else if (auto *LI = dyn_cast<LoadInst>(I)) {
1730 if (!LI->isUnordered())
1731 return true;
1732 }
1733 return false;
1734 }
1735
1736 /// Helper function to create new memory accesses
1737 template <typename AliasAnalysisType>
1738 MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
1739 AliasAnalysisType *AAP,
1740 const MemoryUseOrDef *Template) {
1741 // The assume intrinsic has a control dependency which we model by claiming
1742 // that it writes arbitrarily. Debuginfo intrinsics may be considered
1743 // clobbers when we have a nonstandard AA pipeline. Ignore these fake memory
1744 // dependencies here.
1745 // FIXME: Replace this special casing with a more accurate modelling of
1746 // assume's control dependency.
1747 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1748 switch (II->getIntrinsicID()) {
1749 default:
1750 break;
1751 case Intrinsic::assume:
1752 case Intrinsic::experimental_noalias_scope_decl:
1753 return nullptr;
1754 }
1755 }
1756
1757 // Using a nonstandard AA pipeline might leave us with unexpected modref
1758 // results for I, so add a check to not model instructions that may not read
1759 // from or write to memory. This is necessary for correctness.
1760 if (!I->mayReadFromMemory() && !I->mayWriteToMemory())
1761 return nullptr;
1762
1763 bool Def, Use;
1764 if (Template) {
1765 Def = isa<MemoryDef>(Template);
1766 Use = isa<MemoryUse>(Template);
1767 #if !defined(NDEBUG)
1768 ModRefInfo ModRef = AAP->getModRefInfo(I, None);
1769 bool DefCheck, UseCheck;
1770 DefCheck = isModSet(ModRef) || isOrdered(I);
1771 UseCheck = isRefSet(ModRef);
1772 assert(Def == DefCheck && (Def || Use == UseCheck) && "Invalid template");
1773 #endif
1774 } else {
1775 // Find out what effect this instruction has on memory.
1776 ModRefInfo ModRef = AAP->getModRefInfo(I, None);
1777 // The isOrdered check is used to ensure that volatiles end up as defs
1778 // (atomics end up as ModRef right now anyway). Until we separate the
1779 // ordering chain from the memory chain, this enables people to see at least
1780 // some relative ordering to volatiles. Note that getClobberingMemoryAccess
1781 // will still give an answer that bypasses other volatile loads. TODO:
1782 // Separate memory aliasing and ordering into two different chains so that
1783 // we can precisely represent both "what memory will this read/write/is
1784 // clobbered by" and "what instructions can I move this past".
1785 Def = isModSet(ModRef) || isOrdered(I);
1786 Use = isRefSet(ModRef);
1787 }
1788
1789 // It's possible for an instruction to neither read nor write memory. During
1790 // construction, we ignore such instructions.
1791 if (!Def && !Use)
1792 return nullptr;
1793
1794 MemoryUseOrDef *MUD;
1795 if (Def)
1796 MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
1797 else
1798 MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
1799 ValueToMemoryAccess[I] = MUD;
1800 return MUD;
1801 }
1802
1803 /// Properly remove \p MA from all of MemorySSA's lookup tables.
1804 void MemorySSA::removeFromLookups(MemoryAccess *MA) {
1805 assert(MA->use_empty() &&
1806 "Trying to remove memory access that still has uses");
1807 BlockNumbering.erase(MA);
1808 if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1809 MUD->setDefiningAccess(nullptr);
1810 // Invalidate our walker's cache if necessary
1811 if (!isa<MemoryUse>(MA))
1812 getWalker()->invalidateInfo(MA);
1813
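// ValueToMemoryAccess is keyed by the memory instruction for uses/defs and
// by the basic block for MemoryPhis.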
1814 Value *MemoryInst;
1815 if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1816 MemoryInst = MUD->getMemoryInst();
1817 else
1818 MemoryInst = MA->getBlock();
1819
1820 auto VMA = ValueToMemoryAccess.find(MemoryInst);
1821 if (VMA->second == MA)
1822 ValueToMemoryAccess.erase(VMA);
1823 }
1824
1825 /// Properly remove \p MA from all of MemorySSA's lists.
1826 ///
1827 /// Because of the way the intrusive list and use lists work, it is important to
1828 /// do removal in the right order.
1829 /// ShouldDelete defaults to true, and will cause the memory access to also be
1830 /// deleted, not just removed.
1831 void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
1832 BasicBlock *BB = MA->getBlock();
1833 // The access list owns the reference, so we erase it from the non-owning list
1834 // first.
1835 if (!isa<MemoryUse>(MA)) {
1836 auto DefsIt = PerBlockDefs.find(BB);
1837 std::unique_ptr<DefsList> &Defs = DefsIt->second;
1838 Defs->remove(*MA);
1839 if (Defs->empty())
1840 PerBlockDefs.erase(DefsIt);
1841 }
1842
1843 // The erase call here will delete it. If we don't want it deleted, we call
1844 // remove instead.
1845 auto AccessIt = PerBlockAccesses.find(BB);
1846 std::unique_ptr<AccessList> &Accesses = AccessIt->second;
1847 if (ShouldDelete)
1848 Accesses->erase(MA);
1849 else
1850 Accesses->remove(MA);
1851
1852 if (Accesses->empty()) {
1853 PerBlockAccesses.erase(AccessIt);
1854 BlockNumberingValid.erase(BB);
1855 }
1856 }
1857
1858 void MemorySSA::print(raw_ostream &OS) const {
1859 MemorySSAAnnotatedWriter Writer(this);
1860 F.print(OS, &Writer);
1861 }
1862
1863 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1864 LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
1865 #endif
1866
1867 void MemorySSA::verifyMemorySSA() const {
1868 verifyOrderingDominationAndDefUses(F);
1869 verifyDominationNumbers(F);
1870 verifyPrevDefInPhis(F);
1871 // Previously, the verification used to also check that the clobbering access
1872 // cached by MemorySSA matches the clobbering access found by a later query to
1873 // AA. This does not hold true in general due to the current fragility of
1874 // BasicAA, which has arbitrary caps on the things it analyzes before giving
1875 // up. As a result, transformations that are correct will lead to BasicAA
1876 // returning different alias answers before and after that transformation.
1877 // Invalidating MemorySSA is not an option: because BasicAA's results can be
1878 // that unstable, in the worst case we'd need to rebuild MemorySSA from scratch
1879 // after every transformation, which defeats the purpose of using it. For such
1880 // an example, see test4 added in D51960.
1881 }
1882
1883 void MemorySSA::verifyPrevDefInPhis(Function &F) const {
1884 #if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS)
1885 for (const BasicBlock &BB : F) {
1886 if (MemoryPhi *Phi = getMemoryAccess(&BB)) {
1887 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1888 auto *Pred = Phi->getIncomingBlock(I);
1889 auto *IncAcc = Phi->getIncomingValue(I);
1890 // If Pred has no unreachable predecessors, get last def looking at
1891 // IDoms. If, while walking IDoms, any of these has an unreachable
1892 // predecessor, then the incoming def can be any access.
1893 if (auto *DTNode = DT->getNode(Pred)) {
1894 while (DTNode) {
1895 if (auto *DefList = getBlockDefs(DTNode->getBlock())) {
1896 auto *LastAcc = &*(--DefList->end());
1897 assert(LastAcc == IncAcc &&
1898 "Incorrect incoming access into phi.");
1899 break;
1900 }
1901 DTNode = DTNode->getIDom();
1902 }
1903 } else {
1904 // If Pred has unreachable predecessors, but has at least a Def, the
1905 // incoming access can be the last Def in Pred, or it could have been
1906 // optimized to LoE. After an update, though, the LoE may have been
1907 // replaced by another access, so IncAcc may be any access.
1908 // If Pred has unreachable predecessors and no Defs, incoming access
1909 // should be LoE; However, after an update, it may be any access.
1910 }
1911 }
1912 }
1913 }
1914 #endif
1915 }
1916
1917 /// Verify that all of the blocks we believe to have valid domination numbers
1918 /// actually have valid domination numbers.
1919 void MemorySSA::verifyDominationNumbers(const Function &F) const {
1920 #ifndef NDEBUG
1921 if (BlockNumberingValid.empty())
1922 return;
1923
1924 SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid;
1925 for (const BasicBlock &BB : F) {
1926 if (!ValidBlocks.count(&BB))
1927 continue;
1928
1929 ValidBlocks.erase(&BB);
1930
1931 const AccessList *Accesses = getBlockAccesses(&BB);
1932 // It's correct to say an empty block has valid numbering.
1933 if (!Accesses)
1934 continue;
1935
1936 // Block numbering starts at 1.
1937 unsigned long LastNumber = 0;
1938 for (const MemoryAccess &MA : *Accesses) {
1939 auto ThisNumberIter = BlockNumbering.find(&MA);
1940 assert(ThisNumberIter != BlockNumbering.end() &&
1941 "MemoryAccess has no domination number in a valid block!");
1942
1943 unsigned long ThisNumber = ThisNumberIter->second;
1944 assert(ThisNumber > LastNumber &&
1945 "Domination numbers should be strictly increasing!");
1946 LastNumber = ThisNumber;
1947 }
1948 }
1949
1950 assert(ValidBlocks.empty() &&
1951 "All valid BasicBlocks should exist in F -- dangling pointers?");
1952 #endif
1953 }
1954
1955 /// Verify ordering: the order and existence of MemoryAccesses matches the
1956 /// order and existence of memory affecting instructions.
1957 /// Verify domination: each definition dominates all of its uses.
1958 /// Verify def-uses: the immediate use information - walk all the memory
1959 /// accesses and verify that, for each use, it appears in the appropriate
1960 /// def's use list
1961 void MemorySSA::verifyOrderingDominationAndDefUses(Function &F) const {
1962 #if !defined(NDEBUG)
1963 // Walk all the blocks, comparing what the lookups think and what the access
1964 // lists think, as well as the order in the blocks vs the order in the access
1965 // lists.
1966 SmallVector<MemoryAccess *, 32> ActualAccesses;
1967 SmallVector<MemoryAccess *, 32> ActualDefs;
1968 for (BasicBlock &B : F) {
1969 const AccessList *AL = getBlockAccesses(&B);
1970 const auto *DL = getBlockDefs(&B);
1971 MemoryPhi *Phi = getMemoryAccess(&B);
1972 if (Phi) {
1973 // Verify ordering.
1974 ActualAccesses.push_back(Phi);
1975 ActualDefs.push_back(Phi);
1976 // Verify domination
1977 for (const Use &U : Phi->uses())
1978 assert(dominates(Phi, U) && "Memory PHI does not dominate its uses");
1979 #if defined(EXPENSIVE_CHECKS)
1980 // Verify def-uses.
1981 assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
1982 pred_begin(&B), pred_end(&B))) &&
1983 "Incomplete MemoryPhi Node");
1984 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1985 verifyUseInDefs(Phi->getIncomingValue(I), Phi);
1986 assert(is_contained(predecessors(&B), Phi->getIncomingBlock(I)) &&
1987 "Incoming phi block not a block predecessor");
1988 }
1989 #endif
1990 }
1991
1992 for (Instruction &I : B) {
1993 MemoryUseOrDef *MA = getMemoryAccess(&I);
1994 assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
1995 "We have memory affecting instructions "
1996 "in this block but they are not in the "
1997 "access list or defs list");
1998 if (MA) {
1999 // Verify ordering.
2000 ActualAccesses.push_back(MA);
2001 if (MemoryAccess *MD = dyn_cast<MemoryDef>(MA)) {
2002 // Verify ordering.
2003 ActualDefs.push_back(MA);
2004 // Verify domination.
2005 for (const Use &U : MD->uses())
2006 assert(dominates(MD, U) &&
2007 "Memory Def does not dominate its uses");
2008 }
2009 #if defined(EXPENSIVE_CHECKS)
2010 // Verify def-uses.
2011 verifyUseInDefs(MA->getDefiningAccess(), MA);
2012 #endif
2013 }
2014 }
2015 // Either we hit the assert, really have no accesses, or we have both
2016 // accesses and an access list. Same with defs.
2017 if (!AL && !DL)
2018 continue;
2019 // Verify ordering.
2020 assert(AL->size() == ActualAccesses.size() &&
2021 "We don't have the same number of accesses in the block as on the "
2022 "access list");
2023 assert((DL || ActualDefs.size() == 0) &&
2024 "Either we should have a defs list, or we should have no defs");
2025 assert((!DL || DL->size() == ActualDefs.size()) &&
2026 "We don't have the same number of defs in the block as on the "
2027 "def list");
2028 auto ALI = AL->begin();
2029 auto AAI = ActualAccesses.begin();
2030 while (ALI != AL->end() && AAI != ActualAccesses.end()) {
2031 assert(&*ALI == *AAI && "Not the same accesses in the same order");
2032 ++ALI;
2033 ++AAI;
2034 }
2035 ActualAccesses.clear();
2036 if (DL) {
2037 auto DLI = DL->begin();
2038 auto ADI = ActualDefs.begin();
2039 while (DLI != DL->end() && ADI != ActualDefs.end()) {
2040 assert(&*DLI == *ADI && "Not the same defs in the same order");
2041 ++DLI;
2042 ++ADI;
2043 }
2044 }
2045 ActualDefs.clear();
2046 }
2047 #endif
2048 }
2049
2050 /// Verify the def-use lists in MemorySSA, by verifying that \p Use
2051 /// appears in the use list of \p Def.
2052 void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
2053 #ifndef NDEBUG
2054 // The live on entry use may cause us to get a NULL def here
2055 if (!Def)
2056 assert(isLiveOnEntryDef(Use) &&
2057 "Null def but use does not point to live on entry def");
2058 else
2059 assert(is_contained(Def->users(), Use) &&
2060 "Did not find use in def's use list");
2061 #endif
2062 }
2063
2064 /// Perform a local numbering on blocks so that instruction ordering can be
2065 /// determined in constant time.
2066 /// TODO: We currently just number in order. If we numbered by N, we could
2067 /// allow at least N-1 sequences of insertBefore or insertAfter (and at least
2068 /// log2(N) sequences of mixed before and after) without needing to invalidate
2069 /// the numbering.
2070 void MemorySSA::renumberBlock(const BasicBlock *B) const {
2071 // The pre-increment ensures the numbers really start at 1.
2072 unsigned long CurrentNumber = 0;
2073 const AccessList *AL = getBlockAccesses(B);
2074 assert(AL != nullptr && "Asking to renumber an empty block");
2075 for (const auto &I : *AL)
2076 BlockNumbering[&I] = ++CurrentNumber;
2077 BlockNumberingValid.insert(B);
2078 }
2079
2080 /// Determine, for two memory accesses in the same block,
2081 /// whether \p Dominator dominates \p Dominatee.
2082 /// \returns True if \p Dominator dominates \p Dominatee.
2083 bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
2084 const MemoryAccess *Dominatee) const {
2085 const BasicBlock *DominatorBlock = Dominator->getBlock();
2086
2087 assert((DominatorBlock == Dominatee->getBlock()) &&
2088 "Asking for local domination when accesses are in different blocks!");
2089 // A node dominates itself.
2090 if (Dominatee == Dominator)
2091 return true;
2092
2093 // When Dominatee is defined on function entry, it is not dominated by another
2094 // memory access.
2095 if (isLiveOnEntryDef(Dominatee))
2096 return false;
2097
2098 // When Dominator is defined on function entry, it dominates the other memory
2099 // access.
2100 if (isLiveOnEntryDef(Dominator))
2101 return true;
2102
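// Lazily renumber the block so both accesses have valid local numbers to
// compare.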
2103 if (!BlockNumberingValid.count(DominatorBlock))
2104 renumberBlock(DominatorBlock);
2105
2106 unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
2107 // All numbers start with 1
2108 assert(DominatorNum != 0 && "Block was not numbered properly");
2109 unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
2110 assert(DominateeNum != 0 && "Block was not numbered properly");
2111 return DominatorNum < DominateeNum;
2112 }
2113
2114 bool MemorySSA::dominates(const MemoryAccess *Dominator,
2115 const MemoryAccess *Dominatee) const {
2116 if (Dominator == Dominatee)
2117 return true;
2118
2119 if (isLiveOnEntryDef(Dominatee))
2120 return false;
2121
2122 if (Dominator->getBlock() != Dominatee->getBlock())
2123 return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
2124 return locallyDominates(Dominator, Dominatee);
2125 }
2126
2127 bool MemorySSA::dominates(const MemoryAccess *Dominator,
2128 const Use &Dominatee) const {
2129 if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
2130 BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
2131 // The def must dominate the incoming block of the phi.
2132 if (UseBB != Dominator->getBlock())
2133 return DT->dominates(Dominator->getBlock(), UseBB);
2134 // If the UseBB and the DefBB are the same, compare locally.
2135 return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
2136 }
2137 // If it's not a PHI node use, the normal dominates can already handle it.
2138 return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
2139 }
2140
2141 const static char LiveOnEntryStr[] = "liveOnEntry";
2142
2143 void MemoryAccess::print(raw_ostream &OS) const {
2144 switch (getValueID()) {
2145 case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
2146 case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
2147 case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
2148 }
2149 llvm_unreachable("invalid value id");
2150 }
2151
2152 void MemoryDef::print(raw_ostream &OS) const {
2153 MemoryAccess *UO = getDefiningAccess();
2154
2155 auto printID = [&OS](MemoryAccess *A) {
2156 if (A && A->getID())
2157 OS << A->getID();
2158 else
2159 OS << LiveOnEntryStr;
2160 };
2161
2162 OS << getID() << " = MemoryDef(";
2163 printID(UO);
2164 OS << ")";
2165
2166 if (isOptimized()) {
2167 OS << "->";
2168 printID(getOptimized());
2169
2170 if (Optional<AliasResult> AR = getOptimizedAccessType())
2171 OS << " " << *AR;
2172 }
2173 }
2174
2175 void MemoryPhi::print(raw_ostream &OS) const {
2176 ListSeparator LS(",");
2177 OS << getID() << " = MemoryPhi(";
2178 for (const auto &Op : operands()) {
2179 BasicBlock *BB = getIncomingBlock(Op);
2180 MemoryAccess *MA = cast<MemoryAccess>(Op);
2181
2182 OS << LS << '{';
2183 if (BB->hasName())
2184 OS << BB->getName();
2185 else
2186 BB->printAsOperand(OS, false);
2187 OS << ',';
2188 if (unsigned ID = MA->getID())
2189 OS << ID;
2190 else
2191 OS << LiveOnEntryStr;
2192 OS << '}';
2193 }
2194 OS << ')';
2195 }
2196
2197 void MemoryUse::print(raw_ostream &OS) const {
2198 MemoryAccess *UO = getDefiningAccess();
2199 OS << "MemoryUse(";
2200 if (UO && UO->getID())
2201 OS << UO->getID();
2202 else
2203 OS << LiveOnEntryStr;
2204 OS << ')';
2205
2206 if (Optional<AliasResult> AR = getOptimizedAccessType())
2207 OS << " " << *AR;
2208 }
2209
2210 void MemoryAccess::dump() const {
2211 // Cannot completely remove virtual function even in release mode.
2212 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2213 print(dbgs());
2214 dbgs() << "\n";
2215 #endif
2216 }
2217
2218 char MemorySSAPrinterLegacyPass::ID = 0;
2219
2220 MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
2221 initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
2222 }
2223
2224 void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
2225 AU.setPreservesAll();
2226 AU.addRequired<MemorySSAWrapperPass>();
2227 }
2228
2229 class DOTFuncMSSAInfo {
2230 private:
2231 const Function &F;
2232 MemorySSAAnnotatedWriter MSSAWriter;
2233
2234 public:
2235 DOTFuncMSSAInfo(const Function &F, MemorySSA &MSSA)
2236 : F(F), MSSAWriter(&MSSA) {}
2237
2238 const Function *getFunction() { return &F; }
2239 MemorySSAAnnotatedWriter &getWriter() { return MSSAWriter; }
2240 };
2241
2242 namespace llvm {
2243
2244 template <>
2245 struct GraphTraits<DOTFuncMSSAInfo *> : public GraphTraits<const BasicBlock *> {
2246 static NodeRef getEntryNode(DOTFuncMSSAInfo *CFGInfo) {
2247 return &(CFGInfo->getFunction()->getEntryBlock());
2248 }
2249
2250 // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
2251 using nodes_iterator = pointer_iterator<Function::const_iterator>;
2252
2253 static nodes_iterator nodes_begin(DOTFuncMSSAInfo *CFGInfo) {
2254 return nodes_iterator(CFGInfo->getFunction()->begin());
2255 }
2256
2257 static nodes_iterator nodes_end(DOTFuncMSSAInfo *CFGInfo) {
2258 return nodes_iterator(CFGInfo->getFunction()->end());
2259 }
2260
2261 static size_t size(DOTFuncMSSAInfo *CFGInfo) {
2262 return CFGInfo->getFunction()->size();
2263 }
2264 };
2265
2266 template <>
2267 struct DOTGraphTraits<DOTFuncMSSAInfo *> : public DefaultDOTGraphTraits {
2268
2269 DOTGraphTraits(bool IsSimple = false) : DefaultDOTGraphTraits(IsSimple) {}
2270
2271 static std::string getGraphName(DOTFuncMSSAInfo *CFGInfo) {
2272 return "MSSA CFG for '" + CFGInfo->getFunction()->getName().str() +
2273 "' function";
2274 }
2275
2276 std::string getNodeLabel(const BasicBlock *Node, DOTFuncMSSAInfo *CFGInfo) {
2277 return DOTGraphTraits<DOTFuncInfo *>::getCompleteNodeLabel(
2278 Node, nullptr,
2279 [CFGInfo](raw_string_ostream &OS, const BasicBlock &BB) -> void {
2280 BB.print(OS, &CFGInfo->getWriter(), true, true);
2281 },
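// Erase a comment from the label unless it carries a MemorySSA annotation
// (MemoryDef/MemoryPhi/MemoryUse).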
2282 [](std::string &S, unsigned &I, unsigned Idx) -> void {
2283 std::string Str = S.substr(I, Idx - I);
2284 StringRef SR = Str;
2285 if (SR.count(" = MemoryDef(") || SR.count(" = MemoryPhi(") ||
2286 SR.count("MemoryUse("))
2287 return;
2288 DOTGraphTraits<DOTFuncInfo *>::eraseComment(S, I, Idx);
2289 });
2290 }
2291
2292 static std::string getEdgeSourceLabel(const BasicBlock *Node,
2293 const_succ_iterator I) {
2294 return DOTGraphTraits<DOTFuncInfo *>::getEdgeSourceLabel(Node, I);
2295 }
2296
2297 /// Edge attributes (such as raw PGO branch weights) are not displayed.
2298 std::string getEdgeAttributes(const BasicBlock *Node, const_succ_iterator I,
2299 DOTFuncMSSAInfo *CFGInfo) {
2300 return "";
2301 }
2302
2303 std::string getNodeAttributes(const BasicBlock *Node,
2304 DOTFuncMSSAInfo *CFGInfo) {
2305 return getNodeLabel(Node, CFGInfo).find(';') != std::string::npos
2306 ? "style=filled, fillcolor=lightpink"
2307 : "";
2308 }
2309 };
2310
2311 } // namespace llvm
2312
2313 bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
2314 auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
2315 if (DotCFGMSSA != "") {
2316 DOTFuncMSSAInfo CFGInfo(F, MSSA);
2317 WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA);
2318 } else
2319 MSSA.print(dbgs());
2320
2321 if (VerifyMemorySSA)
2322 MSSA.verifyMemorySSA();
2323 return false;
2324 }
2325
2326 AnalysisKey MemorySSAAnalysis::Key;
2327
2328 MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
2329 FunctionAnalysisManager &AM) {
2330 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
2331 auto &AA = AM.getResult<AAManager>(F);
2332 return MemorySSAAnalysis::Result(std::make_unique<MemorySSA>(F, &AA, &DT));
2333 }
2334
2335 bool MemorySSAAnalysis::Result::invalidate(
2336 Function &F, const PreservedAnalyses &PA,
2337 FunctionAnalysisManager::Invalidator &Inv) {
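// The result is invalidated unless MemorySSA itself is preserved; it is also
// invalidated if either AA or the dominator tree is invalidated.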
2338 auto PAC = PA.getChecker<MemorySSAAnalysis>();
2339 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
2340 Inv.invalidate<AAManager>(F, PA) ||
2341 Inv.invalidate<DominatorTreeAnalysis>(F, PA);
2342 }
2343
2344 PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
2345 FunctionAnalysisManager &AM) {
2346 auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
2347 if (DotCFGMSSA != "") {
2348 DOTFuncMSSAInfo CFGInfo(F, MSSA);
2349 WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA);
2350 } else {
2351 OS << "MemorySSA for function: " << F.getName() << "\n";
2352 MSSA.print(OS);
2353 }
2354
2355 return PreservedAnalyses::all();
2356 }
2357
2358 PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
2359 FunctionAnalysisManager &AM) {
2360 AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();
2361
2362 return PreservedAnalyses::all();
2363 }
2364
2365 char MemorySSAWrapperPass::ID = 0;
2366
2367 MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
2368 initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
2369 }
2370
2371 void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }
2372
2373 void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
2374 AU.setPreservesAll();
2375 AU.addRequiredTransitive<DominatorTreeWrapperPass>();
2376 AU.addRequiredTransitive<AAResultsWrapperPass>();
2377 }
2378
2379 bool MemorySSAWrapperPass::runOnFunction(Function &F) {
2380 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2381 auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2382 MSSA.reset(new MemorySSA(F, &AA, &DT));
2383 return false;
2384 }
2385
2386 void MemorySSAWrapperPass::verifyAnalysis() const {
2387 if (VerifyMemorySSA)
2388 MSSA->verifyMemorySSA();
2389 }
2390
2391 void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
2392 MSSA->print(OS);
2393 }
2394
2395 MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
2396
2397 /// Walk the use-def chains starting at \p StartingAccess and find
2398 /// the MemoryAccess that actually clobbers Loc.
2399 ///
2400 /// \returns our clobbering memory access
2401 template <typename AliasAnalysisType>
2402 MemoryAccess *
2403 MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
2404 MemoryAccess *StartingAccess, const MemoryLocation &Loc,
2405 unsigned &UpwardWalkLimit) {
2406 assert(!isa<MemoryUse>(StartingAccess) && "Use cannot be defining access");
2407
2408 Instruction *I = nullptr;
2409 if (auto *StartingUseOrDef = dyn_cast<MemoryUseOrDef>(StartingAccess)) {
2410 if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
2411 return StartingUseOrDef;
2412
2413 I = StartingUseOrDef->getMemoryInst();
2414
2415 // Conservatively, fences are always clobbers, so don't perform the walk if
2416 // we hit a fence.
2417 if (!isa<CallBase>(I) && I->isFenceLike())
2418 return StartingUseOrDef;
2419 }
2420
2421 UpwardsMemoryQuery Q;
2422 Q.OriginalAccess = StartingAccess;
2423 Q.StartingLoc = Loc;
2424 Q.Inst = nullptr;
2425 Q.IsCall = false;
2426
2427 // Unlike the other function, do not walk to the def of a def, because we are
2428 // handed something we already believe is the clobbering access.
2429 // We never set SkipSelf to true in Q in this method.
2430 MemoryAccess *Clobber =
2431 Walker.findClobber(StartingAccess, Q, UpwardWalkLimit);
2432 LLVM_DEBUG({
2433 dbgs() << "Clobber starting at access " << *StartingAccess << "\n";
2434 if (I)
2435 dbgs() << " for instruction " << *I << "\n";
2436 dbgs() << " is " << *Clobber << "\n";
2437 });
2438 return Clobber;
2439 }
2440
2441 template <typename AliasAnalysisType>
2442 MemoryAccess *
2443 MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
2444 MemoryAccess *MA, unsigned &UpwardWalkLimit, bool SkipSelf) {
2445 auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
2446 // If this is a MemoryPhi, we can't do anything.
2447 if (!StartingAccess)
2448 return MA;
2449
2450 bool IsOptimized = false;
2451
2452 // If this is an already optimized use or def, return the optimized result.
2453 // Note: Currently, we store the optimized def result in a separate field,
2454 // since we can't use the defining access.
2455 if (StartingAccess->isOptimized()) {
2456 if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
2457 return StartingAccess->getOptimized();
2458 IsOptimized = true;
2459 }
2460
2461 const Instruction *I = StartingAccess->getMemoryInst();
2462 // We can't sanely do anything with a fence, since they conservatively clobber
2463 // all memory, and have no locations to get pointers from to try to
2464 // disambiguate.
2465 if (!isa<CallBase>(I) && I->isFenceLike())
2466 return StartingAccess;
2467
2468 UpwardsMemoryQuery Q(I, StartingAccess);
2469
2470 if (isUseTriviallyOptimizableToLiveOnEntry(*Walker.getAA(), I)) {
2471 MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
2472 StartingAccess->setOptimized(LiveOnEntry);
2473 StartingAccess->setOptimizedAccessType(None);
2474 return LiveOnEntry;
2475 }
2476
2477 MemoryAccess *OptimizedAccess;
2478 if (!IsOptimized) {
2479 // Start with the thing we already think clobbers this location
2480 MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
2481
2482 // At this point, DefiningAccess may be the live on entry def.
2483 // If it is, we will not get a better result.
2484 if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
2485 StartingAccess->setOptimized(DefiningAccess);
2486 StartingAccess->setOptimizedAccessType(None);
2487 return DefiningAccess;
2488 }
2489
2490 OptimizedAccess = Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
2491 StartingAccess->setOptimized(OptimizedAccess);
2492 if (MSSA->isLiveOnEntryDef(OptimizedAccess))
2493 StartingAccess->setOptimizedAccessType(None);
2494 else if (Q.AR && *Q.AR == AliasResult::MustAlias)
2495 StartingAccess->setOptimizedAccessType(
2496 AliasResult(AliasResult::MustAlias));
2497 } else
2498 OptimizedAccess = StartingAccess->getOptimized();
2499
2500 LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2501 LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
2502 LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
2503 LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");
2504
2505 MemoryAccess *Result;
2506 if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
2507 isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) {
2508 assert(isa<MemoryDef>(Q.OriginalAccess));
2509 Q.SkipSelfAccess = true;
2510 Result = Walker.findClobber(OptimizedAccess, Q, UpwardWalkLimit);
2511 } else
2512 Result = OptimizedAccess;
2513
2514 LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
2515 LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");
2516
2517 return Result;
2518 }
2519
2520 MemoryAccess *
2521 DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2522 if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
2523 return Use->getDefiningAccess();
2524 return MA;
2525 }
2526
2527 MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
2528 MemoryAccess *StartingAccess, const MemoryLocation &) {
2529 if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
2530 return Use->getDefiningAccess();
2531 return StartingAccess;
2532 }
2533
2534 void MemoryPhi::deleteMe(DerivedUser *Self) {
2535 delete static_cast<MemoryPhi *>(Self);
2536 }
2537
2538 void MemoryDef::deleteMe(DerivedUser *Self) {
2539 delete static_cast<MemoryDef *>(Self);
2540 }
2541
2542 void MemoryUse::deleteMe(DerivedUser *Self) {
2543 delete static_cast<MemoryUse *>(Self);
2544 }
2545
2546 bool upward_defs_iterator::IsGuaranteedLoopInvariant(Value *Ptr) const {
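// A pointer is treated as guaranteed loop invariant if, after stripping
// casts, it is an instruction in the entry block, a non-instruction value
// (argument, global, constant), an alloca, or a GEP with all-constant indices
// whose base is a non-instruction or an alloca.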
2547 auto IsGuaranteedLoopInvariantBase = [](Value *Ptr) {
2548 Ptr = Ptr->stripPointerCasts();
2549 if (!isa<Instruction>(Ptr))
2550 return true;
2551 return isa<AllocaInst>(Ptr);
2552 };
2553
2554 Ptr = Ptr->stripPointerCasts();
2555 if (auto *I = dyn_cast<Instruction>(Ptr)) {
2556 if (I->getParent()->isEntryBlock())
2557 return true;
2558 }
2559 if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
2560 return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) &&
2561 GEP->hasAllConstantIndices();
2562 }
2563 return IsGuaranteedLoopInvariantBase(Ptr);
2564 }
2565