//===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the MemorySSA class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CFGPrinter.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Use.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <memory>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memoryssa"

static cl::opt<std::string>
    DotCFGMSSA("dot-cfg-mssa",
               cl::value_desc("file name for generated dot file"),
               cl::desc("file name for generated dot file"), cl::init(""));

INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                      true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                    true)

INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
                      "Memory SSA Printer", false, false)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
                    "Memory SSA Printer", false, false)

static cl::opt<unsigned> MaxCheckLimit(
    "memssa-check-limit", cl::Hidden, cl::init(100),
    cl::desc("The maximum number of stores/phis MemorySSA "
             "will consider trying to walk past (default = 100)"));

// Always verify MemorySSA if expensive checking is enabled.
#ifdef EXPENSIVE_CHECKS
bool llvm::VerifyMemorySSA = true;
#else
bool llvm::VerifyMemorySSA = false;
#endif

static cl::opt<bool, true>
    VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA),
                     cl::Hidden, cl::desc("Enable verification of MemorySSA."));

const static char LiveOnEntryStr[] = "liveOnEntry";

namespace {

/// An assembly annotator class to print Memory SSA information in
/// comments.
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
  const MemorySSA *MSSA;

public:
  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
      OS << "; " << *MA << "\n";
  }
};
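
// Illustrative sketch (not taken from a real run): for a simple store/load
// pair, the annotations emitted above look like
//
//   ; 1 = MemoryDef(liveOnEntry)
//     store i32 0, ptr %p
//   ; MemoryUse(1)
//     %x = load i32, ptr %p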

/// An assembly annotator class to print Memory SSA information, along with
/// the result of a clobber query for each access, in comments.
class MemorySSAWalkerAnnotatedWriter : public AssemblyAnnotationWriter {
  MemorySSA *MSSA;
  MemorySSAWalker *Walker;
  BatchAAResults BAA;

public:
  MemorySSAWalkerAnnotatedWriter(MemorySSA *M)
      : MSSA(M), Walker(M->getWalker()), BAA(M->getAA()) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I)) {
      MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(MA, BAA);
      OS << "; " << *MA;
      if (Clobber) {
        OS << " - clobbered by ";
        if (MSSA->isLiveOnEntryDef(Clobber))
          OS << LiveOnEntryStr;
        else
          OS << *Clobber;
      }
      OS << "\n";
    }
  }
};

} // namespace

namespace {

/// Our current alias analysis API differentiates heavily between calls and
/// non-calls, and functions called on one usually assert on the other.
/// This class encapsulates the distinction to simplify other code that wants
/// "Memory affecting instructions and related data" to use as a key.
/// For example, this class is used as a DenseMap key in the use optimizer.
class MemoryLocOrCall {
public:
  bool IsCall = false;

  MemoryLocOrCall(MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}
  MemoryLocOrCall(const MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}

  MemoryLocOrCall(Instruction *Inst) {
    if (auto *C = dyn_cast<CallBase>(Inst)) {
      IsCall = true;
      Call = C;
    } else {
      IsCall = false;
      // There is no such thing as a MemoryLocation for a fence inst, and it
      // is unique in that regard.
      if (!isa<FenceInst>(Inst))
        Loc = MemoryLocation::get(Inst);
    }
  }

  explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}

  const CallBase *getCall() const {
    assert(IsCall);
    return Call;
  }

  MemoryLocation getLoc() const {
    assert(!IsCall);
    return Loc;
  }

  bool operator==(const MemoryLocOrCall &Other) const {
    if (IsCall != Other.IsCall)
      return false;

    if (!IsCall)
      return Loc == Other.Loc;

    if (Call->getCalledOperand() != Other.Call->getCalledOperand())
      return false;

    return Call->arg_size() == Other.Call->arg_size() &&
           std::equal(Call->arg_begin(), Call->arg_end(),
                      Other.Call->arg_begin());
  }

private:
  union {
    const CallBase *Call;
    MemoryLocation Loc;
  };
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<MemoryLocOrCall> {
  static inline MemoryLocOrCall getEmptyKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
  }

  static inline MemoryLocOrCall getTombstoneKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
  }

  static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
    if (!MLOC.IsCall)
      return hash_combine(
          MLOC.IsCall,
          DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));

    hash_code hash =
        hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
                                      MLOC.getCall()->getCalledOperand()));

    for (const Value *Arg : MLOC.getCall()->args())
      hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
    return hash;
  }

  static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm
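
// Illustrative usage note: this specialization is what allows MemoryLocOrCall
// to serve as a hash-map key; the use optimizer below relies on it via
//   DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;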

/// This does one-way checks to see if Use could theoretically be hoisted above
/// MayClobber. This will not check the other way around.
///
/// This assumes that, for the purposes of MemorySSA, Use comes directly after
/// MayClobber, with no potentially clobbering operations in between them.
/// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
static bool areLoadsReorderable(const LoadInst *Use,
                                const LoadInst *MayClobber) {
  bool VolatileUse = Use->isVolatile();
  bool VolatileClobber = MayClobber->isVolatile();
  // Volatile operations may never be reordered with other volatile operations.
  if (VolatileUse && VolatileClobber)
    return false;
  // Otherwise, volatile doesn't matter here. From the language reference:
  // 'optimizers may change the order of volatile operations relative to
  // non-volatile operations.'

  // If a load is seq_cst, it cannot be moved above other loads. If its ordering
  // is weaker, it can be moved above other loads. We just need to be sure that
  // MayClobber isn't an acquire load, because loads can't be moved above
  // acquire loads.
  //
  // Note that this explicitly *does* allow the free reordering of monotonic (or
  // weaker) loads of the same address.
  bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
  bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
                                                     AtomicOrdering::Acquire);
  return !(SeqCstUse || MayClobberIsAcquire);
}
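
// A minimal sketch (hypothetical IR) of what the rules above allow and forbid:
//
//   %a = load atomic i32, ptr %p monotonic  ; MayClobber
//   %b = load atomic i32, ptr %p monotonic  ; Use: reorderable (returns true)
//
//   %c = load atomic i32, ptr %p acquire    ; MayClobber
//   %d = load i32, ptr %q                   ; Use: not reorderable, because no
//                                           ; load may be hoisted above an
//                                           ; acquire load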

template <typename AliasAnalysisType>
static bool
instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
                         const Instruction *UseInst, AliasAnalysisType &AA) {
  Instruction *DefInst = MD->getMemoryInst();
  assert(DefInst && "Defining instruction not actually an instruction");

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
    // These intrinsics will show up as affecting memory, but they are just
    // markers, mostly.
    //
    // FIXME: We probably don't actually want MemorySSA to model these at all
    // (including creating MemoryAccesses for them): we just end up inventing
    // clobbers where they don't really exist at all. Please see D43269 for
    // context.
    switch (II->getIntrinsicID()) {
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::assume:
    case Intrinsic::experimental_noalias_scope_decl:
    case Intrinsic::pseudoprobe:
      return false;
    case Intrinsic::dbg_addr:
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_label:
    case Intrinsic::dbg_value:
      llvm_unreachable("debuginfo shouldn't have associated defs!");
    default:
      break;
    }
  }

  if (auto *CB = dyn_cast_or_null<CallBase>(UseInst)) {
    ModRefInfo I = AA.getModRefInfo(DefInst, CB);
    return isModOrRefSet(I);
  }

  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
    if (auto *UseLoad = dyn_cast_or_null<LoadInst>(UseInst))
      return !areLoadsReorderable(UseLoad, DefLoad);

  ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
  return isModSet(I);
}

template <typename AliasAnalysisType>
static bool instructionClobbersQuery(MemoryDef *MD, const MemoryUseOrDef *MU,
                                     const MemoryLocOrCall &UseMLOC,
                                     AliasAnalysisType &AA) {
  // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
  // to exist while MemoryLocOrCall is pushed through places.
  if (UseMLOC.IsCall)
    return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
                                    AA);
  return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
                                  AA);
}

// Return true when MD may alias MU, return false otherwise.
bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                        AliasAnalysis &AA) {
  return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA);
}

namespace {

struct UpwardsMemoryQuery {
  // True if our original query started off as a call
  bool IsCall = false;
  // The pointer location we started the query with. This will be empty if
  // IsCall is true.
  MemoryLocation StartingLoc;
  // This is the instruction we were querying about.
  const Instruction *Inst = nullptr;
  // The MemoryAccess we actually got called with, used to test local domination
  const MemoryAccess *OriginalAccess = nullptr;
  bool SkipSelfAccess = false;

  UpwardsMemoryQuery() = default;

  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
      : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
    if (!IsCall)
      StartingLoc = MemoryLocation::get(Inst);
  }
};

} // end anonymous namespace

static bool isUseTriviallyOptimizableToLiveOnEntry(BatchAAResults &AA,
                                                   const Instruction *I) {
  // If the memory can't be changed, then loads of the memory can't be
  // clobbered.
  if (auto *LI = dyn_cast<LoadInst>(I)) {
    return I->hasMetadata(LLVMContext::MD_invariant_load) ||
           !isModSet(AA.getModRefInfoMask(MemoryLocation::get(LI)));
  }
  return false;
}
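
// Illustrative example (hypothetical IR): a load tagged with !invariant.load
// reads memory that never changes, so its defining access can be set straight
// to liveOnEntry without walking:
//
//   %v = load i32, ptr %p, !invariant.load !0   ; MemoryUse(liveOnEntry)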

/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
/// in between `Start` and `ClobberAt` clobbers `Start`.
///
/// This is meant to be as simple and self-contained as possible. Because it
/// uses no cache, etc., it can be relatively expensive.
///
/// \param Start The MemoryAccess that we want to walk from.
/// \param ClobberAt A clobber for Start.
/// \param StartLoc The MemoryLocation for Start.
/// \param MSSA The MemorySSA instance that Start and ClobberAt belong to.
/// \param Query The UpwardsMemoryQuery we used for our search.
/// \param AA The AliasAnalysis we used for our search.
/// \param AllowImpreciseClobber Always false, unless we do relaxed verify.
LLVM_ATTRIBUTE_UNUSED static void
checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
                   const MemoryLocation &StartLoc, const MemorySSA &MSSA,
                   const UpwardsMemoryQuery &Query, BatchAAResults &AA,
                   bool AllowImpreciseClobber = false) {
  assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");

  if (MSSA.isLiveOnEntryDef(Start)) {
    assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
           "liveOnEntry must clobber itself");
    return;
  }

  bool FoundClobber = false;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;
  SmallVector<ConstMemoryAccessPair, 8> Worklist;
  Worklist.emplace_back(Start, StartLoc);
  // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
  // is found, complain.
  while (!Worklist.empty()) {
    auto MAP = Worklist.pop_back_val();
    // All we care about is that nothing from Start to ClobberAt clobbers Start.
    // We learn nothing from revisiting nodes.
    if (!VisitedPhis.insert(MAP).second)
      continue;

    for (const auto *MA : def_chain(MAP.first)) {
      if (MA == ClobberAt) {
        if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
          // instructionClobbersQuery isn't exactly free, so don't use `|=`,
          // since it won't let us short-circuit.
          //
          // Also, note that this can't be hoisted out of the `Worklist` loop,
          // since MD may only act as a clobber for 1 of N MemoryLocations.
          FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
          if (!FoundClobber) {
            if (instructionClobbersQuery(MD, MAP.second, Query.Inst, AA))
              FoundClobber = true;
          }
        }
        break;
      }

      // We should never hit liveOnEntry, unless it's the clobber.
      assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");

      if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
        // If Start is a Def, skip self.
        if (MD == Start)
          continue;

        assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) &&
               "Found clobber before reaching ClobberAt!");
        continue;
      }

      if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
        (void)MU;
        assert(MU == Start &&
               "Can only find use in def chain if Start is a use");
        continue;
      }

      assert(isa<MemoryPhi>(MA));

      // Add reachable phi predecessors
      for (auto ItB = upward_defs_begin(
                   {const_cast<MemoryAccess *>(MA), MAP.second},
                   MSSA.getDomTree()),
                ItE = upward_defs_end();
           ItB != ItE; ++ItB)
        if (MSSA.getDomTree().isReachableFromEntry(ItB.getPhiArgBlock()))
          Worklist.emplace_back(*ItB);
    }
  }

  // If the verify is done following an optimization, it's possible that
  // ClobberAt was a conservative clobbering, that we can now infer is not a
  // true clobbering access. Don't fail the verify if that's the case.
  // We do have accesses that claim they're optimized, but could be optimized
  // further. Updating all these can be expensive, so allow it for now (FIXME).
  if (AllowImpreciseClobber)
    return;

  // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
  // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
  assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
         "ClobberAt never acted as a clobber");
}

namespace {

/// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
/// in one class.
class ClobberWalker {
  /// Save a few bytes by using unsigned instead of size_t.
  using ListIndex = unsigned;

  /// Represents a span of contiguous MemoryDefs, potentially ending in a
  /// MemoryPhi.
  struct DefPath {
    MemoryLocation Loc;
    // Note that, because we always walk in reverse, Last will always dominate
    // First. Also note that First and Last are inclusive.
    MemoryAccess *First;
    MemoryAccess *Last;
    std::optional<ListIndex> Previous;

    DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
            std::optional<ListIndex> Previous)
        : Loc(Loc), First(First), Last(Last), Previous(Previous) {}

    DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
            std::optional<ListIndex> Previous)
        : DefPath(Loc, Init, Init, Previous) {}
  };

  const MemorySSA &MSSA;
  DominatorTree &DT;
  BatchAAResults *AA;
  UpwardsMemoryQuery *Query;
  unsigned *UpwardWalkLimit;

  // Phi optimization bookkeeping:
  // List of DefPath to process during the current phi optimization walk.
  SmallVector<DefPath, 32> Paths;
  // List of visited <Access, Location> pairs; we can skip paths already
  // visited with the same memory location.
  DenseSet<ConstMemoryAccessPair> VisitedPhis;

  /// Find the nearest def or phi that `From` can legally be optimized to.
  const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
    assert(From->getNumOperands() && "Phi with no operands?");

    BasicBlock *BB = From->getBlock();
    MemoryAccess *Result = MSSA.getLiveOnEntryDef();
    DomTreeNode *Node = DT.getNode(BB);
    while ((Node = Node->getIDom())) {
      auto *Defs = MSSA.getBlockDefs(Node->getBlock());
      if (Defs)
        return &*Defs->rbegin();
    }
    return Result;
  }

  /// Result of calling walkToPhiOrClobber.
  struct UpwardsWalkResult {
    /// The "Result" of the walk. Either a clobber, the last thing we walked, or
    /// both. Include alias info when clobber found.
    MemoryAccess *Result;
    bool IsKnownClobber;
  };

  /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
  /// This will update Desc.Last as it walks. It will (optionally) also stop at
  /// StopAt.
  ///
  /// This does not test for whether StopAt is a clobber.
  UpwardsWalkResult
  walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
                     const MemoryAccess *SkipStopAt = nullptr) const {
    assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
    assert(UpwardWalkLimit && "Need a valid walk limit");
    bool LimitAlreadyReached = false;
    // (*UpwardWalkLimit) may be 0 here, due to the loop in tryOptimizePhi. Set
    // it to 1. This will not do any alias() calls. It either returns in the
    // first iteration in the loop below, or is set back to 0 if all def chains
    // are free of MemoryDefs.
    if (!*UpwardWalkLimit) {
      *UpwardWalkLimit = 1;
      LimitAlreadyReached = true;
    }

    for (MemoryAccess *Current : def_chain(Desc.Last)) {
      Desc.Last = Current;
      if (Current == StopAt || Current == SkipStopAt)
        return {Current, false};

      if (auto *MD = dyn_cast<MemoryDef>(Current)) {
        if (MSSA.isLiveOnEntryDef(MD))
          return {MD, true};

        if (!--*UpwardWalkLimit)
          return {Current, true};

        if (instructionClobbersQuery(MD, Desc.Loc, Query->Inst, *AA))
          return {MD, true};
      }
    }

    if (LimitAlreadyReached)
      *UpwardWalkLimit = 0;

    assert(isa<MemoryPhi>(Desc.Last) &&
           "Ended at a non-clobber that's not a phi?");
    return {Desc.Last, false};
  }

  void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
                   ListIndex PriorNode) {
    auto UpwardDefsBegin = upward_defs_begin({Phi, Paths[PriorNode].Loc}, DT);
    auto UpwardDefs = make_range(UpwardDefsBegin, upward_defs_end());
    for (const MemoryAccessPair &P : UpwardDefs) {
      PausedSearches.push_back(Paths.size());
      Paths.emplace_back(P.second, P.first, PriorNode);
    }
  }

  /// Represents a search that terminated after finding a clobber. This clobber
  /// may or may not be present in the path of defs from LastNode..SearchStart,
  /// since it may have been retrieved from cache.
  struct TerminatedPath {
    MemoryAccess *Clobber;
    ListIndex LastNode;
  };

  /// Get an access that keeps us from optimizing to the given phi.
  ///
  /// PausedSearches is an array of indices into the Paths array. Its incoming
  /// value is the indices of searches that stopped at the last phi optimization
  /// target. It's left in an unspecified state.
  ///
  /// If this returns std::nullopt, NewPaused is a vector of searches that
  /// terminated at StopWhere. Otherwise, NewPaused is left in an unspecified
  /// state.
  std::optional<TerminatedPath>
  getBlockingAccess(const MemoryAccess *StopWhere,
                    SmallVectorImpl<ListIndex> &PausedSearches,
                    SmallVectorImpl<ListIndex> &NewPaused,
                    SmallVectorImpl<TerminatedPath> &Terminated) {
    assert(!PausedSearches.empty() && "No searches to continue?");

    // BFS vs DFS really doesn't make a difference here, so just do a DFS with
    // PausedSearches as our stack.
    while (!PausedSearches.empty()) {
      ListIndex PathIndex = PausedSearches.pop_back_val();
      DefPath &Node = Paths[PathIndex];

      // If we've already visited this path with this MemoryLocation, we don't
      // need to do so again.
      //
      // NOTE: That we just drop these paths on the ground makes caching
      // behavior sporadic. e.g. given a diamond:
      //  A
      // B C
      //  D
      //
      // ...If we walk D, B, A, C, we'll only cache the result of phi
      // optimization for A, B, and D; C will be skipped because it dies here.
      // This arguably isn't the worst thing ever, since:
      //   - We generally query things in a top-down order, so if we got below D
      //     without needing cache entries for {C, MemLoc}, then chances are
      //     that those cache entries would end up ultimately unused.
      //   - We still cache things for A, so C only needs to walk up a bit.
      // If this behavior becomes problematic, we can fix without a ton of extra
      // work.
      if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
        continue;

      const MemoryAccess *SkipStopWhere = nullptr;
      if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
        assert(isa<MemoryDef>(Query->OriginalAccess));
        SkipStopWhere = Query->OriginalAccess;
      }

      UpwardsWalkResult Res = walkToPhiOrClobber(Node,
                                                 /*StopAt=*/StopWhere,
                                                 /*SkipStopAt=*/SkipStopWhere);
      if (Res.IsKnownClobber) {
        assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);

        // If this wasn't a cache hit, we hit a clobber when walking. That's a
        // failure.
        TerminatedPath Term{Res.Result, PathIndex};
        if (!MSSA.dominates(Res.Result, StopWhere))
          return Term;

        // Otherwise, it's a valid thing to potentially optimize to.
        Terminated.push_back(Term);
        continue;
      }

      if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
        // We've hit our target. Save this path off for if we want to continue
        // walking. If we are in the mode of skipping the OriginalAccess, and
        // we've reached back to the OriginalAccess, do not save path, we've
        // just looped back to self.
        if (Res.Result != SkipStopWhere)
          NewPaused.push_back(PathIndex);
        continue;
      }

      assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
      addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
    }

    return std::nullopt;
  }

  template <typename T, typename Walker>
  struct generic_def_path_iterator
      : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
                                    std::forward_iterator_tag, T *> {
    generic_def_path_iterator() = default;
    generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}

    T &operator*() const { return curNode(); }

    generic_def_path_iterator &operator++() {
      N = curNode().Previous;
      return *this;
    }

    bool operator==(const generic_def_path_iterator &O) const {
      if (N.has_value() != O.N.has_value())
        return false;
      return !N || *N == *O.N;
    }

  private:
    T &curNode() const { return W->Paths[*N]; }

    Walker *W = nullptr;
    std::optional<ListIndex> N;
  };

  using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
  using const_def_path_iterator =
      generic_def_path_iterator<const DefPath, const ClobberWalker>;

  iterator_range<def_path_iterator> def_path(ListIndex From) {
    return make_range(def_path_iterator(this, From), def_path_iterator());
  }

  iterator_range<const_def_path_iterator>
  const_def_path(ListIndex From) const {
    return make_range(const_def_path_iterator(this, From),
                      const_def_path_iterator());
  }

  struct OptznResult {
    /// The path that contains our result.
    TerminatedPath PrimaryClobber;
    /// The paths that we can legally cache back from, but that aren't
    /// necessarily the result of the Phi optimization.
    SmallVector<TerminatedPath, 4> OtherClobbers;
  };

  ListIndex defPathIndex(const DefPath &N) const {
    // The assert looks nicer if we don't need to do &N
    const DefPath *NP = &N;
    assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
           "Out of bounds DefPath!");
    return NP - &Paths.front();
  }

  /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
  /// that act as legal clobbers. Note that this won't return *all* clobbers.
  ///
  /// Phi optimization algorithm tl;dr:
  ///   - Find the earliest def/phi, A, we can optimize to
  ///   - Find if all paths from the starting memory access ultimately reach A
  ///     - If not, optimization isn't possible.
  ///     - Otherwise, walk from A to another clobber or phi, A'.
  ///       - If A' is a def, we're done.
  ///       - If A' is a phi, try to optimize it.
  ///
  /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
  /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
  OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
                             const MemoryLocation &Loc) {
    assert(Paths.empty() && VisitedPhis.empty() &&
           "Reset the optimization state.");

    Paths.emplace_back(Loc, Start, Phi, std::nullopt);
    // Stores how many "valid" optimization nodes we had prior to calling
    // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
    auto PriorPathsSize = Paths.size();

    SmallVector<ListIndex, 16> PausedSearches;
    SmallVector<ListIndex, 8> NewPaused;
    SmallVector<TerminatedPath, 4> TerminatedPaths;

    addSearches(Phi, PausedSearches, 0);

    // Moves the TerminatedPath with the "most dominated" Clobber to the end of
    // Paths.
    auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
      assert(!Paths.empty() && "Need a path to move");
      auto Dom = Paths.begin();
      for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
        if (!MSSA.dominates(I->Clobber, Dom->Clobber))
          Dom = I;
      auto Last = Paths.end() - 1;
      if (Last != Dom)
        std::iter_swap(Last, Dom);
    };

    MemoryPhi *Current = Phi;
    while (true) {
      assert(!MSSA.isLiveOnEntryDef(Current) &&
             "liveOnEntry wasn't treated as a clobber?");

      const auto *Target = getWalkTarget(Current);
      // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
      // optimization for the prior phi.
      assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
        return MSSA.dominates(P.Clobber, Target);
      }));

      // FIXME: This is broken, because the Blocker may be reported to be
      // liveOnEntry, and we'll happily wait for that to disappear (read: never)
      // For the moment, this is fine, since we do nothing with blocker info.
      if (std::optional<TerminatedPath> Blocker = getBlockingAccess(
              Target, PausedSearches, NewPaused, TerminatedPaths)) {

        // Find the node we started at. We can't search based on N->Last, since
        // we may have gone around a loop with a different MemoryLocation.
        auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
          return defPathIndex(N) < PriorPathsSize;
        });
        assert(Iter != def_path_iterator());

        DefPath &CurNode = *Iter;
        assert(CurNode.Last == Current);

        // Two things:
        // A. We can't reliably cache all of NewPaused back. Consider a case
        //    where we have two paths in NewPaused; one of which can't optimize
        //    above this phi, whereas the other can. If we cache the second path
        //    back, we'll end up with suboptimal cache entries. We can handle
        //    cases like this a bit better when we either try to find all
        //    clobbers that block phi optimization, or when our cache starts
        //    supporting unfinished searches.
        // B. We can't reliably cache TerminatedPaths back here without doing
        //    extra checks; consider a case like:
        //       T
        //      / \
        //     D   C
        //      \ /
        //       S
        //    Where T is our target, C is a node with a clobber on it, D is a
        //    diamond (with a clobber *only* on the left or right node, N), and
        //    S is our start. Say we walk to D, through the node opposite N
        //    (read: ignoring the clobber), and see a cache entry in the top
        //    node of D. That cache entry gets put into TerminatedPaths. We then
        //    walk up to C (N is later in our worklist), find the clobber, and
        //    quit. If we append TerminatedPaths to OtherClobbers, we'll cache
        //    the bottom part of D to the cached clobber, ignoring the clobber
        //    in N. Again, this problem goes away if we start tracking all
        //    blockers for a given phi optimization.
        TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
        return {Result, {}};
      }

      // If there's nothing left to search, then all paths led to valid clobbers
      // that we got from our cache; pick the nearest to the start, and allow
      // the rest to be cached back.
      if (NewPaused.empty()) {
        MoveDominatedPathToEnd(TerminatedPaths);
        TerminatedPath Result = TerminatedPaths.pop_back_val();
        return {Result, std::move(TerminatedPaths)};
      }

      MemoryAccess *DefChainEnd = nullptr;
      SmallVector<TerminatedPath, 4> Clobbers;
      for (ListIndex Paused : NewPaused) {
        UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
        if (WR.IsKnownClobber)
          Clobbers.push_back({WR.Result, Paused});
        else
          // Micro-opt: If we hit the end of the chain, save it.
          DefChainEnd = WR.Result;
      }

      if (!TerminatedPaths.empty()) {
        // If we couldn't find the dominating phi/liveOnEntry in the above loop,
        // do it now.
        if (!DefChainEnd)
          for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
            DefChainEnd = MA;
        assert(DefChainEnd && "Failed to find dominating phi/liveOnEntry");

        // If any of the terminated paths don't dominate the phi we'll try to
        // optimize, we need to figure out what they are and quit.
        const BasicBlock *ChainBB = DefChainEnd->getBlock();
        for (const TerminatedPath &TP : TerminatedPaths) {
          // Because we know that DefChainEnd is as "high" as we can go, we
          // don't need local dominance checks; BB dominance is sufficient.
          if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
            Clobbers.push_back(TP);
        }
      }

      // If we have clobbers in the def chain, find the one closest to Current
      // and quit.
      if (!Clobbers.empty()) {
        MoveDominatedPathToEnd(Clobbers);
        TerminatedPath Result = Clobbers.pop_back_val();
        return {Result, std::move(Clobbers)};
      }

      assert(all_of(NewPaused,
                    [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));

      // Because liveOnEntry is a clobber, this must be a phi.
      auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);

      PriorPathsSize = Paths.size();
      PausedSearches.clear();
      for (ListIndex I : NewPaused)
        addSearches(DefChainPhi, PausedSearches, I);
      NewPaused.clear();

      Current = DefChainPhi;
    }
  }

  void verifyOptResult(const OptznResult &R) const {
    assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
      return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
    }));
  }

  void resetPhiOptznState() {
    Paths.clear();
    VisitedPhis.clear();
  }

public:
  ClobberWalker(const MemorySSA &MSSA, DominatorTree &DT)
      : MSSA(MSSA), DT(DT) {}

  /// Finds the nearest clobber for the given query, optimizing phis if
  /// possible.
  MemoryAccess *findClobber(BatchAAResults &BAA, MemoryAccess *Start,
                            UpwardsMemoryQuery &Q, unsigned &UpWalkLimit) {
    AA = &BAA;
    Query = &Q;
    UpwardWalkLimit = &UpWalkLimit;
    // Starting limit must be > 0.
    if (!UpWalkLimit)
      UpWalkLimit++;

    MemoryAccess *Current = Start;
    // This walker pretends uses don't exist. If we're handed one, silently grab
    // its def. (This has the nice side-effect of ensuring we never cache uses)
    if (auto *MU = dyn_cast<MemoryUse>(Start))
      Current = MU->getDefiningAccess();

    DefPath FirstDesc(Q.StartingLoc, Current, Current, std::nullopt);
    // Fast path for the overly-common case (no crazy phi optimization
    // necessary)
    UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
    MemoryAccess *Result;
    if (WalkResult.IsKnownClobber) {
      Result = WalkResult.Result;
    } else {
      OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
                                          Current, Q.StartingLoc);
      verifyOptResult(OptRes);
      resetPhiOptznState();
      Result = OptRes.PrimaryClobber.Clobber;
    }

#ifdef EXPENSIVE_CHECKS
    if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0)
      checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, BAA);
#endif
    return Result;
  }
};
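
// Note (summarizing the code above, not a contract): callers seed UpWalkLimit
// from -memssa-check-limit, and walkToPhiOrClobber decrements it once per
// MemoryDef examined; when it reaches zero, the walk conservatively reports
// the current access as the clobber rather than paying for more alias queries.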

struct RenamePassData {
  DomTreeNode *DTN;
  DomTreeNode::const_iterator ChildIt;
  MemoryAccess *IncomingVal;

  RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
                 MemoryAccess *M)
      : DTN(D), ChildIt(It), IncomingVal(M) {}

  void swap(RenamePassData &RHS) {
    std::swap(DTN, RHS.DTN);
    std::swap(ChildIt, RHS.ChildIt);
    std::swap(IncomingVal, RHS.IncomingVal);
  }
};

} // end anonymous namespace

namespace llvm {

class MemorySSA::ClobberWalkerBase {
  ClobberWalker Walker;
  MemorySSA *MSSA;

public:
  ClobberWalkerBase(MemorySSA *M, DominatorTree *D) : Walker(*M, *D), MSSA(M) {}

  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
                                              const MemoryLocation &,
                                              BatchAAResults &, unsigned &);
  // The fourth argument (bool) defines whether the clobber search should skip
  // the original queried access. If true, there will be a follow-up query
  // searching for a clobber access past "self". Note that the Optimized access
  // is not updated if a new clobber is found by this SkipSelf search. If this
  // additional query becomes heavily used we may decide to cache the result.
  // Walker instantiations will decide how to set the SkipSelf bool.
  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, BatchAAResults &,
                                              unsigned &, bool,
                                              bool UseInvariantGroup = true);
};

/// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
/// longer does caching on its own, but the name has been retained for the
/// moment.
class MemorySSA::CachingWalker final : public MemorySSAWalker {
  ClobberWalkerBase *Walker;

public:
  CachingWalker(MemorySSA *M, ClobberWalkerBase *W)
      : MemorySSAWalker(M), Walker(W) {}
  ~CachingWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA,
                                          unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, false);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          BatchAAResults &BAA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, Loc, BAA, UWL);
  }
  // This method is not accessible outside of this file.
  MemoryAccess *getClobberingMemoryAccessWithoutInvariantGroup(
      MemoryAccess *MA, BatchAAResults &BAA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, false, false);
  }

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          BatchAAResults &BAA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, BAA, UpwardWalkLimit);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          BatchAAResults &BAA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, Loc, BAA, UpwardWalkLimit);
  }

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }
};

class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
  ClobberWalkerBase *Walker;

public:
  SkipSelfWalker(MemorySSA *M, ClobberWalkerBase *W)
      : MemorySSAWalker(M), Walker(W) {}
  ~SkipSelfWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA,
                                          unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, true);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          BatchAAResults &BAA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, Loc, BAA, UWL);
  }

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          BatchAAResults &BAA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, BAA, UpwardWalkLimit);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          BatchAAResults &BAA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, Loc, BAA, UpwardWalkLimit);
  }

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }
};

} // end namespace llvm

void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
                                    bool RenameAllUses) {
  // Pass through values to our successors
  for (const BasicBlock *S : successors(BB)) {
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    if (RenameAllUses) {
      bool ReplacementDone = false;
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
        if (Phi->getIncomingBlock(I) == BB) {
          Phi->setIncomingValue(I, IncomingVal);
          ReplacementDone = true;
        }
      (void)ReplacementDone;
      assert(ReplacementDone && "Incomplete phi during partial rename");
    } else
      Phi->addIncoming(IncomingVal, BB);
  }
}

/// Rename a single basic block into MemorySSA form.
/// Uses the standard SSA renaming algorithm.
/// \returns The new incoming value.
MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
                                     bool RenameAllUses) {
  auto It = PerBlockAccesses.find(BB);
  // Skip most processing if the list is empty.
  if (It != PerBlockAccesses.end()) {
    AccessList *Accesses = It->second.get();
    for (MemoryAccess &L : *Accesses) {
      if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
        if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
          MUD->setDefiningAccess(IncomingVal);
        if (isa<MemoryDef>(&L))
          IncomingVal = &L;
      } else {
        IncomingVal = &L;
      }
    }
  }
  return IncomingVal;
}
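
// Illustrative walk-through: for a block whose access list is
//   0 = MemoryPhi(...); 1 = MemoryDef(0); MemoryUse(1)
// renameBlock wires up any access whose defining access is still null (or all
// of them, if RenameAllUses), and returns the last def or phi it saw (here, 1)
// as the incoming value for successor phis and dominated blocks.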

/// This is the standard SSA renaming algorithm.
///
/// We walk the dominator tree in preorder, renaming accesses, and then filling
/// in phi nodes in our successors.
void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
                           SmallPtrSetImpl<BasicBlock *> &Visited,
                           bool SkipVisited, bool RenameAllUses) {
  assert(Root && "Trying to rename accesses in an unreachable block");

  SmallVector<RenamePassData, 32> WorkStack;
  // Skip everything if we already renamed this block and we are skipping.
  // Note: You can't sink this into the if, because we need it to occur
  // regardless of whether we skip blocks or not.
  bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
  if (SkipVisited && AlreadyVisited)
    return;

  IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
  renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
  WorkStack.push_back({Root, Root->begin(), IncomingVal});

  while (!WorkStack.empty()) {
    DomTreeNode *Node = WorkStack.back().DTN;
    DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
    IncomingVal = WorkStack.back().IncomingVal;

    if (ChildIt == Node->end()) {
      WorkStack.pop_back();
    } else {
      DomTreeNode *Child = *ChildIt;
      ++WorkStack.back().ChildIt;
      BasicBlock *BB = Child->getBlock();
      // Note: You can't sink this into the if, because we need it to occur
      // regardless of whether we skip blocks or not.
      AlreadyVisited = !Visited.insert(BB).second;
      if (SkipVisited && AlreadyVisited) {
        // We already visited this during our renaming, which can happen when
        // being asked to rename multiple blocks. Figure out the incoming val,
        // which is the last def.
        // Incoming value can only change if there is a block def, and in that
        // case, it's the last block def in the list.
        if (auto *BlockDefs = getWritableBlockDefs(BB))
          IncomingVal = &*BlockDefs->rbegin();
      } else
        IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
      renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
      WorkStack.push_back({Child, Child->begin(), IncomingVal});
    }
  }
}

/// This handles unreachable block accesses by deleting phi nodes in
/// unreachable blocks, and marking all other unreachable MemoryAccess's as
/// being uses of the live on entry definition.
void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
  assert(!DT->isReachableFromEntry(BB) &&
         "Reachable block found while handling unreachable blocks");

  // Make sure phi nodes in our reachable successors end up with a
  // LiveOnEntryDef for our incoming edge, even though our block is forward
  // unreachable. We could just disconnect these blocks from the CFG fully,
  // but we do not right now.
  for (const BasicBlock *S : successors(BB)) {
    if (!DT->isReachableFromEntry(S))
      continue;
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    Phi->addIncoming(LiveOnEntryDef.get(), BB);
  }

  auto It = PerBlockAccesses.find(BB);
  if (It == PerBlockAccesses.end())
    return;

  auto &Accesses = It->second;
  for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
    auto Next = std::next(AI);
    // If we have a phi, just remove it. We are going to replace all
    // users with live on entry.
    if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
      UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
    else
      Accesses->erase(AI);
    AI = Next;
  }
}
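
// Illustrative effect (hypothetical accesses): for an unreachable block whose
// list is
//   2 = MemoryPhi(...); 3 = MemoryDef(2)
// the phi is erased and the def's defining access is rewired to liveOnEntry.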

MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
    : DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
      SkipWalker(nullptr) {
  // Build MemorySSA using a batch alias analysis. This reuses the internal
  // state that AA collects during an alias()/getModRefInfo() call. This is
  // safe because there are no CFG changes while building MemorySSA, and it can
  // significantly reduce the time spent by the compiler in AA, because we will
  // make queries about all the instructions in the Function.
  assert(AA && "No alias analysis?");
  BatchAAResults BatchAA(*AA);
  buildMemorySSA(BatchAA);
  // Intentionally leave AA as nullptr while building so we don't accidentally
  // use the non-batch AliasAnalysis.
  this->AA = AA;
  // Also create the walker here.
  getWalker();
}

MemorySSA::~MemorySSA() {
  // Drop all our references
  for (const auto &Pair : PerBlockAccesses)
    for (MemoryAccess &MA : *Pair.second)
      MA.dropAllReferences();
}

MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
  auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = std::make_unique<AccessList>();
  return Res.first->second.get();
}

MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
  auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = std::make_unique<DefsList>();
  return Res.first->second.get();
}

namespace llvm {

/// This class is a batch walker of all MemoryUse's in the program, and points
/// their defining access at the thing that actually clobbers them. Because it
/// is a batch walker that touches everything, it does not operate like the
/// other walkers. This walker is basically performing a top-down SSA renaming
/// pass, where the version stack is used as the cache. This enables it to be
/// significantly more time and memory efficient than using the regular walker,
/// which is walking bottom-up.
class MemorySSA::OptimizeUses {
public:
  OptimizeUses(MemorySSA *MSSA, CachingWalker *Walker, BatchAAResults *BAA,
               DominatorTree *DT)
      : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}

  void optimizeUses();

private:
  /// This represents where a given MemoryLocation is in the stack.
  struct MemlocStackInfo {
    // This essentially is keeping track of versions of the stack. Whenever
    // the stack changes due to pushes or pops, these versions increase.
    unsigned long StackEpoch;
    unsigned long PopEpoch;
    // This is the lower bound of places on the stack to check. It is equal to
    // the place the last stack walk ended.
    // Note: Correctness depends on this being initialized to 0, which
    // DenseMap does.
    unsigned long LowerBound;
    const BasicBlock *LowerBoundBlock;
    // This is where the last walk for this memory location ended.
    unsigned long LastKill;
    bool LastKillValid;
  };

  void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
                           SmallVectorImpl<MemoryAccess *> &,
                           DenseMap<MemoryLocOrCall, MemlocStackInfo> &);

  MemorySSA *MSSA;
  CachingWalker *Walker;
  BatchAAResults *AA;
  DominatorTree *DT;
};

} // end namespace llvm
1323
1324 /// Optimize the uses in a given block This is basically the SSA renaming
1325 /// algorithm, with one caveat: We are able to use a single stack for all
1326 /// MemoryUses. This is because the set of *possible* reaching MemoryDefs is
1327 /// the same for every MemoryUse. The *actual* clobbering MemoryDef is just
1328 /// going to be some position in that stack of possible ones.
1329 ///
1330 /// We track the stack positions that each MemoryLocation needs
1331 /// to check, and last ended at. This is because we only want to check the
1332 /// things that changed since last time. The same MemoryLocation should
1333 /// get clobbered by the same store (getModRefInfo does not use invariantness or
1334 /// things like this, and if they start, we can modify MemoryLocOrCall to
1335 /// include relevant data)
optimizeUsesInBlock(const BasicBlock * BB,unsigned long & StackEpoch,unsigned long & PopEpoch,SmallVectorImpl<MemoryAccess * > & VersionStack,DenseMap<MemoryLocOrCall,MemlocStackInfo> & LocStackInfo)1336 void MemorySSA::OptimizeUses::optimizeUsesInBlock(
1337 const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
1338 SmallVectorImpl<MemoryAccess *> &VersionStack,
1339 DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {
1340
1341 /// If no accesses, nothing to do.
1342 MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
1343 if (Accesses == nullptr)
1344 return;
1345
1346 // Pop everything that doesn't dominate the current block off the stack,
1347 // increment the PopEpoch to account for this.
1348 while (true) {
1349 assert(
1350 !VersionStack.empty() &&
1351 "Version stack should have liveOnEntry sentinel dominating everything");
1352 BasicBlock *BackBlock = VersionStack.back()->getBlock();
1353 if (DT->dominates(BackBlock, BB))
1354 break;
1355 while (VersionStack.back()->getBlock() == BackBlock)
1356 VersionStack.pop_back();
1357 ++PopEpoch;
1358 }
1359
1360 for (MemoryAccess &MA : *Accesses) {
1361 auto *MU = dyn_cast<MemoryUse>(&MA);
1362 if (!MU) {
1363 VersionStack.push_back(&MA);
1364 ++StackEpoch;
1365 continue;
1366 }
1367
1368 if (MU->isOptimized())
1369 continue;
1370
1371 if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
1372 MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true);
1373 continue;
1374 }
1375
1376 MemoryLocOrCall UseMLOC(MU);
1377 auto &LocInfo = LocStackInfo[UseMLOC];
1378 // If the pop epoch changed, it means we've removed stuff from top of
1379 // stack due to changing blocks. We may have to reset the lower bound or
1380 // last kill info.
1381 if (LocInfo.PopEpoch != PopEpoch) {
1382 LocInfo.PopEpoch = PopEpoch;
1383 LocInfo.StackEpoch = StackEpoch;
1384 // If the lower bound was in something that no longer dominates us, we
1385 // have to reset it.
1386 // We can't simply track stack size, because the stack may have had
1387 // pushes/pops in the meantime.
1388 // XXX: This is non-optimal, but is only slower in cases with heavily
1389 // branching dominator trees. Getting the optimal number of queries would
1390 // require making lowerbound and lastkill a per-loc stack, and popping it
1391 // until the top of that stack dominates us. This does not seem worth it ATM.
1392 // A much cheaper optimization would be to always explore the deepest
1393 // branch of the dominator tree first. This would guarantee the reset happens
1394 // on the smallest set of blocks.
1395 if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
1396 !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
1397 // Reset the lower bound of things to check.
1398 // TODO: Some day we should be able to reset to last kill, rather than
1399 // 0.
1400 LocInfo.LowerBound = 0;
1401 LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
1402 LocInfo.LastKillValid = false;
1403 }
1404 } else if (LocInfo.StackEpoch != StackEpoch) {
1405 // If all that has changed is the StackEpoch, we only have to check the
1406 // new things on the stack, because we've checked everything before. In
1407 // this case, the lower bound of things to check remains the same.
1408 LocInfo.PopEpoch = PopEpoch;
1409 LocInfo.StackEpoch = StackEpoch;
1410 }
1411 if (!LocInfo.LastKillValid) {
1412 LocInfo.LastKill = VersionStack.size() - 1;
1413 LocInfo.LastKillValid = true;
1414 }
1415
1416 // At this point, we should have corrected last kill and LowerBound to be
1417 // in bounds.
1418 assert(LocInfo.LowerBound < VersionStack.size() &&
1419 "Lower bound out of range");
1420 assert(LocInfo.LastKill < VersionStack.size() &&
1421 "Last kill info out of range");
1422 // In any case, the new upper bound is the top of the stack.
1423 unsigned long UpperBound = VersionStack.size() - 1;
1424
1425 if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
1426 LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
1427 << *(MU->getMemoryInst()) << ")"
1428 << " because there are "
1429 << UpperBound - LocInfo.LowerBound
1430 << " stores to disambiguate\n");
1431 // Because we did not walk, LastKill is no longer valid, as this may
1432 // have been a kill.
1433 LocInfo.LastKillValid = false;
1434 continue;
1435 }
1436 bool FoundClobberResult = false;
1437 unsigned UpwardWalkLimit = MaxCheckLimit;
1438 while (UpperBound > LocInfo.LowerBound) {
1439 if (isa<MemoryPhi>(VersionStack[UpperBound])) {
1440 // For phis, use the walker, see where we ended up, go there.
1441 // The invariant.group handling in MemorySSA is ad-hoc and doesn't
1442 // support updates, so don't use it to optimize uses.
1443 MemoryAccess *Result =
1444 Walker->getClobberingMemoryAccessWithoutInvariantGroup(
1445 MU, *AA, UpwardWalkLimit);
1446 // We are guaranteed to find it or something is wrong.
1447 while (VersionStack[UpperBound] != Result) {
1448 assert(UpperBound != 0);
1449 --UpperBound;
1450 }
1451 FoundClobberResult = true;
1452 break;
1453 }
1454
1455 MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
1456 if (instructionClobbersQuery(MD, MU, UseMLOC, *AA)) {
1457 FoundClobberResult = true;
1458 break;
1459 }
1460 --UpperBound;
1461 }
1462
1463 // At the end of this loop, UpperBound is either a clobber or the lower
1464 // bound. PHI walking may cause it to be < LowerBound, and in fact < LastKill.
1465 if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
1466 MU->setDefiningAccess(VersionStack[UpperBound], true);
1467 LocInfo.LastKill = UpperBound;
1468 } else {
1469 // Otherwise, we checked all the new ones, and now we know we can get to
1470 // LastKill.
1471 MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true);
1472 }
1473 LocInfo.LowerBound = VersionStack.size() - 1;
1474 LocInfo.LowerBoundBlock = BB;
1475 }
1476 }
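// Illustrative walk-through (hypothetical IR, not from this file): for a
// block containing
//   store i32 0, ptr %p      ; 1 = MemoryDef(liveOnEntry)
//   store i32 0, ptr %q      ; 2 = MemoryDef(1)
//   %v = load i32, ptr %p    ; MemoryUse to be optimized
// the version stack at the load is [liveOnEntry, 1, 2]. The walk starts at
// the top (UpperBound points at 2); assuming AA reports that the store to %q
// does not alias %p, the walk steps down, finds that MemoryDef 1 clobbers
// the location, and the use is optimized to MemoryUse(1).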
1477
1478 /// Optimize uses to point to their actual clobbering definitions.
1479 void MemorySSA::OptimizeUses::optimizeUses() {
1480 SmallVector<MemoryAccess *, 16> VersionStack;
1481 DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
1482 VersionStack.push_back(MSSA->getLiveOnEntryDef());
1483
1484 unsigned long StackEpoch = 1;
1485 unsigned long PopEpoch = 1;
1486 // We perform a non-recursive top-down dominator tree walk.
1487 for (const auto *DomNode : depth_first(DT->getRootNode()))
1488 optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
1489 LocStackInfo);
1490 }
1491
1492 void MemorySSA::placePHINodes(
1493 const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
1494 // Determine where our MemoryPhis should go.
1495 ForwardIDFCalculator IDFs(*DT);
1496 IDFs.setDefiningBlocks(DefiningBlocks);
1497 SmallVector<BasicBlock *, 32> IDFBlocks;
1498 IDFs.calculate(IDFBlocks);
1499
1500 // Now place MemoryPhi nodes.
1501 for (auto &BB : IDFBlocks)
1502 createMemoryPhi(BB);
1503 }
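// Illustrative example (assumed diamond CFG): if both arms of a diamond
// store to memory, both arms are defining blocks, their iterated dominance
// frontier is the merge block, and a MemoryPhi is placed there:
//   if.end:
//     3 = MemoryPhi({if.then,1},{if.else,2})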
1504
1505 void MemorySSA::buildMemorySSA(BatchAAResults &BAA) {
1506 // We create an access to represent "live on entry", for things like
1507 // arguments or users of globals, where the memory they use is defined before
1508 // the beginning of the function. We do not actually insert it into the IR.
1509 // We do not define a live on exit for the immediate uses, and thus our
1510 // semantics do *not* imply that something with no immediate uses can simply
1511 // be removed.
1512 BasicBlock &StartingPoint = F.getEntryBlock();
1513 LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
1514 &StartingPoint, NextID++));
1515
1516 // We maintain lists of memory accesses per block, trading memory for time.
1517 // The alternative would be to look up the memory access for every possible
1518 // instruction in the stream.
1519 SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
1520 // Go through each block, figure out where defs occur, and chain together all
1521 // the accesses.
1522 for (BasicBlock &B : F) {
1523 bool InsertIntoDef = false;
1524 AccessList *Accesses = nullptr;
1525 DefsList *Defs = nullptr;
1526 for (Instruction &I : B) {
1527 MemoryUseOrDef *MUD = createNewAccess(&I, &BAA);
1528 if (!MUD)
1529 continue;
1530
1531 if (!Accesses)
1532 Accesses = getOrCreateAccessList(&B);
1533 Accesses->push_back(MUD);
1534 if (isa<MemoryDef>(MUD)) {
1535 InsertIntoDef = true;
1536 if (!Defs)
1537 Defs = getOrCreateDefsList(&B);
1538 Defs->push_back(*MUD);
1539 }
1540 }
1541 if (InsertIntoDef)
1542 DefiningBlocks.insert(&B);
1543 }
1544 placePHINodes(DefiningBlocks);
1545
1546 // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
1547 // filled in with all blocks.
1548 SmallPtrSet<BasicBlock *, 16> Visited;
1549 renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);
1550
1551 // Mark the uses in unreachable blocks as live on entry, so that they go
1552 // somewhere.
1553 for (auto &BB : F)
1554 if (!Visited.count(&BB))
1555 markUnreachableAsLiveOnEntry(&BB);
1556 }
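// Illustrative result of construction (hypothetical function):
//   define void @f(ptr %p) {
//     %x = load i32, ptr %p   ; MemoryUse(liveOnEntry)
//     store i32 7, ptr %p     ; 1 = MemoryDef(liveOnEntry)
//     %y = load i32, ptr %p   ; MemoryUse(1)
//     ret void
//   }
// Renaming chains each access to the nearest dominating def, with
// liveOnEntry standing in for memory defined before the function begins.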
1557
1558 MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }
1559
1560 MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() {
1561 if (Walker)
1562 return Walker.get();
1563
1564 if (!WalkerBase)
1565 WalkerBase = std::make_unique<ClobberWalkerBase>(this, DT);
1566
1567 Walker = std::make_unique<CachingWalker>(this, WalkerBase.get());
1568 return Walker.get();
1569 }
1570
1571 MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
1572 if (SkipWalker)
1573 return SkipWalker.get();
1574
1575 if (!WalkerBase)
1576 WalkerBase = std::make_unique<ClobberWalkerBase>(this, DT);
1577
1578 SkipWalker = std::make_unique<SkipSelfWalker>(this, WalkerBase.get());
1579 return SkipWalker.get();
1580 }
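// Minimal usage sketch of the walkers (assumed client code, relying on the
// convenience overloads declared in MemorySSA.h):
//   MemoryAccess *MA = MSSA.getMemoryAccess(&I);
//   MemoryAccess *Clobber = MSSA.getWalker()->getClobberingMemoryAccess(MA);
// The skip-self walker answers the same query, but for a MemoryDef it will
// not return the starting access itself.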
1581
1582
1583 // This is a helper function used by the creation routines. It places NewAccess
1584 // into the access and defs lists for a given basic block, at the given
1585 // insertion point.
1586 void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
1587 const BasicBlock *BB,
1588 InsertionPlace Point) {
1589 auto *Accesses = getOrCreateAccessList(BB);
1590 if (Point == Beginning) {
1591 // If it's a phi node, it goes first, otherwise, it goes after any phi
1592 // nodes.
1593 if (isa<MemoryPhi>(NewAccess)) {
1594 Accesses->push_front(NewAccess);
1595 auto *Defs = getOrCreateDefsList(BB);
1596 Defs->push_front(*NewAccess);
1597 } else {
1598 auto AI = find_if_not(
1599 *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1600 Accesses->insert(AI, NewAccess);
1601 if (!isa<MemoryUse>(NewAccess)) {
1602 auto *Defs = getOrCreateDefsList(BB);
1603 auto DI = find_if_not(
1604 *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1605 Defs->insert(DI, *NewAccess);
1606 }
1607 }
1608 } else {
1609 Accesses->push_back(NewAccess);
1610 if (!isa<MemoryUse>(NewAccess)) {
1611 auto *Defs = getOrCreateDefsList(BB);
1612 Defs->push_back(*NewAccess);
1613 }
1614 }
1615 BlockNumberingValid.erase(BB);
1616 }
1617
1618 void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
1619 AccessList::iterator InsertPt) {
1620 auto *Accesses = getWritableBlockAccesses(BB);
1621 bool WasEnd = InsertPt == Accesses->end();
1622 Accesses->insert(AccessList::iterator(InsertPt), What);
1623 if (!isa<MemoryUse>(What)) {
1624 auto *Defs = getOrCreateDefsList(BB);
1625 // If we got asked to insert at the end, we have an easy job, just shove it
1626 // at the end. If we got asked to insert before an existing def, we also get
1627 // an iterator. If we got asked to insert before a use, we have to hunt for
1628 // the next def.
1629 if (WasEnd) {
1630 Defs->push_back(*What);
1631 } else if (isa<MemoryDef>(InsertPt)) {
1632 Defs->insert(InsertPt->getDefsIterator(), *What);
1633 } else {
1634 while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
1635 ++InsertPt;
1636 // Either we found a def, or we are inserting at the end
1637 if (InsertPt == Accesses->end())
1638 Defs->push_back(*What);
1639 else
1640 Defs->insert(InsertPt->getDefsIterator(), *What);
1641 }
1642 }
1643 BlockNumberingValid.erase(BB);
1644 }
1645
1646 void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) {
1647 // Keep it in the lookup tables, remove from the lists
1648 removeFromLists(What, false);
1649
1650 // Note that moving should implicitly invalidate the optimized state of a
1651 // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a
1652 // MemoryDef.
1653 if (auto *MD = dyn_cast<MemoryDef>(What))
1654 MD->resetOptimized();
1655 What->setBlock(BB);
1656 }
1657
1658 // Move What before Where in the IR. The end result is that What will belong to
1659 // the right lists and have the right Block set, but will not otherwise be
1660 // correct. It will not have the right defining access, and if it is a def,
1661 // things below it will not properly be updated.
1662 void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1663 AccessList::iterator Where) {
1664 prepareForMoveTo(What, BB);
1665 insertIntoListsBefore(What, BB, Where);
1666 }
1667
1668 void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
1669 InsertionPlace Point) {
1670 if (isa<MemoryPhi>(What)) {
1671 assert(Point == Beginning &&
1672 "Can only move a Phi at the beginning of the block");
1673 // Update lookup table entry
1674 ValueToMemoryAccess.erase(What->getBlock());
1675 bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
1676 (void)Inserted;
1677 assert(Inserted && "Cannot move a Phi to a block that already has one");
1678 }
1679
1680 prepareForMoveTo(What, BB);
1681 insertIntoListsForBlock(What, BB, Point);
1682 }
1683
1684 MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
1685 assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
1686 MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
1687 // Phis are always placed at the front of the block.
1688 insertIntoListsForBlock(Phi, BB, Beginning);
1689 ValueToMemoryAccess[BB] = Phi;
1690 return Phi;
1691 }
1692
1693 MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
1694 MemoryAccess *Definition,
1695 const MemoryUseOrDef *Template,
1696 bool CreationMustSucceed) {
1697 assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
1698 MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template);
1699 if (CreationMustSucceed)
1700 assert(NewAccess != nullptr && "Tried to create a memory access for a "
1701 "non-memory touching instruction");
1702 if (NewAccess) {
1703 assert((!Definition || !isa<MemoryUse>(Definition)) &&
1704 "A use cannot be a defining access");
1705 NewAccess->setDefiningAccess(Definition);
1706 }
1707 return NewAccess;
1708 }
1709
1710 // Return true if the instruction has ordering constraints.
1711 // Note specifically that this only considers stores and loads
1712 // because others are still considered ModRef by getModRefInfo.
1713 static inline bool isOrdered(const Instruction *I) {
1714 if (auto *SI = dyn_cast<StoreInst>(I)) {
1715 if (!SI->isUnordered())
1716 return true;
1717 } else if (auto *LI = dyn_cast<LoadInst>(I)) {
1718 if (!LI->isUnordered())
1719 return true;
1720 }
1721 return false;
1722 }
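// For example, a plain `load i32, ptr %p` is unordered, while a volatile
// load or `load atomic i32, ptr %p acquire, align 4` is not; isOrdered
// returns true for the latter two, and createNewAccess (below) models them
// as MemoryDefs rather than MemoryUses.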
1723
1724 /// Helper function to create new memory accesses
1725 template <typename AliasAnalysisType>
1726 MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
1727 AliasAnalysisType *AAP,
1728 const MemoryUseOrDef *Template) {
1729 // The assume intrinsic has a control dependency which we model by claiming
1730 // that it writes arbitrarily. Debuginfo intrinsics may be considered
1731 // clobbers when we have a nonstandard AA pipeline. Ignore these fake memory
1732 // dependencies here.
1733 // FIXME: Replace this special casing with a more accurate modelling of
1734 // assume's control dependency.
1735 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1736 switch (II->getIntrinsicID()) {
1737 default:
1738 break;
1739 case Intrinsic::assume:
1740 case Intrinsic::experimental_noalias_scope_decl:
1741 case Intrinsic::pseudoprobe:
1742 return nullptr;
1743 }
1744 }
1745
1746 // Using a nonstandard AA pipeline might leave us with unexpected modref
1747 // results for I, so add a check to not model instructions that neither read
1748 // from nor write to memory. This is necessary for correctness.
1749 if (!I->mayReadFromMemory() && !I->mayWriteToMemory())
1750 return nullptr;
1751
1752 bool Def, Use;
1753 if (Template) {
1754 Def = isa<MemoryDef>(Template);
1755 Use = isa<MemoryUse>(Template);
1756 #if !defined(NDEBUG)
1757 ModRefInfo ModRef = AAP->getModRefInfo(I, std::nullopt);
1758 bool DefCheck, UseCheck;
1759 DefCheck = isModSet(ModRef) || isOrdered(I);
1760 UseCheck = isRefSet(ModRef);
1761 // Memory accesses should only be reduced and cannot be increased, since AA
1762 // might just return better results as a result of some transformations.
1763 assert((Def == DefCheck || !DefCheck) &&
1764 "Memory accesses should only be reduced");
1765 if (!Def && Use != UseCheck) {
1766 // New Access should not have more power than template access
1767 assert(!UseCheck && "Invalid template");
1768 }
1769 #endif
1770 } else {
1771 // Find out what effect this instruction has on memory.
1772 ModRefInfo ModRef = AAP->getModRefInfo(I, std::nullopt);
1773 // The isOrdered check is used to ensure that volatiles end up as defs
1774 // (atomics end up as ModRef right now anyway). Until we separate the
1775 // ordering chain from the memory chain, this enables people to see at least
1776 // some relative ordering to volatiles. Note that getClobberingMemoryAccess
1777 // will still give an answer that bypasses other volatile loads. TODO:
1778 // Separate memory aliasing and ordering into two different chains so that
1779 // we can precisely represent both "what memory will this read/write/is
1780 // clobbered by" and "what instructions can I move this past".
1781 Def = isModSet(ModRef) || isOrdered(I);
1782 Use = isRefSet(ModRef);
1783 }
1784
1785 // It's possible for an instruction to not touch memory at all. During
1786 // construction, we ignore such instructions.
1787 if (!Def && !Use)
1788 return nullptr;
1789
1790 MemoryUseOrDef *MUD;
1791 if (Def)
1792 MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
1793 else
1794 MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
1795 ValueToMemoryAccess[I] = MUD;
1796 return MUD;
1797 }
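// Illustrative classification under a standard AA pipeline (a sketch, not
// exhaustive):
//   store i32 0, ptr %p            -> MemoryDef (Mod is set)
//   %v = load i32, ptr %p          -> MemoryUse (only Ref is set)
//   load volatile i32, ptr %p      -> MemoryDef (ordered, see isOrdered)
//   call void @llvm.assume(i1 %c)  -> no access (special-cased above)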
1798
1799 /// Properly remove \p MA from all of MemorySSA's lookup tables.
1800 void MemorySSA::removeFromLookups(MemoryAccess *MA) {
1801 assert(MA->use_empty() &&
1802 "Trying to remove memory access that still has uses");
1803 BlockNumbering.erase(MA);
1804 if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1805 MUD->setDefiningAccess(nullptr);
1806 // Invalidate our walker's cache if necessary
1807 if (!isa<MemoryUse>(MA))
1808 getWalker()->invalidateInfo(MA);
1809
1810 Value *MemoryInst;
1811 if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1812 MemoryInst = MUD->getMemoryInst();
1813 else
1814 MemoryInst = MA->getBlock();
1815
1816 auto VMA = ValueToMemoryAccess.find(MemoryInst);
1817 if (VMA->second == MA)
1818 ValueToMemoryAccess.erase(VMA);
1819 }
1820
1821 /// Properly remove \p MA from all of MemorySSA's lists.
1822 ///
1823 /// Because of the way the intrusive list and use lists work, it is important to
1824 /// do removal in the right order.
1825 /// ShouldDelete defaults to true, and will cause the memory access to also be
1826 /// deleted, not just removed.
1827 void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
1828 BasicBlock *BB = MA->getBlock();
1829 // The access list owns the reference, so we erase it from the non-owning list
1830 // first.
1831 if (!isa<MemoryUse>(MA)) {
1832 auto DefsIt = PerBlockDefs.find(BB);
1833 std::unique_ptr<DefsList> &Defs = DefsIt->second;
1834 Defs->remove(*MA);
1835 if (Defs->empty())
1836 PerBlockDefs.erase(DefsIt);
1837 }
1838
1839 // The erase call here will delete it. If we don't want it deleted, we call
1840 // remove instead.
1841 auto AccessIt = PerBlockAccesses.find(BB);
1842 std::unique_ptr<AccessList> &Accesses = AccessIt->second;
1843 if (ShouldDelete)
1844 Accesses->erase(MA);
1845 else
1846 Accesses->remove(MA);
1847
1848 if (Accesses->empty()) {
1849 PerBlockAccesses.erase(AccessIt);
1850 BlockNumberingValid.erase(BB);
1851 }
1852 }
1853
1854 void MemorySSA::print(raw_ostream &OS) const {
1855 MemorySSAAnnotatedWriter Writer(this);
1856 F.print(OS, &Writer);
1857 }
1858
1859 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1860 LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
1861 #endif
1862
1863 void MemorySSA::verifyMemorySSA(VerificationLevel VL) const {
1864 #if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS)
1865 VL = VerificationLevel::Full;
1866 #endif
1867
1868 #ifndef NDEBUG
1869 verifyOrderingDominationAndDefUses(F, VL);
1870 verifyDominationNumbers(F);
1871 if (VL == VerificationLevel::Full)
1872 verifyPrevDefInPhis(F);
1873 #endif
1874 // Previously, the verification used to also verify that the clobberingAccess
1875 // cached by MemorySSA is the same as the clobberingAccess found at a later
1876 // query to AA. This does not hold true in general due to the current fragility
1877 // of BasicAA which has arbitrary caps on the things it analyzes before giving
1878 // up. As a result, correct transformations can lead to BasicAA returning
1879 // different alias answers before and after that transformation. Invalidating
1880 // MemorySSA is not an option, as the results in BasicAA can be so random that,
1881 // in the worst case, we'd need to rebuild MemorySSA from scratch after every
1882 // transformation, which defeats the purpose of using it. For such an
1883 // example, see test4 added in D51960.
1884 }
1885
1886 void MemorySSA::verifyPrevDefInPhis(Function &F) const {
1887 for (const BasicBlock &BB : F) {
1888 if (MemoryPhi *Phi = getMemoryAccess(&BB)) {
1889 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1890 auto *Pred = Phi->getIncomingBlock(I);
1891 auto *IncAcc = Phi->getIncomingValue(I);
1892 // If Pred has no unreachable predecessors, get last def looking at
1893 // IDoms. If, while walking IDoms, any of these has an unreachable
1894 // predecessor, then the incoming def can be any access.
1895 if (auto *DTNode = DT->getNode(Pred)) {
1896 while (DTNode) {
1897 if (auto *DefList = getBlockDefs(DTNode->getBlock())) {
1898 auto *LastAcc = &*(--DefList->end());
1899 assert(LastAcc == IncAcc &&
1900 "Incorrect incoming access into phi.");
1901 (void)IncAcc;
1902 (void)LastAcc;
1903 break;
1904 }
1905 DTNode = DTNode->getIDom();
1906 }
1907 } else {
1908 // If Pred has unreachable predecessors, but has at least a Def, the
1909 // incoming access can be the last Def in Pred, or it could have been
1910 // optimized to LoE. After an update, though, the LoE may have been
1911 // replaced by another access, so IncAcc may be any access.
1912 // If Pred has unreachable predecessors and no Defs, the incoming access
1913 // should be LoE; however, after an update, it may be any access.
1914 }
1915 }
1916 }
1917 }
1918 }
1919
1920 /// Verify that all of the blocks we believe to have valid domination numbers
1921 /// actually have valid domination numbers.
1922 void MemorySSA::verifyDominationNumbers(const Function &F) const {
1923 if (BlockNumberingValid.empty())
1924 return;
1925
1926 SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid;
1927 for (const BasicBlock &BB : F) {
1928 if (!ValidBlocks.count(&BB))
1929 continue;
1930
1931 ValidBlocks.erase(&BB);
1932
1933 const AccessList *Accesses = getBlockAccesses(&BB);
1934 // It's correct to say an empty block has valid numbering.
1935 if (!Accesses)
1936 continue;
1937
1938 // Block numbering starts at 1.
1939 unsigned long LastNumber = 0;
1940 for (const MemoryAccess &MA : *Accesses) {
1941 auto ThisNumberIter = BlockNumbering.find(&MA);
1942 assert(ThisNumberIter != BlockNumbering.end() &&
1943 "MemoryAccess has no domination number in a valid block!");
1944
1945 unsigned long ThisNumber = ThisNumberIter->second;
1946 assert(ThisNumber > LastNumber &&
1947 "Domination numbers should be strictly increasing!");
1948 (void)LastNumber;
1949 LastNumber = ThisNumber;
1950 }
1951 }
1952
1953 assert(ValidBlocks.empty() &&
1954 "All valid BasicBlocks should exist in F -- dangling pointers?");
1955 }
1956
1957 /// Verify ordering: the order and existence of MemoryAccesses matches the
1958 /// order and existence of memory affecting instructions.
1959 /// Verify domination: each definition dominates all of its uses.
1960 /// Verify def-uses: the immediate use information - walk all the memory
1961 /// accesses and verify that, for each use, it appears in the appropriate
1962 /// def's use list.
1963 void MemorySSA::verifyOrderingDominationAndDefUses(Function &F,
1964 VerificationLevel VL) const {
1965 // Walk all the blocks, comparing what the lookups think and what the access
1966 // lists think, as well as the order in the blocks vs the order in the access
1967 // lists.
1968 SmallVector<MemoryAccess *, 32> ActualAccesses;
1969 SmallVector<MemoryAccess *, 32> ActualDefs;
1970 for (BasicBlock &B : F) {
1971 const AccessList *AL = getBlockAccesses(&B);
1972 const auto *DL = getBlockDefs(&B);
1973 MemoryPhi *Phi = getMemoryAccess(&B);
1974 if (Phi) {
1975 // Verify ordering.
1976 ActualAccesses.push_back(Phi);
1977 ActualDefs.push_back(Phi);
1978 // Verify domination
1979 for (const Use &U : Phi->uses()) {
1980 assert(dominates(Phi, U) && "Memory PHI does not dominate its uses");
1981 (void)U;
1982 }
1983 // Verify def-uses for full verify.
1984 if (VL == VerificationLevel::Full) {
1985 assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
1986 pred_begin(&B), pred_end(&B))) &&
1987 "Incomplete MemoryPhi Node");
1988 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1989 verifyUseInDefs(Phi->getIncomingValue(I), Phi);
1990 assert(is_contained(predecessors(&B), Phi->getIncomingBlock(I)) &&
1991 "Incoming phi block not a block predecessor");
1992 }
1993 }
1994 }
1995
1996 for (Instruction &I : B) {
1997 MemoryUseOrDef *MA = getMemoryAccess(&I);
1998 assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
1999 "We have memory affecting instructions "
2000 "in this block but they are not in the "
2001 "access list or defs list");
2002 if (MA) {
2003 // Verify ordering.
2004 ActualAccesses.push_back(MA);
2005 if (MemoryAccess *MD = dyn_cast<MemoryDef>(MA)) {
2006 // Verify ordering.
2007 ActualDefs.push_back(MA);
2008 // Verify domination.
2009 for (const Use &U : MD->uses()) {
2010 assert(dominates(MD, U) &&
2011 "Memory Def does not dominate it's uses");
2012 (void)U;
2013 }
2014 }
2015 // Verify def-uses for full verify.
2016 if (VL == VerificationLevel::Full)
2017 verifyUseInDefs(MA->getDefiningAccess(), MA);
2018 }
2019 }
2020 // Either we hit the assert, really have no accesses, or we have both
2021 // accesses and an access list. Same with defs.
2022 if (!AL && !DL)
2023 continue;
2024 // Verify ordering.
2025 assert(AL->size() == ActualAccesses.size() &&
2026 "We don't have the same number of accesses in the block as on the "
2027 "access list");
2028 assert((DL || ActualDefs.size() == 0) &&
2029 "Either we should have a defs list, or we should have no defs");
2030 assert((!DL || DL->size() == ActualDefs.size()) &&
2031 "We don't have the same number of defs in the block as on the "
2032 "def list");
2033 auto ALI = AL->begin();
2034 auto AAI = ActualAccesses.begin();
2035 while (ALI != AL->end() && AAI != ActualAccesses.end()) {
2036 assert(&*ALI == *AAI && "Not the same accesses in the same order");
2037 ++ALI;
2038 ++AAI;
2039 }
2040 ActualAccesses.clear();
2041 if (DL) {
2042 auto DLI = DL->begin();
2043 auto ADI = ActualDefs.begin();
2044 while (DLI != DL->end() && ADI != ActualDefs.end()) {
2045 assert(&*DLI == *ADI && "Not the same defs in the same order");
2046 ++DLI;
2047 ++ADI;
2048 }
2049 }
2050 ActualDefs.clear();
2051 }
2052 }
2053
2054 /// Verify the def-use lists in MemorySSA, by verifying that \p Use
2055 /// appears in the use list of \p Def.
2056 void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
2057 // The live on entry use may cause us to get a NULL def here
2058 if (!Def)
2059 assert(isLiveOnEntryDef(Use) &&
2060 "Null def but use not point to live on entry def");
2061 else
2062 assert(is_contained(Def->users(), Use) &&
2063 "Did not find use in def's use list");
2064 }
2065
2066 /// Perform a local numbering on blocks so that instruction ordering can be
2067 /// determined in constant time.
2068 /// TODO: We currently just number in order. If we numbered by N, we could
2069 /// allow at least N-1 sequences of insertBefore or insertAfter (and at least
2070 /// log2(N) sequences of mixed before and after) without needing to invalidate
2071 /// the numbering.
2072 void MemorySSA::renumberBlock(const BasicBlock *B) const {
2073 // The pre-increment ensures the numbers really start at 1.
2074 unsigned long CurrentNumber = 0;
2075 const AccessList *AL = getBlockAccesses(B);
2076 assert(AL != nullptr && "Asking to renumber an empty block");
2077 for (const auto &I : *AL)
2078 BlockNumbering[&I] = ++CurrentNumber;
2079 BlockNumberingValid.insert(B);
2080 }
2081
2082 /// Determine, for two memory accesses in the same block,
2083 /// whether \p Dominator dominates \p Dominatee.
2084 /// \returns True if \p Dominator dominates \p Dominatee.
2085 bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
2086 const MemoryAccess *Dominatee) const {
2087 const BasicBlock *DominatorBlock = Dominator->getBlock();
2088
2089 assert((DominatorBlock == Dominatee->getBlock()) &&
2090 "Asking for local domination when accesses are in different blocks!");
2091 // A node dominates itself.
2092 if (Dominatee == Dominator)
2093 return true;
2094
2095 // When Dominatee is defined on function entry, it is not dominated by another
2096 // memory access.
2097 if (isLiveOnEntryDef(Dominatee))
2098 return false;
2099
2100 // When Dominator is defined on function entry, it dominates the other memory
2101 // access.
2102 if (isLiveOnEntryDef(Dominator))
2103 return true;
2104
2105 if (!BlockNumberingValid.count(DominatorBlock))
2106 renumberBlock(DominatorBlock);
2107
2108 unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
2109 // All numbers start with 1
2110 assert(DominatorNum != 0 && "Block was not numbered properly");
2111 unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
2112 assert(DominateeNum != 0 && "Block was not numbered properly");
2113 return DominatorNum < DominateeNum;
2114 }
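// Illustrative example (hypothetical block): if renumberBlock numbers a
// block's accesses 1 = MemoryDef, 2 = MemoryUse, 3 = MemoryDef, then
// locallyDominates(1, 3) is true because 1 < 3, and locallyDominates(3, 2)
// is false. The numbering is computed lazily and stays cached until an
// insertion into the block invalidates it.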
2115
2116 bool MemorySSA::dominates(const MemoryAccess *Dominator,
2117 const MemoryAccess *Dominatee) const {
2118 if (Dominator == Dominatee)
2119 return true;
2120
2121 if (isLiveOnEntryDef(Dominatee))
2122 return false;
2123
2124 if (Dominator->getBlock() != Dominatee->getBlock())
2125 return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
2126 return locallyDominates(Dominator, Dominatee);
2127 }
2128
2129 bool MemorySSA::dominates(const MemoryAccess *Dominator,
2130 const Use &Dominatee) const {
2131 if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
2132 BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
2133 // The def must dominate the incoming block of the phi.
2134 if (UseBB != Dominator->getBlock())
2135 return DT->dominates(Dominator->getBlock(), UseBB);
2136 // If the UseBB and the DefBB are the same, compare locally.
2137 return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
2138 }
2139 // If it's not a PHI node use, the normal dominates can already handle it.
2140 return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
2141 }
2142
2143 void MemorySSA::ensureOptimizedUses() {
2144 if (IsOptimized)
2145 return;
2146
2147 BatchAAResults BatchAA(*AA);
2148 ClobberWalkerBase WalkerBase(this, DT);
2149 CachingWalker WalkerLocal(this, &WalkerBase);
2150 OptimizeUses(this, &WalkerLocal, &BatchAA, DT).optimizeUses();
2151 IsOptimized = true;
2152 }
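// Usage sketch (assumed caller): passes that want MemoryUses to point at
// their real clobbers call this once before querying, e.g.
//   MSSA.ensureOptimizedUses();
// after which optimized uses have their clobber as their defining access
// (subject to the MaxCheckLimit cap); the printer passes below do this.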
2153
2154 void MemoryAccess::print(raw_ostream &OS) const {
2155 switch (getValueID()) {
2156 case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
2157 case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
2158 case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
2159 }
2160 llvm_unreachable("invalid value id");
2161 }
2162
2163 void MemoryDef::print(raw_ostream &OS) const {
2164 MemoryAccess *UO = getDefiningAccess();
2165
2166 auto printID = [&OS](MemoryAccess *A) {
2167 if (A && A->getID())
2168 OS << A->getID();
2169 else
2170 OS << LiveOnEntryStr;
2171 };
2172
2173 OS << getID() << " = MemoryDef(";
2174 printID(UO);
2175 OS << ")";
2176
2177 if (isOptimized()) {
2178 OS << "->";
2179 printID(getOptimized());
2180 }
2181 }
2182
2183 void MemoryPhi::print(raw_ostream &OS) const {
2184 ListSeparator LS(",");
2185 OS << getID() << " = MemoryPhi(";
2186 for (const auto &Op : operands()) {
2187 BasicBlock *BB = getIncomingBlock(Op);
2188 MemoryAccess *MA = cast<MemoryAccess>(Op);
2189
2190 OS << LS << '{';
2191 if (BB->hasName())
2192 OS << BB->getName();
2193 else
2194 BB->printAsOperand(OS, false);
2195 OS << ',';
2196 if (unsigned ID = MA->getID())
2197 OS << ID;
2198 else
2199 OS << LiveOnEntryStr;
2200 OS << '}';
2201 }
2202 OS << ')';
2203 }
2204
2205 void MemoryUse::print(raw_ostream &OS) const {
2206 MemoryAccess *UO = getDefiningAccess();
2207 OS << "MemoryUse(";
2208 if (UO && UO->getID())
2209 OS << UO->getID();
2210 else
2211 OS << LiveOnEntryStr;
2212 OS << ')';
2213 }
2214
2215 void MemoryAccess::dump() const {
2216 // Cannot completely remove virtual function even in release mode.
2217 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2218 print(dbgs());
2219 dbgs() << "\n";
2220 #endif
2221 }
2222
2223 char MemorySSAPrinterLegacyPass::ID = 0;
2224
2225 MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
2226 initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
2227 }
2228
2229 void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
2230 AU.setPreservesAll();
2231 AU.addRequired<MemorySSAWrapperPass>();
2232 }
2233
2234 class DOTFuncMSSAInfo {
2235 private:
2236 const Function &F;
2237 MemorySSAAnnotatedWriter MSSAWriter;
2238
2239 public:
2240 DOTFuncMSSAInfo(const Function &F, MemorySSA &MSSA)
2241 : F(F), MSSAWriter(&MSSA) {}
2242
2243 const Function *getFunction() { return &F; }
2244 MemorySSAAnnotatedWriter &getWriter() { return MSSAWriter; }
2245 };
2246
2247 namespace llvm {
2248
2249 template <>
2250 struct GraphTraits<DOTFuncMSSAInfo *> : public GraphTraits<const BasicBlock *> {
2251 static NodeRef getEntryNode(DOTFuncMSSAInfo *CFGInfo) {
2252 return &(CFGInfo->getFunction()->getEntryBlock());
2253 }
2254
2255 // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
2256 using nodes_iterator = pointer_iterator<Function::const_iterator>;
2257
2258 static nodes_iterator nodes_begin(DOTFuncMSSAInfo *CFGInfo) {
2259 return nodes_iterator(CFGInfo->getFunction()->begin());
2260 }
2261
2262 static nodes_iterator nodes_end(DOTFuncMSSAInfo *CFGInfo) {
2263 return nodes_iterator(CFGInfo->getFunction()->end());
2264 }
2265
2266 static size_t size(DOTFuncMSSAInfo *CFGInfo) {
2267 return CFGInfo->getFunction()->size();
2268 }
2269 };
2270
2271 template <>
2272 struct DOTGraphTraits<DOTFuncMSSAInfo *> : public DefaultDOTGraphTraits {
2273
2274 DOTGraphTraits(bool IsSimple = false) : DefaultDOTGraphTraits(IsSimple) {}
2275
2276 static std::string getGraphName(DOTFuncMSSAInfo *CFGInfo) {
2277 return "MSSA CFG for '" + CFGInfo->getFunction()->getName().str() +
2278 "' function";
2279 }
2280
2281 std::string getNodeLabel(const BasicBlock *Node, DOTFuncMSSAInfo *CFGInfo) {
2282 return DOTGraphTraits<DOTFuncInfo *>::getCompleteNodeLabel(
2283 Node, nullptr,
2284 [CFGInfo](raw_string_ostream &OS, const BasicBlock &BB) -> void {
2285 BB.print(OS, &CFGInfo->getWriter(), true, true);
2286 },
2287 [](std::string &S, unsigned &I, unsigned Idx) -> void {
2288 std::string Str = S.substr(I, Idx - I);
2289 StringRef SR = Str;
2290 if (SR.count(" = MemoryDef(") || SR.count(" = MemoryPhi(") ||
2291 SR.count("MemoryUse("))
2292 return;
2293 DOTGraphTraits<DOTFuncInfo *>::eraseComment(S, I, Idx);
2294 });
2295 }
2296
2297 static std::string getEdgeSourceLabel(const BasicBlock *Node,
2298 const_succ_iterator I) {
2299 return DOTGraphTraits<DOTFuncInfo *>::getEdgeSourceLabel(Node, I);
2300 }
2301
2302 /// Display the raw branch weights from PGO.
2303 std::string getEdgeAttributes(const BasicBlock *Node, const_succ_iterator I,
2304 DOTFuncMSSAInfo *CFGInfo) {
2305 return "";
2306 }
2307
2308 std::string getNodeAttributes(const BasicBlock *Node,
2309 DOTFuncMSSAInfo *CFGInfo) {
2310 return getNodeLabel(Node, CFGInfo).find(';') != std::string::npos
2311 ? "style=filled, fillcolor=lightpink"
2312 : "";
2313 }
2314 };
2315
2316 } // namespace llvm
2317
2318 bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
2319 auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
2320 MSSA.ensureOptimizedUses();
2321 if (DotCFGMSSA != "") {
2322 DOTFuncMSSAInfo CFGInfo(F, MSSA);
2323 WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA);
2324 } else
2325 MSSA.print(dbgs());
2326
2327 if (VerifyMemorySSA)
2328 MSSA.verifyMemorySSA();
2329 return false;
2330 }
2331
2332 AnalysisKey MemorySSAAnalysis::Key;
2333
2334 MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
2335 FunctionAnalysisManager &AM) {
2336 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
2337 auto &AA = AM.getResult<AAManager>(F);
2338 return MemorySSAAnalysis::Result(std::make_unique<MemorySSA>(F, &AA, &DT));
2339 }
2340
2341 bool MemorySSAAnalysis::Result::invalidate(
2342 Function &F, const PreservedAnalyses &PA,
2343 FunctionAnalysisManager::Invalidator &Inv) {
2344 auto PAC = PA.getChecker<MemorySSAAnalysis>();
2345 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
2346 Inv.invalidate<AAManager>(F, PA) ||
2347 Inv.invalidate<DominatorTreeAnalysis>(F, PA);
2348 }
2349
2350 PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
2351 FunctionAnalysisManager &AM) {
2352 auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
2353 MSSA.ensureOptimizedUses();
2354 if (DotCFGMSSA != "") {
2355 DOTFuncMSSAInfo CFGInfo(F, MSSA);
2356 WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA);
2357 } else {
2358 OS << "MemorySSA for function: " << F.getName() << "\n";
2359 MSSA.print(OS);
2360 }
2361
2362 return PreservedAnalyses::all();
2363 }
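// For reference, this printer is typically driven from opt; a sketch:
//   opt -passes='print<memoryssa>' -disable-output test.ll
// With -dot-cfg-mssa=out.dot, the annotated CFG is written to a dot file
// instead of the output stream.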
2364
2365 PreservedAnalyses MemorySSAWalkerPrinterPass::run(Function &F,
2366 FunctionAnalysisManager &AM) {
2367 auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
2368 OS << "MemorySSA (walker) for function: " << F.getName() << "\n";
2369 MemorySSAWalkerAnnotatedWriter Writer(&MSSA);
2370 F.print(OS, &Writer);
2371
2372 return PreservedAnalyses::all();
2373 }
2374
2375 PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
2376 FunctionAnalysisManager &AM) {
2377 AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();
2378
2379 return PreservedAnalyses::all();
2380 }
2381
2382 char MemorySSAWrapperPass::ID = 0;
2383
2384 MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
2385 initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
2386 }
2387
2388 void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }
2389
2390 void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
2391 AU.setPreservesAll();
2392 AU.addRequiredTransitive<DominatorTreeWrapperPass>();
2393 AU.addRequiredTransitive<AAResultsWrapperPass>();
2394 }
2395
2396 bool MemorySSAWrapperPass::runOnFunction(Function &F) {
2397 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2398 auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2399 MSSA.reset(new MemorySSA(F, &AA, &DT));
2400 return false;
2401 }
2402
2403 void MemorySSAWrapperPass::verifyAnalysis() const {
2404 if (VerifyMemorySSA)
2405 MSSA->verifyMemorySSA();
2406 }
2407
2408 void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
2409 MSSA->print(OS);
2410 }
2411
2412 MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
2413
2414 /// Walk the use-def chains starting at \p StartingAccess and find
2415 /// the MemoryAccess that actually clobbers Loc.
2416 ///
2417 /// \returns our clobbering memory access
2418 MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(
2419 MemoryAccess *StartingAccess, const MemoryLocation &Loc,
2420 BatchAAResults &BAA, unsigned &UpwardWalkLimit) {
2421 assert(!isa<MemoryUse>(StartingAccess) && "Use cannot be defining access");
2422
2423 Instruction *I = nullptr;
2424 if (auto *StartingUseOrDef = dyn_cast<MemoryUseOrDef>(StartingAccess)) {
2425 if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
2426 return StartingUseOrDef;
2427
2428 I = StartingUseOrDef->getMemoryInst();
2429
2430 // Conservatively, fences are always clobbers, so don't perform the walk if
2431 // we hit a fence.
2432 if (!isa<CallBase>(I) && I->isFenceLike())
2433 return StartingUseOrDef;
2434 }
2435
2436 UpwardsMemoryQuery Q;
2437 Q.OriginalAccess = StartingAccess;
2438 Q.StartingLoc = Loc;
2439 Q.Inst = nullptr;
2440 Q.IsCall = false;
2441
2442 // Unlike the other function, do not walk to the def of a def, because we are
2443 // handed something we already believe is the clobbering access.
2444 // We never set SkipSelf to true in Q in this method.
2445 MemoryAccess *Clobber =
2446 Walker.findClobber(BAA, StartingAccess, Q, UpwardWalkLimit);
2447 LLVM_DEBUG({
2448 dbgs() << "Clobber starting at access " << *StartingAccess << "\n";
2449 if (I)
2450 dbgs() << " for instruction " << *I << "\n";
2451 dbgs() << " is " << *Clobber << "\n";
2452 });
2453 return Clobber;
2454 }
2455
2456 static const Instruction *
2457 getInvariantGroupClobberingInstruction(Instruction &I, DominatorTree &DT) {
2458 if (!I.hasMetadata(LLVMContext::MD_invariant_group) || I.isVolatile())
2459 return nullptr;
2460
2461 // We consider bitcasts and zero GEPs to be the same pointer value. Start by
2462 // stripping bitcasts and zero GEPs, then we will recursively look at loads
2463 // and stores through bitcasts and zero GEPs.
2464 Value *PointerOperand = getLoadStorePointerOperand(&I)->stripPointerCasts();
2465
2466 // It's not safe to walk the use list of a global value because function
2467 // passes aren't allowed to look outside their functions.
2468 // FIXME: this could be fixed by filtering instructions from outside of
2469 // current function.
2470 if (isa<Constant>(PointerOperand))
2471 return nullptr;
2472
2473 // Queue to process all pointers that are equivalent to load operand.
2474 SmallVector<const Value *, 8> PointerUsesQueue;
2475 PointerUsesQueue.push_back(PointerOperand);
2476
2477 const Instruction *MostDominatingInstruction = &I;
2478
2479 // FIXME: This loop is O(n^2) because dominates can be O(n) and in worst case
2480 // we will see all the instructions. It may not matter in practice. If it
2481 // does, we will have to support MemorySSA construction and updates.
2482 while (!PointerUsesQueue.empty()) {
2483 const Value *Ptr = PointerUsesQueue.pop_back_val();
2484 assert(Ptr && !isa<GlobalValue>(Ptr) &&
2485 "Null or GlobalValue should not be inserted");
2486
2487 for (const User *Us : Ptr->users()) {
2488 auto *U = dyn_cast<Instruction>(Us);
2489 if (!U || U == &I || !DT.dominates(U, MostDominatingInstruction))
2490 continue;
2491
2492 // Add bitcasts and zero GEPs to queue.
2493 if (isa<BitCastInst>(U)) {
2494 PointerUsesQueue.push_back(U);
2495 continue;
2496 }
2497 if (auto *GEP = dyn_cast<GetElementPtrInst>(U)) {
2498 if (GEP->hasAllZeroIndices())
2499 PointerUsesQueue.push_back(U);
2500 continue;
2501 }
2502
2503 // If we hit a load/store with invariant.group metadata and the same
2504 // pointer operand, we can assume that the value pointed to by the pointer
2505 // operand didn't change.
2506 if (U->hasMetadata(LLVMContext::MD_invariant_group) &&
2507 getLoadStorePointerOperand(U) == Ptr && !U->isVolatile()) {
2508 MostDominatingInstruction = U;
2509 }
2510 }
2511 }
2512 return MostDominatingInstruction == &I ? nullptr : MostDominatingInstruction;
2513 }
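// Illustrative example (hypothetical IR): given
//   store i32 0, ptr %p, !invariant.group !0
//   ...
//   %v = load i32, ptr %p, !invariant.group !0
// if the store dominates the load, this returns the store: invariant.group
// guarantees the value at %p observed through tagged accesses is unchanged,
// so the load can use the store as its clobbering access.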
2514
2515 MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(
2516 MemoryAccess *MA, BatchAAResults &BAA, unsigned &UpwardWalkLimit,
2517 bool SkipSelf, bool UseInvariantGroup) {
2518 auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
2519 // If this is a MemoryPhi, we can't do anything.
2520 if (!StartingAccess)
2521 return MA;
2522
2523 if (UseInvariantGroup) {
2524 if (auto *I = getInvariantGroupClobberingInstruction(
2525 *StartingAccess->getMemoryInst(), MSSA->getDomTree())) {
2526 assert(isa<LoadInst>(I) || isa<StoreInst>(I));
2527
2528 auto *ClobberMA = MSSA->getMemoryAccess(I);
2529 assert(ClobberMA);
2530 if (isa<MemoryUse>(ClobberMA))
2531 return ClobberMA->getDefiningAccess();
2532 return ClobberMA;
2533 }
2534 }
2535
2536 bool IsOptimized = false;
2537
2538 // If this is an already optimized use or def, return the optimized result.
2539 // Note: Currently, we store the optimized def result in a separate field,
2540 // since we can't use the defining access.
2541 if (StartingAccess->isOptimized()) {
2542 if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
2543 return StartingAccess->getOptimized();
2544 IsOptimized = true;
2545 }
2546
2547 const Instruction *I = StartingAccess->getMemoryInst();
2548 // We can't sanely do anything with a fence, since fences conservatively
2549 // clobber all memory and have no locations from which to get pointers
2550 // to try to disambiguate.
2551 if (!isa<CallBase>(I) && I->isFenceLike())
2552 return StartingAccess;
2553
2554 UpwardsMemoryQuery Q(I, StartingAccess);
2555
2556 if (isUseTriviallyOptimizableToLiveOnEntry(BAA, I)) {
2557 MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
2558 StartingAccess->setOptimized(LiveOnEntry);
2559 return LiveOnEntry;
2560 }
2561
2562 MemoryAccess *OptimizedAccess;
2563 if (!IsOptimized) {
2564 // Start with the thing we already think clobbers this location
2565 MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
2566
2567 // At this point, DefiningAccess may be the live on entry def.
2568 // If it is, we will not get a better result.
2569 if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
2570 StartingAccess->setOptimized(DefiningAccess);
2571 return DefiningAccess;
2572 }
2573
2574 OptimizedAccess =
2575 Walker.findClobber(BAA, DefiningAccess, Q, UpwardWalkLimit);
2576 StartingAccess->setOptimized(OptimizedAccess);
2577 } else
2578 OptimizedAccess = StartingAccess->getOptimized();
2579
2580 LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2581 LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
2582 LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
2583 LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");
2584
2585 MemoryAccess *Result;
2586 if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
2587 isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) {
2588 assert(isa<MemoryDef>(Q.OriginalAccess));
2589 Q.SkipSelfAccess = true;
2590 Result = Walker.findClobber(BAA, OptimizedAccess, Q, UpwardWalkLimit);
2591 } else
2592 Result = OptimizedAccess;
2593
2594 LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
2595 LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");
2596
2597 return Result;
2598 }
2599
2600 MemoryAccess *
2601 DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA,
2602 BatchAAResults &) {
2603 if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
2604 return Use->getDefiningAccess();
2605 return MA;
2606 }
2607
2608 MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
2609 MemoryAccess *StartingAccess, const MemoryLocation &, BatchAAResults &) {
2610 if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
2611 return Use->getDefiningAccess();
2612 return StartingAccess;
2613 }
2614
2615 void MemoryPhi::deleteMe(DerivedUser *Self) {
2616 delete static_cast<MemoryPhi *>(Self);
2617 }
2618
2619 void MemoryDef::deleteMe(DerivedUser *Self) {
2620 delete static_cast<MemoryDef *>(Self);
2621 }
2622
2623 void MemoryUse::deleteMe(DerivedUser *Self) {
2624 delete static_cast<MemoryUse *>(Self);
2625 }
2626
2627 bool upward_defs_iterator::IsGuaranteedLoopInvariant(const Value *Ptr) const {
2628 auto IsGuaranteedLoopInvariantBase = [](const Value *Ptr) {
2629 Ptr = Ptr->stripPointerCasts();
2630 if (!isa<Instruction>(Ptr))
2631 return true;
2632 return isa<AllocaInst>(Ptr);
2633 };
2634
2635 Ptr = Ptr->stripPointerCasts();
2636 if (auto *I = dyn_cast<Instruction>(Ptr)) {
2637 if (I->getParent()->isEntryBlock())
2638 return true;
2639 }
2640 if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
2641 return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) &&
2642 GEP->hasAllConstantIndices();
2643 }
2644 return IsGuaranteedLoopInvariantBase(Ptr);
2645 }
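// Illustrative examples (hypothetical values): an alloca, a function
// argument, or a GEP with all-constant indices off such a base is guaranteed
// loop invariant:
//   %a = alloca [4 x i32]                              ; invariant
//   %e = getelementptr [4 x i32], ptr %a, i64 0, i64 2 ; invariant
// while a GEP whose index may vary per iteration is not:
//   %p = getelementptr i32, ptr %a, i64 %iv            ; not guaranteed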
2646