1 //===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass performs global value numbering to eliminate fully redundant
10 // instructions.  It also performs simple dead load elimination.
11 //
12 // Note that this pass does the value numbering itself; it does not use the
13 // ValueNumbering analysis passes.
14 //
15 //===----------------------------------------------------------------------===//
16 
17 #include "llvm/Transforms/Scalar/GVN.h"
18 #include "llvm/ADT/DenseMap.h"
19 #include "llvm/ADT/DepthFirstIterator.h"
20 #include "llvm/ADT/Hashing.h"
21 #include "llvm/ADT/MapVector.h"
22 #include "llvm/ADT/PostOrderIterator.h"
23 #include "llvm/ADT/STLExtras.h"
24 #include "llvm/ADT/SetVector.h"
25 #include "llvm/ADT/SmallPtrSet.h"
26 #include "llvm/ADT/SmallVector.h"
27 #include "llvm/ADT/Statistic.h"
28 #include "llvm/Analysis/AliasAnalysis.h"
29 #include "llvm/Analysis/AssumeBundleQueries.h"
30 #include "llvm/Analysis/AssumptionCache.h"
31 #include "llvm/Analysis/CFG.h"
32 #include "llvm/Analysis/DomTreeUpdater.h"
33 #include "llvm/Analysis/GlobalsModRef.h"
34 #include "llvm/Analysis/InstructionPrecedenceTracking.h"
35 #include "llvm/Analysis/InstructionSimplify.h"
36 #include "llvm/Analysis/LoopInfo.h"
37 #include "llvm/Analysis/MemoryBuiltins.h"
38 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
39 #include "llvm/Analysis/MemorySSA.h"
40 #include "llvm/Analysis/MemorySSAUpdater.h"
41 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
42 #include "llvm/Analysis/PHITransAddr.h"
43 #include "llvm/Analysis/TargetLibraryInfo.h"
44 #include "llvm/Analysis/ValueTracking.h"
45 #include "llvm/IR/Attributes.h"
46 #include "llvm/IR/BasicBlock.h"
47 #include "llvm/IR/Constant.h"
48 #include "llvm/IR/Constants.h"
49 #include "llvm/IR/DebugLoc.h"
50 #include "llvm/IR/Dominators.h"
51 #include "llvm/IR/Function.h"
52 #include "llvm/IR/InstrTypes.h"
53 #include "llvm/IR/Instruction.h"
54 #include "llvm/IR/Instructions.h"
55 #include "llvm/IR/IntrinsicInst.h"
56 #include "llvm/IR/LLVMContext.h"
57 #include "llvm/IR/Metadata.h"
58 #include "llvm/IR/Module.h"
59 #include "llvm/IR/PassManager.h"
60 #include "llvm/IR/PatternMatch.h"
61 #include "llvm/IR/Type.h"
62 #include "llvm/IR/Use.h"
63 #include "llvm/IR/Value.h"
64 #include "llvm/InitializePasses.h"
65 #include "llvm/Pass.h"
66 #include "llvm/Support/Casting.h"
67 #include "llvm/Support/CommandLine.h"
68 #include "llvm/Support/Compiler.h"
69 #include "llvm/Support/Debug.h"
70 #include "llvm/Support/raw_ostream.h"
71 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
72 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
73 #include "llvm/Transforms/Utils/Local.h"
74 #include "llvm/Transforms/Utils/SSAUpdater.h"
75 #include "llvm/Transforms/Utils/VNCoercion.h"
76 #include <algorithm>
77 #include <cassert>
78 #include <cstdint>
79 #include <optional>
80 #include <utility>
81 
82 using namespace llvm;
83 using namespace llvm::gvn;
84 using namespace llvm::VNCoercion;
85 using namespace PatternMatch;
86 
87 #define DEBUG_TYPE "gvn"
88 
89 STATISTIC(NumGVNInstr, "Number of instructions deleted");
90 STATISTIC(NumGVNLoad, "Number of loads deleted");
91 STATISTIC(NumGVNPRE, "Number of instructions PRE'd");
92 STATISTIC(NumGVNBlocks, "Number of blocks merged");
93 STATISTIC(NumGVNSimpl, "Number of instructions simplified");
94 STATISTIC(NumGVNEqProp, "Number of equalities propagated");
95 STATISTIC(NumPRELoad, "Number of loads PRE'd");
96 STATISTIC(NumPRELoopLoad, "Number of loop loads PRE'd");
97 STATISTIC(NumPRELoadMoved2CEPred,
98           "Number of loads moved to predecessor of a critical edge in PRE");
99 
100 STATISTIC(IsValueFullyAvailableInBlockNumSpeculationsMax,
101           "Number of blocks speculated as available in "
102           "IsValueFullyAvailableInBlock(), max");
103 STATISTIC(MaxBBSpeculationCutoffReachedTimes,
104           "Number of times we we reached gvn-max-block-speculations cut-off "
105           "preventing further exploration");
106 
107 static cl::opt<bool> GVNEnablePRE("enable-pre", cl::init(true), cl::Hidden);
108 static cl::opt<bool> GVNEnableLoadPRE("enable-load-pre", cl::init(true));
109 static cl::opt<bool> GVNEnableLoadInLoopPRE("enable-load-in-loop-pre",
110                                             cl::init(true));
111 static cl::opt<bool>
112 GVNEnableSplitBackedgeInLoadPRE("enable-split-backedge-in-load-pre",
113                                 cl::init(false));
114 static cl::opt<bool> GVNEnableMemDep("enable-gvn-memdep", cl::init(true));
115 
116 static cl::opt<uint32_t> MaxNumDeps(
117     "gvn-max-num-deps", cl::Hidden, cl::init(100),
118     cl::desc("Max number of dependences to attempt Load PRE (default = 100)"));
119 
120 // This is based on IsValueFullyAvailableInBlockNumSpeculationsMax stat.
121 static cl::opt<uint32_t> MaxBBSpeculations(
122     "gvn-max-block-speculations", cl::Hidden, cl::init(600),
123     cl::desc("Max number of blocks we're willing to speculate on (and recurse "
124              "into) when deducing if a value is fully available or not in GVN "
125              "(default = 600)"));
126 
127 static cl::opt<uint32_t> MaxNumVisitedInsts(
128     "gvn-max-num-visited-insts", cl::Hidden, cl::init(100),
129     cl::desc("Max number of visited instructions when trying to find "
130              "dominating value of select dependency (default = 100)"));
131 
132 static cl::opt<uint32_t> MaxNumInsnsPerBlock(
133     "gvn-max-num-insns", cl::Hidden, cl::init(100),
134     cl::desc("Max number of instructions to scan in each basic block in GVN "
135              "(default = 100)"));
136 
137 struct llvm::GVNPass::Expression {
138   uint32_t opcode;
139   bool commutative = false;
140   // The type is not necessarily the result type of the expression, it may be
141   // any additional type needed to disambiguate the expression.
142   Type *type = nullptr;
143   SmallVector<uint32_t, 4> varargs;
144 
145   Expression(uint32_t o = ~2U) : opcode(o) {}
146 
147   bool operator==(const Expression &other) const {
148     if (opcode != other.opcode)
149       return false;
150     if (opcode == ~0U || opcode == ~1U)
151       return true;
152     if (type != other.type)
153       return false;
154     if (varargs != other.varargs)
155       return false;
156     return true;
157   }
158 
159   friend hash_code hash_value(const Expression &Value) {
160     return hash_combine(
161         Value.opcode, Value.type,
162         hash_combine_range(Value.varargs.begin(), Value.varargs.end()));
163   }
164 };
165 
166 namespace llvm {
167 
168 template <> struct DenseMapInfo<GVNPass::Expression> {
169   static inline GVNPass::Expression getEmptyKey() { return ~0U; }
170   static inline GVNPass::Expression getTombstoneKey() { return ~1U; }
171 
172   static unsigned getHashValue(const GVNPass::Expression &e) {
173     using llvm::hash_value;
174 
175     return static_cast<unsigned>(hash_value(e));
176   }
177 
178   static bool isEqual(const GVNPass::Expression &LHS,
179                       const GVNPass::Expression &RHS) {
180     return LHS == RHS;
181   }
182 };
183 
184 } // end namespace llvm
185 
186 /// Represents a particular available value that we know how to materialize.
187 /// Materialization of an AvailableValue never fails.  An AvailableValue is
188 /// implicitly associated with a rematerialization point which is the
189 /// location of the instruction from which it was formed.
190 struct llvm::gvn::AvailableValue {
191   enum class ValType {
192     SimpleVal, // A simple offsetted value that is accessed.
193     LoadVal,   // A value produced by a load.
194     MemIntrin, // A memory intrinsic which is loaded from.
195     UndefVal,  // A UndefValue representing a value from dead block (which
196                // is not yet physically removed from the CFG).
197     SelectVal, // A pointer select which is loaded from and for which the load
198                // can be replace by a value select.
199   };
200 
201   /// Val - The value that is live out of the block.
202   Value *Val;
203   /// Kind of the live-out value.
204   ValType Kind;
205 
206   /// Offset - The byte offset in Val that is interesting for the load query.
207   unsigned Offset = 0;
208   /// V1, V2 - The dominating non-clobbered values of SelectVal.
209   Value *V1 = nullptr, *V2 = nullptr;
210 
211   static AvailableValue get(Value *V, unsigned Offset = 0) {
212     AvailableValue Res;
213     Res.Val = V;
214     Res.Kind = ValType::SimpleVal;
215     Res.Offset = Offset;
216     return Res;
217   }
218 
219   static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset = 0) {
220     AvailableValue Res;
221     Res.Val = MI;
222     Res.Kind = ValType::MemIntrin;
223     Res.Offset = Offset;
224     return Res;
225   }
226 
227   static AvailableValue getLoad(LoadInst *Load, unsigned Offset = 0) {
228     AvailableValue Res;
229     Res.Val = Load;
230     Res.Kind = ValType::LoadVal;
231     Res.Offset = Offset;
232     return Res;
233   }
234 
235   static AvailableValue getUndef() {
236     AvailableValue Res;
237     Res.Val = nullptr;
238     Res.Kind = ValType::UndefVal;
239     Res.Offset = 0;
240     return Res;
241   }
242 
243   static AvailableValue getSelect(SelectInst *Sel, Value *V1, Value *V2) {
244     AvailableValue Res;
245     Res.Val = Sel;
246     Res.Kind = ValType::SelectVal;
247     Res.Offset = 0;
248     Res.V1 = V1;
249     Res.V2 = V2;
250     return Res;
251   }
252 
253   bool isSimpleValue() const { return Kind == ValType::SimpleVal; }
254   bool isCoercedLoadValue() const { return Kind == ValType::LoadVal; }
255   bool isMemIntrinValue() const { return Kind == ValType::MemIntrin; }
256   bool isUndefValue() const { return Kind == ValType::UndefVal; }
257   bool isSelectValue() const { return Kind == ValType::SelectVal; }
258 
259   Value *getSimpleValue() const {
260     assert(isSimpleValue() && "Wrong accessor");
261     return Val;
262   }
263 
264   LoadInst *getCoercedLoadValue() const {
265     assert(isCoercedLoadValue() && "Wrong accessor");
266     return cast<LoadInst>(Val);
267   }
268 
269   MemIntrinsic *getMemIntrinValue() const {
270     assert(isMemIntrinValue() && "Wrong accessor");
271     return cast<MemIntrinsic>(Val);
272   }
273 
274   SelectInst *getSelectValue() const {
275     assert(isSelectValue() && "Wrong accessor");
276     return cast<SelectInst>(Val);
277   }
278 
279   /// Emit code at the specified insertion point to adjust the value defined
280   /// here to the specified type. This handles various coercion cases.
281   Value *MaterializeAdjustedValue(LoadInst *Load, Instruction *InsertPt,
282                                   GVNPass &gvn) const;
283 };
284 
285 /// Represents an AvailableValue which can be rematerialized at the end of
286 /// the associated BasicBlock.
287 struct llvm::gvn::AvailableValueInBlock {
288   /// BB - The basic block in question.
289   BasicBlock *BB = nullptr;
290 
291   /// AV - The actual available value
292   AvailableValue AV;
293 
294   static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV) {
295     AvailableValueInBlock Res;
296     Res.BB = BB;
297     Res.AV = std::move(AV);
298     return Res;
299   }
300 
301   static AvailableValueInBlock get(BasicBlock *BB, Value *V,
302                                    unsigned Offset = 0) {
303     return get(BB, AvailableValue::get(V, Offset));
304   }
305 
306   static AvailableValueInBlock getUndef(BasicBlock *BB) {
307     return get(BB, AvailableValue::getUndef());
308   }
309 
310   static AvailableValueInBlock getSelect(BasicBlock *BB, SelectInst *Sel,
311                                          Value *V1, Value *V2) {
312     return get(BB, AvailableValue::getSelect(Sel, V1, V2));
313   }
314 
315   /// Emit code at the end of this block to adjust the value defined here to
316   /// the specified type. This handles various coercion cases.
317   Value *MaterializeAdjustedValue(LoadInst *Load, GVNPass &gvn) const {
318     return AV.MaterializeAdjustedValue(Load, BB->getTerminator(), gvn);
319   }
320 };
321 
322 //===----------------------------------------------------------------------===//
323 //                     ValueTable Internal Functions
324 //===----------------------------------------------------------------------===//
325 
326 GVNPass::Expression GVNPass::ValueTable::createExpr(Instruction *I) {
327   Expression e;
328   e.type = I->getType();
329   e.opcode = I->getOpcode();
330   if (const GCRelocateInst *GCR = dyn_cast<GCRelocateInst>(I)) {
331     // gc.relocate is 'special' call: its second and third operands are
332     // not real values, but indices into statepoint's argument list.
333     // Use the refered to values for purposes of identity.
334     e.varargs.push_back(lookupOrAdd(GCR->getOperand(0)));
335     e.varargs.push_back(lookupOrAdd(GCR->getBasePtr()));
336     e.varargs.push_back(lookupOrAdd(GCR->getDerivedPtr()));
337   } else {
338     for (Use &Op : I->operands())
339       e.varargs.push_back(lookupOrAdd(Op));
340   }
341   if (I->isCommutative()) {
342     // Ensure that commutative instructions that only differ by a permutation
343     // of their operands get the same value number by sorting the operand value
344     // numbers.  Since commutative operands are the 1st two operands it is more
345     // efficient to sort by hand rather than using, say, std::sort.
346     assert(I->getNumOperands() >= 2 && "Unsupported commutative instruction!");
347     if (e.varargs[0] > e.varargs[1])
348       std::swap(e.varargs[0], e.varargs[1]);
349     e.commutative = true;
350   }
351 
352   if (auto *C = dyn_cast<CmpInst>(I)) {
353     // Sort the operand value numbers so x<y and y>x get the same value number.
354     CmpInst::Predicate Predicate = C->getPredicate();
355     if (e.varargs[0] > e.varargs[1]) {
356       std::swap(e.varargs[0], e.varargs[1]);
357       Predicate = CmpInst::getSwappedPredicate(Predicate);
358     }
359     e.opcode = (C->getOpcode() << 8) | Predicate;
360     e.commutative = true;
361   } else if (auto *E = dyn_cast<InsertValueInst>(I)) {
362     e.varargs.append(E->idx_begin(), E->idx_end());
363   } else if (auto *SVI = dyn_cast<ShuffleVectorInst>(I)) {
364     ArrayRef<int> ShuffleMask = SVI->getShuffleMask();
365     e.varargs.append(ShuffleMask.begin(), ShuffleMask.end());
366   }
367 
368   return e;
369 }
370 
371 GVNPass::Expression GVNPass::ValueTable::createCmpExpr(
372     unsigned Opcode, CmpInst::Predicate Predicate, Value *LHS, Value *RHS) {
373   assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
374          "Not a comparison!");
375   Expression e;
376   e.type = CmpInst::makeCmpResultType(LHS->getType());
377   e.varargs.push_back(lookupOrAdd(LHS));
378   e.varargs.push_back(lookupOrAdd(RHS));
379 
380   // Sort the operand value numbers so x<y and y>x get the same value number.
381   if (e.varargs[0] > e.varargs[1]) {
382     std::swap(e.varargs[0], e.varargs[1]);
383     Predicate = CmpInst::getSwappedPredicate(Predicate);
384   }
385   e.opcode = (Opcode << 8) | Predicate;
386   e.commutative = true;
387   return e;
388 }
389 
390 GVNPass::Expression
391 GVNPass::ValueTable::createExtractvalueExpr(ExtractValueInst *EI) {
392   assert(EI && "Not an ExtractValueInst?");
393   Expression e;
394   e.type = EI->getType();
395   e.opcode = 0;
396 
397   WithOverflowInst *WO = dyn_cast<WithOverflowInst>(EI->getAggregateOperand());
398   if (WO != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
399     // EI is an extract from one of our with.overflow intrinsics. Synthesize
400     // a semantically equivalent expression instead of an extract value
401     // expression.
402     e.opcode = WO->getBinaryOp();
403     e.varargs.push_back(lookupOrAdd(WO->getLHS()));
404     e.varargs.push_back(lookupOrAdd(WO->getRHS()));
405     return e;
406   }
407 
408   // Not a recognised intrinsic. Fall back to producing an extract value
409   // expression.
410   e.opcode = EI->getOpcode();
411   for (Use &Op : EI->operands())
412     e.varargs.push_back(lookupOrAdd(Op));
413 
414   append_range(e.varargs, EI->indices());
415 
416   return e;
417 }
418 
419 GVNPass::Expression GVNPass::ValueTable::createGEPExpr(GetElementPtrInst *GEP) {
420   Expression E;
421   Type *PtrTy = GEP->getType()->getScalarType();
422   const DataLayout &DL = GEP->getModule()->getDataLayout();
423   unsigned BitWidth = DL.getIndexTypeSizeInBits(PtrTy);
424   MapVector<Value *, APInt> VariableOffsets;
425   APInt ConstantOffset(BitWidth, 0);
426   if (GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset)) {
427     // Convert into offset representation, to recognize equivalent address
428     // calculations that use different type encoding.
429     LLVMContext &Context = GEP->getContext();
430     E.opcode = GEP->getOpcode();
431     E.type = nullptr;
432     E.varargs.push_back(lookupOrAdd(GEP->getPointerOperand()));
433     for (const auto &Pair : VariableOffsets) {
434       E.varargs.push_back(lookupOrAdd(Pair.first));
435       E.varargs.push_back(lookupOrAdd(ConstantInt::get(Context, Pair.second)));
436     }
437     if (!ConstantOffset.isZero())
438       E.varargs.push_back(
439           lookupOrAdd(ConstantInt::get(Context, ConstantOffset)));
440   } else {
441     // If converting to offset representation fails (for scalable vectors),
442     // fall back to type-based implementation:
443     E.opcode = GEP->getOpcode();
444     E.type = GEP->getSourceElementType();
445     for (Use &Op : GEP->operands())
446       E.varargs.push_back(lookupOrAdd(Op));
447   }
448   return E;
449 }
450 
451 //===----------------------------------------------------------------------===//
452 //                     ValueTable External Functions
453 //===----------------------------------------------------------------------===//
454 
455 GVNPass::ValueTable::ValueTable() = default;
456 GVNPass::ValueTable::ValueTable(const ValueTable &) = default;
457 GVNPass::ValueTable::ValueTable(ValueTable &&) = default;
458 GVNPass::ValueTable::~ValueTable() = default;
459 GVNPass::ValueTable &
460 GVNPass::ValueTable::operator=(const GVNPass::ValueTable &Arg) = default;
461 
462 /// add - Insert a value into the table with a specified value number.
463 void GVNPass::ValueTable::add(Value *V, uint32_t num) {
464   valueNumbering.insert(std::make_pair(V, num));
465   if (PHINode *PN = dyn_cast<PHINode>(V))
466     NumberingPhi[num] = PN;
467 }
468 
469 uint32_t GVNPass::ValueTable::lookupOrAddCall(CallInst *C) {
470   // FIXME: Currently the calls which may access the thread id may
471   // be considered as not accessing the memory. But this is
472   // problematic for coroutines, since coroutines may resume in a
473   // different thread. So we disable the optimization here for the
474   // correctness. However, it may block many other correct
475   // optimizations. Revert this one when we detect the memory
476   // accessing kind more precisely.
477   if (C->getFunction()->isPresplitCoroutine()) {
478     valueNumbering[C] = nextValueNumber;
479     return nextValueNumber++;
480   }
481 
482   // Do not combine convergent calls since they implicitly depend on the set of
483   // threads that is currently executing, and they might be in different basic
484   // blocks.
485   if (C->isConvergent()) {
486     valueNumbering[C] = nextValueNumber;
487     return nextValueNumber++;
488   }
489 
490   if (AA->doesNotAccessMemory(C)) {
491     Expression exp = createExpr(C);
492     uint32_t e = assignExpNewValueNum(exp).first;
493     valueNumbering[C] = e;
494     return e;
495   }
496 
497   if (MD && AA->onlyReadsMemory(C)) {
498     Expression exp = createExpr(C);
499     auto ValNum = assignExpNewValueNum(exp);
500     if (ValNum.second) {
501       valueNumbering[C] = ValNum.first;
502       return ValNum.first;
503     }
504 
505     MemDepResult local_dep = MD->getDependency(C);
506 
507     if (!local_dep.isDef() && !local_dep.isNonLocal()) {
508       valueNumbering[C] =  nextValueNumber;
509       return nextValueNumber++;
510     }
511 
512     if (local_dep.isDef()) {
513       // For masked load/store intrinsics, the local_dep may actually be
514       // a normal load or store instruction.
515       CallInst *local_cdep = dyn_cast<CallInst>(local_dep.getInst());
516 
517       if (!local_cdep || local_cdep->arg_size() != C->arg_size()) {
518         valueNumbering[C] = nextValueNumber;
519         return nextValueNumber++;
520       }
521 
522       for (unsigned i = 0, e = C->arg_size(); i < e; ++i) {
523         uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
524         uint32_t cd_vn = lookupOrAdd(local_cdep->getArgOperand(i));
525         if (c_vn != cd_vn) {
526           valueNumbering[C] = nextValueNumber;
527           return nextValueNumber++;
528         }
529       }
530 
531       uint32_t v = lookupOrAdd(local_cdep);
532       valueNumbering[C] = v;
533       return v;
534     }
535 
536     // Non-local case.
537     const MemoryDependenceResults::NonLocalDepInfo &deps =
538         MD->getNonLocalCallDependency(C);
539     // FIXME: Move the checking logic to MemDep!
540     CallInst* cdep = nullptr;
541 
542     // Check to see if we have a single dominating call instruction that is
543     // identical to C.
544     for (const NonLocalDepEntry &I : deps) {
545       if (I.getResult().isNonLocal())
546         continue;
547 
548       // We don't handle non-definitions.  If we already have a call, reject
549       // instruction dependencies.
550       if (!I.getResult().isDef() || cdep != nullptr) {
551         cdep = nullptr;
552         break;
553       }
554 
555       CallInst *NonLocalDepCall = dyn_cast<CallInst>(I.getResult().getInst());
556       // FIXME: All duplicated with non-local case.
557       if (NonLocalDepCall && DT->properlyDominates(I.getBB(), C->getParent())) {
558         cdep = NonLocalDepCall;
559         continue;
560       }
561 
562       cdep = nullptr;
563       break;
564     }
565 
566     if (!cdep) {
567       valueNumbering[C] = nextValueNumber;
568       return nextValueNumber++;
569     }
570 
571     if (cdep->arg_size() != C->arg_size()) {
572       valueNumbering[C] = nextValueNumber;
573       return nextValueNumber++;
574     }
575     for (unsigned i = 0, e = C->arg_size(); i < e; ++i) {
576       uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
577       uint32_t cd_vn = lookupOrAdd(cdep->getArgOperand(i));
578       if (c_vn != cd_vn) {
579         valueNumbering[C] = nextValueNumber;
580         return nextValueNumber++;
581       }
582     }
583 
584     uint32_t v = lookupOrAdd(cdep);
585     valueNumbering[C] = v;
586     return v;
587   }
588 
589   valueNumbering[C] = nextValueNumber;
590   return nextValueNumber++;
591 }
592 
593 /// Returns true if a value number exists for the specified value.
594 bool GVNPass::ValueTable::exists(Value *V) const {
595   return valueNumbering.count(V) != 0;
596 }
597 
598 /// lookup_or_add - Returns the value number for the specified value, assigning
599 /// it a new number if it did not have one before.
600 uint32_t GVNPass::ValueTable::lookupOrAdd(Value *V) {
601   DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
602   if (VI != valueNumbering.end())
603     return VI->second;
604 
605   auto *I = dyn_cast<Instruction>(V);
606   if (!I) {
607     valueNumbering[V] = nextValueNumber;
608     return nextValueNumber++;
609   }
610 
611   Expression exp;
612   switch (I->getOpcode()) {
613     case Instruction::Call:
614       return lookupOrAddCall(cast<CallInst>(I));
615     case Instruction::FNeg:
616     case Instruction::Add:
617     case Instruction::FAdd:
618     case Instruction::Sub:
619     case Instruction::FSub:
620     case Instruction::Mul:
621     case Instruction::FMul:
622     case Instruction::UDiv:
623     case Instruction::SDiv:
624     case Instruction::FDiv:
625     case Instruction::URem:
626     case Instruction::SRem:
627     case Instruction::FRem:
628     case Instruction::Shl:
629     case Instruction::LShr:
630     case Instruction::AShr:
631     case Instruction::And:
632     case Instruction::Or:
633     case Instruction::Xor:
634     case Instruction::ICmp:
635     case Instruction::FCmp:
636     case Instruction::Trunc:
637     case Instruction::ZExt:
638     case Instruction::SExt:
639     case Instruction::FPToUI:
640     case Instruction::FPToSI:
641     case Instruction::UIToFP:
642     case Instruction::SIToFP:
643     case Instruction::FPTrunc:
644     case Instruction::FPExt:
645     case Instruction::PtrToInt:
646     case Instruction::IntToPtr:
647     case Instruction::AddrSpaceCast:
648     case Instruction::BitCast:
649     case Instruction::Select:
650     case Instruction::Freeze:
651     case Instruction::ExtractElement:
652     case Instruction::InsertElement:
653     case Instruction::ShuffleVector:
654     case Instruction::InsertValue:
655       exp = createExpr(I);
656       break;
657     case Instruction::GetElementPtr:
658       exp = createGEPExpr(cast<GetElementPtrInst>(I));
659       break;
660     case Instruction::ExtractValue:
661       exp = createExtractvalueExpr(cast<ExtractValueInst>(I));
662       break;
663     case Instruction::PHI:
664       valueNumbering[V] = nextValueNumber;
665       NumberingPhi[nextValueNumber] = cast<PHINode>(V);
666       return nextValueNumber++;
667     default:
668       valueNumbering[V] = nextValueNumber;
669       return nextValueNumber++;
670   }
671 
672   uint32_t e = assignExpNewValueNum(exp).first;
673   valueNumbering[V] = e;
674   return e;
675 }
676 
677 /// Returns the value number of the specified value. Fails if
678 /// the value has not yet been numbered.
679 uint32_t GVNPass::ValueTable::lookup(Value *V, bool Verify) const {
680   DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
681   if (Verify) {
682     assert(VI != valueNumbering.end() && "Value not numbered?");
683     return VI->second;
684   }
685   return (VI != valueNumbering.end()) ? VI->second : 0;
686 }
687 
688 /// Returns the value number of the given comparison,
689 /// assigning it a new number if it did not have one before.  Useful when
690 /// we deduced the result of a comparison, but don't immediately have an
691 /// instruction realizing that comparison to hand.
692 uint32_t GVNPass::ValueTable::lookupOrAddCmp(unsigned Opcode,
693                                              CmpInst::Predicate Predicate,
694                                              Value *LHS, Value *RHS) {
695   Expression exp = createCmpExpr(Opcode, Predicate, LHS, RHS);
696   return assignExpNewValueNum(exp).first;
697 }
698 
699 /// Remove all entries from the ValueTable.
700 void GVNPass::ValueTable::clear() {
701   valueNumbering.clear();
702   expressionNumbering.clear();
703   NumberingPhi.clear();
704   PhiTranslateTable.clear();
705   nextValueNumber = 1;
706   Expressions.clear();
707   ExprIdx.clear();
708   nextExprNumber = 0;
709 }
710 
711 /// Remove a value from the value numbering.
712 void GVNPass::ValueTable::erase(Value *V) {
713   uint32_t Num = valueNumbering.lookup(V);
714   valueNumbering.erase(V);
715   // If V is PHINode, V <--> value number is an one-to-one mapping.
716   if (isa<PHINode>(V))
717     NumberingPhi.erase(Num);
718 }
719 
720 /// verifyRemoved - Verify that the value is removed from all internal data
721 /// structures.
722 void GVNPass::ValueTable::verifyRemoved(const Value *V) const {
723   assert(!valueNumbering.contains(V) &&
724          "Inst still occurs in value numbering map!");
725 }
726 
727 //===----------------------------------------------------------------------===//
728 //                                GVN Pass
729 //===----------------------------------------------------------------------===//
730 
731 bool GVNPass::isPREEnabled() const {
732   return Options.AllowPRE.value_or(GVNEnablePRE);
733 }
734 
735 bool GVNPass::isLoadPREEnabled() const {
736   return Options.AllowLoadPRE.value_or(GVNEnableLoadPRE);
737 }
738 
739 bool GVNPass::isLoadInLoopPREEnabled() const {
740   return Options.AllowLoadInLoopPRE.value_or(GVNEnableLoadInLoopPRE);
741 }
742 
743 bool GVNPass::isLoadPRESplitBackedgeEnabled() const {
744   return Options.AllowLoadPRESplitBackedge.value_or(
745       GVNEnableSplitBackedgeInLoadPRE);
746 }
747 
748 bool GVNPass::isMemDepEnabled() const {
749   return Options.AllowMemDep.value_or(GVNEnableMemDep);
750 }
751 
752 PreservedAnalyses GVNPass::run(Function &F, FunctionAnalysisManager &AM) {
753   // FIXME: The order of evaluation of these 'getResult' calls is very
754   // significant! Re-ordering these variables will cause GVN when run alone to
755   // be less effective! We should fix memdep and basic-aa to not exhibit this
756   // behavior, but until then don't change the order here.
757   auto &AC = AM.getResult<AssumptionAnalysis>(F);
758   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
759   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
760   auto &AA = AM.getResult<AAManager>(F);
761   auto *MemDep =
762       isMemDepEnabled() ? &AM.getResult<MemoryDependenceAnalysis>(F) : nullptr;
763   auto *LI = AM.getCachedResult<LoopAnalysis>(F);
764   auto *MSSA = AM.getCachedResult<MemorySSAAnalysis>(F);
765   auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
766   bool Changed = runImpl(F, AC, DT, TLI, AA, MemDep, LI, &ORE,
767                          MSSA ? &MSSA->getMSSA() : nullptr);
768   if (!Changed)
769     return PreservedAnalyses::all();
770   PreservedAnalyses PA;
771   PA.preserve<DominatorTreeAnalysis>();
772   PA.preserve<TargetLibraryAnalysis>();
773   if (MSSA)
774     PA.preserve<MemorySSAAnalysis>();
775   if (LI)
776     PA.preserve<LoopAnalysis>();
777   return PA;
778 }
779 
780 void GVNPass::printPipeline(
781     raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
782   static_cast<PassInfoMixin<GVNPass> *>(this)->printPipeline(
783       OS, MapClassName2PassName);
784 
785   OS << '<';
786   if (Options.AllowPRE != std::nullopt)
787     OS << (*Options.AllowPRE ? "" : "no-") << "pre;";
788   if (Options.AllowLoadPRE != std::nullopt)
789     OS << (*Options.AllowLoadPRE ? "" : "no-") << "load-pre;";
790   if (Options.AllowLoadPRESplitBackedge != std::nullopt)
791     OS << (*Options.AllowLoadPRESplitBackedge ? "" : "no-")
792        << "split-backedge-load-pre;";
793   if (Options.AllowMemDep != std::nullopt)
794     OS << (*Options.AllowMemDep ? "" : "no-") << "memdep";
795   OS << '>';
796 }
797 
798 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
799 LLVM_DUMP_METHOD void GVNPass::dump(DenseMap<uint32_t, Value *> &d) const {
800   errs() << "{\n";
801   for (auto &I : d) {
802     errs() << I.first << "\n";
803     I.second->dump();
804   }
805   errs() << "}\n";
806 }
807 #endif
808 
809 enum class AvailabilityState : char {
810   /// We know the block *is not* fully available. This is a fixpoint.
811   Unavailable = 0,
812   /// We know the block *is* fully available. This is a fixpoint.
813   Available = 1,
814   /// We do not know whether the block is fully available or not,
815   /// but we are currently speculating that it will be.
816   /// If it would have turned out that the block was, in fact, not fully
817   /// available, this would have been cleaned up into an Unavailable.
818   SpeculativelyAvailable = 2,
819 };
820 
821 /// Return true if we can prove that the value
822 /// we're analyzing is fully available in the specified block.  As we go, keep
823 /// track of which blocks we know are fully alive in FullyAvailableBlocks.  This
824 /// map is actually a tri-state map with the following values:
825 ///   0) we know the block *is not* fully available.
826 ///   1) we know the block *is* fully available.
827 ///   2) we do not know whether the block is fully available or not, but we are
828 ///      currently speculating that it will be.
829 static bool IsValueFullyAvailableInBlock(
830     BasicBlock *BB,
831     DenseMap<BasicBlock *, AvailabilityState> &FullyAvailableBlocks) {
832   SmallVector<BasicBlock *, 32> Worklist;
833   std::optional<BasicBlock *> UnavailableBB;
834 
835   // The number of times we didn't find an entry for a block in a map and
836   // optimistically inserted an entry marking block as speculatively available.
837   unsigned NumNewNewSpeculativelyAvailableBBs = 0;
838 
839 #ifndef NDEBUG
840   SmallSet<BasicBlock *, 32> NewSpeculativelyAvailableBBs;
841   SmallVector<BasicBlock *, 32> AvailableBBs;
842 #endif
843 
844   Worklist.emplace_back(BB);
845   while (!Worklist.empty()) {
846     BasicBlock *CurrBB = Worklist.pop_back_val(); // LoadFO - depth-first!
847     // Optimistically assume that the block is Speculatively Available and check
848     // to see if we already know about this block in one lookup.
849     std::pair<DenseMap<BasicBlock *, AvailabilityState>::iterator, bool> IV =
850         FullyAvailableBlocks.try_emplace(
851             CurrBB, AvailabilityState::SpeculativelyAvailable);
852     AvailabilityState &State = IV.first->second;
853 
854     // Did the entry already exist for this block?
855     if (!IV.second) {
856       if (State == AvailabilityState::Unavailable) {
857         UnavailableBB = CurrBB;
858         break; // Backpropagate unavailability info.
859       }
860 
861 #ifndef NDEBUG
862       AvailableBBs.emplace_back(CurrBB);
863 #endif
864       continue; // Don't recurse further, but continue processing worklist.
865     }
866 
867     // No entry found for block.
868     ++NumNewNewSpeculativelyAvailableBBs;
869     bool OutOfBudget = NumNewNewSpeculativelyAvailableBBs > MaxBBSpeculations;
870 
871     // If we have exhausted our budget, mark this block as unavailable.
872     // Also, if this block has no predecessors, the value isn't live-in here.
873     if (OutOfBudget || pred_empty(CurrBB)) {
874       MaxBBSpeculationCutoffReachedTimes += (int)OutOfBudget;
875       State = AvailabilityState::Unavailable;
876       UnavailableBB = CurrBB;
877       break; // Backpropagate unavailability info.
878     }
879 
880     // Tentatively consider this block as speculatively available.
881 #ifndef NDEBUG
882     NewSpeculativelyAvailableBBs.insert(CurrBB);
883 #endif
884     // And further recurse into block's predecessors, in depth-first order!
885     Worklist.append(pred_begin(CurrBB), pred_end(CurrBB));
886   }
887 
888 #if LLVM_ENABLE_STATS
889   IsValueFullyAvailableInBlockNumSpeculationsMax.updateMax(
890       NumNewNewSpeculativelyAvailableBBs);
891 #endif
892 
893   // If the block isn't marked as fixpoint yet
894   // (the Unavailable and Available states are fixpoints)
895   auto MarkAsFixpointAndEnqueueSuccessors =
896       [&](BasicBlock *BB, AvailabilityState FixpointState) {
897         auto It = FullyAvailableBlocks.find(BB);
898         if (It == FullyAvailableBlocks.end())
899           return; // Never queried this block, leave as-is.
900         switch (AvailabilityState &State = It->second) {
901         case AvailabilityState::Unavailable:
902         case AvailabilityState::Available:
903           return; // Don't backpropagate further, continue processing worklist.
904         case AvailabilityState::SpeculativelyAvailable: // Fix it!
905           State = FixpointState;
906 #ifndef NDEBUG
907           assert(NewSpeculativelyAvailableBBs.erase(BB) &&
908                  "Found a speculatively available successor leftover?");
909 #endif
910           // Queue successors for further processing.
911           Worklist.append(succ_begin(BB), succ_end(BB));
912           return;
913         }
914       };
915 
916   if (UnavailableBB) {
917     // Okay, we have encountered an unavailable block.
918     // Mark speculatively available blocks reachable from UnavailableBB as
919     // unavailable as well. Paths are terminated when they reach blocks not in
920     // FullyAvailableBlocks or they are not marked as speculatively available.
921     Worklist.clear();
922     Worklist.append(succ_begin(*UnavailableBB), succ_end(*UnavailableBB));
923     while (!Worklist.empty())
924       MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
925                                          AvailabilityState::Unavailable);
926   }
927 
928 #ifndef NDEBUG
929   Worklist.clear();
930   for (BasicBlock *AvailableBB : AvailableBBs)
931     Worklist.append(succ_begin(AvailableBB), succ_end(AvailableBB));
932   while (!Worklist.empty())
933     MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
934                                        AvailabilityState::Available);
935 
936   assert(NewSpeculativelyAvailableBBs.empty() &&
937          "Must have fixed all the new speculatively available blocks.");
938 #endif
939 
940   return !UnavailableBB;
941 }
942 
943 /// If the specified OldValue exists in ValuesPerBlock, replace its value with
944 /// NewValue.
945 static void replaceValuesPerBlockEntry(
946     SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock, Value *OldValue,
947     Value *NewValue) {
948   for (AvailableValueInBlock &V : ValuesPerBlock) {
949     if ((V.AV.isSimpleValue() && V.AV.getSimpleValue() == OldValue) ||
950        (V.AV.isCoercedLoadValue() && V.AV.getCoercedLoadValue() == OldValue))
951       V = AvailableValueInBlock::get(V.BB, NewValue);
952   }
953 }
954 
955 /// Given a set of loads specified by ValuesPerBlock,
956 /// construct SSA form, allowing us to eliminate Load.  This returns the value
957 /// that should be used at Load's definition site.
958 static Value *
959 ConstructSSAForLoadSet(LoadInst *Load,
960                        SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
961                        GVNPass &gvn) {
962   // Check for the fully redundant, dominating load case.  In this case, we can
963   // just use the dominating value directly.
964   if (ValuesPerBlock.size() == 1 &&
965       gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
966                                                Load->getParent())) {
967     assert(!ValuesPerBlock[0].AV.isUndefValue() &&
968            "Dead BB dominate this block");
969     return ValuesPerBlock[0].MaterializeAdjustedValue(Load, gvn);
970   }
971 
972   // Otherwise, we have to construct SSA form.
973   SmallVector<PHINode*, 8> NewPHIs;
974   SSAUpdater SSAUpdate(&NewPHIs);
975   SSAUpdate.Initialize(Load->getType(), Load->getName());
976 
977   for (const AvailableValueInBlock &AV : ValuesPerBlock) {
978     BasicBlock *BB = AV.BB;
979 
980     if (AV.AV.isUndefValue())
981       continue;
982 
983     if (SSAUpdate.HasValueForBlock(BB))
984       continue;
985 
986     // If the value is the load that we will be eliminating, and the block it's
987     // available in is the block that the load is in, then don't add it as
988     // SSAUpdater will resolve the value to the relevant phi which may let it
989     // avoid phi construction entirely if there's actually only one value.
990     if (BB == Load->getParent() &&
991         ((AV.AV.isSimpleValue() && AV.AV.getSimpleValue() == Load) ||
992          (AV.AV.isCoercedLoadValue() && AV.AV.getCoercedLoadValue() == Load)))
993       continue;
994 
995     SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(Load, gvn));
996   }
997 
998   // Perform PHI construction.
999   return SSAUpdate.GetValueInMiddleOfBlock(Load->getParent());
1000 }
1001 
1002 Value *AvailableValue::MaterializeAdjustedValue(LoadInst *Load,
1003                                                 Instruction *InsertPt,
1004                                                 GVNPass &gvn) const {
1005   Value *Res;
1006   Type *LoadTy = Load->getType();
1007   const DataLayout &DL = Load->getModule()->getDataLayout();
1008   if (isSimpleValue()) {
1009     Res = getSimpleValue();
1010     if (Res->getType() != LoadTy) {
1011       Res = getValueForLoad(Res, Offset, LoadTy, InsertPt, DL);
1012 
1013       LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset
1014                         << "  " << *getSimpleValue() << '\n'
1015                         << *Res << '\n'
1016                         << "\n\n\n");
1017     }
1018   } else if (isCoercedLoadValue()) {
1019     LoadInst *CoercedLoad = getCoercedLoadValue();
1020     if (CoercedLoad->getType() == LoadTy && Offset == 0) {
1021       Res = CoercedLoad;
1022       combineMetadataForCSE(CoercedLoad, Load, false);
1023     } else {
1024       Res = getValueForLoad(CoercedLoad, Offset, LoadTy, InsertPt, DL);
1025       // We are adding a new user for this load, for which the original
1026       // metadata may not hold. Additionally, the new load may have a different
1027       // size and type, so their metadata cannot be combined in any
1028       // straightforward way.
1029       // Drop all metadata that is not known to cause immediate UB on violation,
1030       // unless the load has !noundef, in which case all metadata violations
1031       // will be promoted to UB.
1032       // TODO: We can combine noalias/alias.scope metadata here, because it is
1033       // independent of the load type.
1034       if (!CoercedLoad->hasMetadata(LLVMContext::MD_noundef))
1035         CoercedLoad->dropUnknownNonDebugMetadata(
1036             {LLVMContext::MD_dereferenceable,
1037              LLVMContext::MD_dereferenceable_or_null,
1038              LLVMContext::MD_invariant_load, LLVMContext::MD_invariant_group});
1039       LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset
1040                         << "  " << *getCoercedLoadValue() << '\n'
1041                         << *Res << '\n'
1042                         << "\n\n\n");
1043     }
1044   } else if (isMemIntrinValue()) {
1045     Res = getMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy,
1046                                  InsertPt, DL);
1047     LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
1048                       << "  " << *getMemIntrinValue() << '\n'
1049                       << *Res << '\n'
1050                       << "\n\n\n");
1051   } else if (isSelectValue()) {
1052     // Introduce a new value select for a load from an eligible pointer select.
1053     SelectInst *Sel = getSelectValue();
1054     assert(V1 && V2 && "both value operands of the select must be present");
1055     Res = SelectInst::Create(Sel->getCondition(), V1, V2, "", Sel);
1056   } else {
1057     llvm_unreachable("Should not materialize value from dead block");
1058   }
1059   assert(Res && "failed to materialize?");
1060   return Res;
1061 }
1062 
1063 static bool isLifetimeStart(const Instruction *Inst) {
1064   if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
1065     return II->getIntrinsicID() == Intrinsic::lifetime_start;
1066   return false;
1067 }
1068 
1069 /// Assuming To can be reached from both From and Between, does Between lie on
1070 /// every path from From to To?
1071 static bool liesBetween(const Instruction *From, Instruction *Between,
1072                         const Instruction *To, DominatorTree *DT) {
1073   if (From->getParent() == Between->getParent())
1074     return DT->dominates(From, Between);
1075   SmallSet<BasicBlock *, 1> Exclusion;
1076   Exclusion.insert(Between->getParent());
1077   return !isPotentiallyReachable(From, To, &Exclusion, DT);
1078 }
1079 
1080 /// Try to locate the three instruction involved in a missed
1081 /// load-elimination case that is due to an intervening store.
1082 static void reportMayClobberedLoad(LoadInst *Load, MemDepResult DepInfo,
1083                                    DominatorTree *DT,
1084                                    OptimizationRemarkEmitter *ORE) {
1085   using namespace ore;
1086 
1087   Instruction *OtherAccess = nullptr;
1088 
1089   OptimizationRemarkMissed R(DEBUG_TYPE, "LoadClobbered", Load);
1090   R << "load of type " << NV("Type", Load->getType()) << " not eliminated"
1091     << setExtraArgs();
1092 
1093   for (auto *U : Load->getPointerOperand()->users()) {
1094     if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U))) {
1095       auto *I = cast<Instruction>(U);
1096       if (I->getFunction() == Load->getFunction() && DT->dominates(I, Load)) {
1097         // Use the most immediately dominating value
1098         if (OtherAccess) {
1099           if (DT->dominates(OtherAccess, I))
1100             OtherAccess = I;
1101           else
1102             assert(U == OtherAccess || DT->dominates(I, OtherAccess));
1103         } else
1104           OtherAccess = I;
1105       }
1106     }
1107   }
1108 
1109   if (!OtherAccess) {
1110     // There is no dominating use, check if we can find a closest non-dominating
1111     // use that lies between any other potentially available use and Load.
1112     for (auto *U : Load->getPointerOperand()->users()) {
1113       if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U))) {
1114         auto *I = cast<Instruction>(U);
1115         if (I->getFunction() == Load->getFunction() &&
1116             isPotentiallyReachable(I, Load, nullptr, DT)) {
1117           if (OtherAccess) {
1118             if (liesBetween(OtherAccess, I, Load, DT)) {
1119               OtherAccess = I;
1120             } else if (!liesBetween(I, OtherAccess, Load, DT)) {
1121               // These uses are both partially available at Load were it not for
1122               // the clobber, but neither lies strictly after the other.
1123               OtherAccess = nullptr;
1124               break;
1125             } // else: keep current OtherAccess since it lies between U and Load
1126           } else {
1127             OtherAccess = I;
1128           }
1129         }
1130       }
1131     }
1132   }
1133 
1134   if (OtherAccess)
1135     R << " in favor of " << NV("OtherAccess", OtherAccess);
1136 
1137   R << " because it is clobbered by " << NV("ClobberedBy", DepInfo.getInst());
1138 
1139   ORE->emit(R);
1140 }
1141 
1142 // Find non-clobbered value for Loc memory location in extended basic block
1143 // (chain of basic blocks with single predecessors) starting From instruction.
1144 static Value *findDominatingValue(const MemoryLocation &Loc, Type *LoadTy,
1145                                   Instruction *From, AAResults *AA) {
1146   uint32_t NumVisitedInsts = 0;
1147   BasicBlock *FromBB = From->getParent();
1148   BatchAAResults BatchAA(*AA);
1149   for (BasicBlock *BB = FromBB; BB; BB = BB->getSinglePredecessor())
1150     for (auto I = BB == FromBB ? From->getReverseIterator() : BB->rbegin(),
1151               E = BB->rend();
1152          I != E; ++I) {
1153       // Stop the search if limit is reached.
1154       if (++NumVisitedInsts > MaxNumVisitedInsts)
1155         return nullptr;
1156       Instruction *Inst = &*I;
1157       if (isModSet(BatchAA.getModRefInfo(Inst, Loc)))
1158         return nullptr;
1159       if (auto *LI = dyn_cast<LoadInst>(Inst))
1160         if (LI->getPointerOperand() == Loc.Ptr && LI->getType() == LoadTy)
1161           return LI;
1162     }
1163   return nullptr;
1164 }
1165 
1166 std::optional<AvailableValue>
1167 GVNPass::AnalyzeLoadAvailability(LoadInst *Load, MemDepResult DepInfo,
1168                                  Value *Address) {
1169   assert(Load->isUnordered() && "rules below are incorrect for ordered access");
1170   assert(DepInfo.isLocal() && "expected a local dependence");
1171 
1172   Instruction *DepInst = DepInfo.getInst();
1173 
1174   const DataLayout &DL = Load->getModule()->getDataLayout();
1175   if (DepInfo.isClobber()) {
1176     // If the dependence is to a store that writes to a superset of the bits
1177     // read by the load, we can extract the bits we need for the load from the
1178     // stored value.
1179     if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
1180       // Can't forward from non-atomic to atomic without violating memory model.
1181       if (Address && Load->isAtomic() <= DepSI->isAtomic()) {
1182         int Offset =
1183             analyzeLoadFromClobberingStore(Load->getType(), Address, DepSI, DL);
1184         if (Offset != -1)
1185           return AvailableValue::get(DepSI->getValueOperand(), Offset);
1186       }
1187     }
1188 
1189     // Check to see if we have something like this:
1190     //    load i32* P
1191     //    load i8* (P+1)
1192     // if we have this, replace the later with an extraction from the former.
1193     if (LoadInst *DepLoad = dyn_cast<LoadInst>(DepInst)) {
1194       // If this is a clobber and L is the first instruction in its block, then
1195       // we have the first instruction in the entry block.
1196       // Can't forward from non-atomic to atomic without violating memory model.
1197       if (DepLoad != Load && Address &&
1198           Load->isAtomic() <= DepLoad->isAtomic()) {
1199         Type *LoadType = Load->getType();
1200         int Offset = -1;
1201 
1202         // If MD reported clobber, check it was nested.
1203         if (DepInfo.isClobber() &&
1204             canCoerceMustAliasedValueToLoad(DepLoad, LoadType, DL)) {
1205           const auto ClobberOff = MD->getClobberOffset(DepLoad);
1206           // GVN has no deal with a negative offset.
1207           Offset = (ClobberOff == std::nullopt || *ClobberOff < 0)
1208                        ? -1
1209                        : *ClobberOff;
1210         }
1211         if (Offset == -1)
1212           Offset =
1213               analyzeLoadFromClobberingLoad(LoadType, Address, DepLoad, DL);
1214         if (Offset != -1)
1215           return AvailableValue::getLoad(DepLoad, Offset);
1216       }
1217     }
1218 
1219     // If the clobbering value is a memset/memcpy/memmove, see if we can
1220     // forward a value on from it.
1221     if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
1222       if (Address && !Load->isAtomic()) {
1223         int Offset = analyzeLoadFromClobberingMemInst(Load->getType(), Address,
1224                                                       DepMI, DL);
1225         if (Offset != -1)
1226           return AvailableValue::getMI(DepMI, Offset);
1227       }
1228     }
1229 
1230     // Nothing known about this clobber, have to be conservative
1231     LLVM_DEBUG(
1232         // fast print dep, using operator<< on instruction is too slow.
1233         dbgs() << "GVN: load "; Load->printAsOperand(dbgs());
1234         dbgs() << " is clobbered by " << *DepInst << '\n';);
1235     if (ORE->allowExtraAnalysis(DEBUG_TYPE))
1236       reportMayClobberedLoad(Load, DepInfo, DT, ORE);
1237 
1238     return std::nullopt;
1239   }
1240   assert(DepInfo.isDef() && "follows from above");
1241 
1242   // Loading the alloca -> undef.
1243   // Loading immediately after lifetime begin -> undef.
1244   if (isa<AllocaInst>(DepInst) || isLifetimeStart(DepInst))
1245     return AvailableValue::get(UndefValue::get(Load->getType()));
1246 
1247   if (Constant *InitVal =
1248           getInitialValueOfAllocation(DepInst, TLI, Load->getType()))
1249     return AvailableValue::get(InitVal);
1250 
1251   if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
1252     // Reject loads and stores that are to the same address but are of
1253     // different types if we have to. If the stored value is convertable to
1254     // the loaded value, we can reuse it.
1255     if (!canCoerceMustAliasedValueToLoad(S->getValueOperand(), Load->getType(),
1256                                          DL))
1257       return std::nullopt;
1258 
1259     // Can't forward from non-atomic to atomic without violating memory model.
1260     if (S->isAtomic() < Load->isAtomic())
1261       return std::nullopt;
1262 
1263     return AvailableValue::get(S->getValueOperand());
1264   }
1265 
1266   if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
1267     // If the types mismatch and we can't handle it, reject reuse of the load.
1268     // If the stored value is larger or equal to the loaded value, we can reuse
1269     // it.
1270     if (!canCoerceMustAliasedValueToLoad(LD, Load->getType(), DL))
1271       return std::nullopt;
1272 
1273     // Can't forward from non-atomic to atomic without violating memory model.
1274     if (LD->isAtomic() < Load->isAtomic())
1275       return std::nullopt;
1276 
1277     return AvailableValue::getLoad(LD);
1278   }
1279 
1280   // Check if load with Addr dependent from select can be converted to select
1281   // between load values. There must be no instructions between the found
1282   // loads and DepInst that may clobber the loads.
1283   if (auto *Sel = dyn_cast<SelectInst>(DepInst)) {
1284     assert(Sel->getType() == Load->getPointerOperandType());
1285     auto Loc = MemoryLocation::get(Load);
1286     Value *V1 =
1287         findDominatingValue(Loc.getWithNewPtr(Sel->getTrueValue()),
1288                             Load->getType(), DepInst, getAliasAnalysis());
1289     if (!V1)
1290       return std::nullopt;
1291     Value *V2 =
1292         findDominatingValue(Loc.getWithNewPtr(Sel->getFalseValue()),
1293                             Load->getType(), DepInst, getAliasAnalysis());
1294     if (!V2)
1295       return std::nullopt;
1296     return AvailableValue::getSelect(Sel, V1, V2);
1297   }
1298 
1299   // Unknown def - must be conservative
1300   LLVM_DEBUG(
1301       // fast print dep, using operator<< on instruction is too slow.
1302       dbgs() << "GVN: load "; Load->printAsOperand(dbgs());
1303       dbgs() << " has unknown def " << *DepInst << '\n';);
1304   return std::nullopt;
1305 }
1306 
1307 void GVNPass::AnalyzeLoadAvailability(LoadInst *Load, LoadDepVect &Deps,
1308                                       AvailValInBlkVect &ValuesPerBlock,
1309                                       UnavailBlkVect &UnavailableBlocks) {
1310   // Filter out useless results (non-locals, etc).  Keep track of the blocks
1311   // where we have a value available in repl, also keep track of whether we see
1312   // dependencies that produce an unknown value for the load (such as a call
1313   // that could potentially clobber the load).
1314   for (const auto &Dep : Deps) {
1315     BasicBlock *DepBB = Dep.getBB();
1316     MemDepResult DepInfo = Dep.getResult();
1317 
1318     if (DeadBlocks.count(DepBB)) {
1319       // Dead dependent mem-op disguise as a load evaluating the same value
1320       // as the load in question.
1321       ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB));
1322       continue;
1323     }
1324 
1325     if (!DepInfo.isLocal()) {
1326       UnavailableBlocks.push_back(DepBB);
1327       continue;
1328     }
1329 
1330     // The address being loaded in this non-local block may not be the same as
1331     // the pointer operand of the load if PHI translation occurs.  Make sure
1332     // to consider the right address.
1333     if (auto AV = AnalyzeLoadAvailability(Load, DepInfo, Dep.getAddress())) {
1334       // subtlety: because we know this was a non-local dependency, we know
1335       // it's safe to materialize anywhere between the instruction within
1336       // DepInfo and the end of it's block.
1337       ValuesPerBlock.push_back(
1338           AvailableValueInBlock::get(DepBB, std::move(*AV)));
1339     } else {
1340       UnavailableBlocks.push_back(DepBB);
1341     }
1342   }
1343 
1344   assert(Deps.size() == ValuesPerBlock.size() + UnavailableBlocks.size() &&
1345          "post condition violation");
1346 }
1347 
1348 /// Given the following code, v1 is partially available on some edges, but not
1349 /// available on the edge from PredBB. This function tries to find if there is
1350 /// another identical load in the other successor of PredBB.
1351 ///
1352 ///      v0 = load %addr
1353 ///      br %LoadBB
1354 ///
1355 ///   LoadBB:
1356 ///      v1 = load %addr
1357 ///      ...
1358 ///
1359 ///   PredBB:
1360 ///      ...
1361 ///      br %cond, label %LoadBB, label %SuccBB
1362 ///
1363 ///   SuccBB:
1364 ///      v2 = load %addr
1365 ///      ...
1366 ///
1367 LoadInst *GVNPass::findLoadToHoistIntoPred(BasicBlock *Pred, BasicBlock *LoadBB,
1368                                            LoadInst *Load) {
1369   // For simplicity we handle a Pred has 2 successors only.
1370   auto *Term = Pred->getTerminator();
1371   if (Term->getNumSuccessors() != 2 || Term->isExceptionalTerminator())
1372     return nullptr;
1373   auto *SuccBB = Term->getSuccessor(0);
1374   if (SuccBB == LoadBB)
1375     SuccBB = Term->getSuccessor(1);
1376   if (!SuccBB->getSinglePredecessor())
1377     return nullptr;
1378 
1379   unsigned int NumInsts = MaxNumInsnsPerBlock;
1380   for (Instruction &Inst : *SuccBB) {
1381     if (Inst.isDebugOrPseudoInst())
1382       continue;
1383     if (--NumInsts == 0)
1384       return nullptr;
1385 
1386     if (!Inst.isIdenticalTo(Load))
1387       continue;
1388 
1389     MemDepResult Dep = MD->getDependency(&Inst);
1390     // If an identical load doesn't depends on any local instructions, it can
1391     // be safely moved to PredBB.
1392     // Also check for the implicit control flow instructions. See the comments
1393     // in PerformLoadPRE for details.
1394     if (Dep.isNonLocal() && !ICF->isDominatedByICFIFromSameBlock(&Inst))
1395       return cast<LoadInst>(&Inst);
1396 
1397     // Otherwise there is something in the same BB clobbers the memory, we can't
1398     // move this and later load to PredBB.
1399     return nullptr;
1400   }
1401 
1402   return nullptr;
1403 }
1404 
1405 void GVNPass::eliminatePartiallyRedundantLoad(
1406     LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
1407     MapVector<BasicBlock *, Value *> &AvailableLoads,
1408     MapVector<BasicBlock *, LoadInst *> *CriticalEdgePredAndLoad) {
1409   for (const auto &AvailableLoad : AvailableLoads) {
1410     BasicBlock *UnavailableBlock = AvailableLoad.first;
1411     Value *LoadPtr = AvailableLoad.second;
1412 
1413     auto *NewLoad =
1414         new LoadInst(Load->getType(), LoadPtr, Load->getName() + ".pre",
1415                      Load->isVolatile(), Load->getAlign(), Load->getOrdering(),
1416                      Load->getSyncScopeID(), UnavailableBlock->getTerminator());
1417     NewLoad->setDebugLoc(Load->getDebugLoc());
1418     if (MSSAU) {
1419       auto *MSSA = MSSAU->getMemorySSA();
1420       // Get the defining access of the original load or use the load if it is a
1421       // MemoryDef (e.g. because it is volatile). The inserted loads are
1422       // guaranteed to load from the same definition.
1423       auto *LoadAcc = MSSA->getMemoryAccess(Load);
1424       auto *DefiningAcc =
1425           isa<MemoryDef>(LoadAcc) ? LoadAcc : LoadAcc->getDefiningAccess();
1426       auto *NewAccess = MSSAU->createMemoryAccessInBB(
1427           NewLoad, DefiningAcc, NewLoad->getParent(),
1428           MemorySSA::BeforeTerminator);
1429       if (auto *NewDef = dyn_cast<MemoryDef>(NewAccess))
1430         MSSAU->insertDef(NewDef, /*RenameUses=*/true);
1431       else
1432         MSSAU->insertUse(cast<MemoryUse>(NewAccess), /*RenameUses=*/true);
1433     }
1434 
1435     // Transfer the old load's AA tags to the new load.
1436     AAMDNodes Tags = Load->getAAMetadata();
1437     if (Tags)
1438       NewLoad->setAAMetadata(Tags);
1439 
1440     if (auto *MD = Load->getMetadata(LLVMContext::MD_invariant_load))
1441       NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD);
1442     if (auto *InvGroupMD = Load->getMetadata(LLVMContext::MD_invariant_group))
1443       NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD);
1444     if (auto *RangeMD = Load->getMetadata(LLVMContext::MD_range))
1445       NewLoad->setMetadata(LLVMContext::MD_range, RangeMD);
1446     if (auto *AccessMD = Load->getMetadata(LLVMContext::MD_access_group))
1447       if (LI &&
1448           LI->getLoopFor(Load->getParent()) == LI->getLoopFor(UnavailableBlock))
1449         NewLoad->setMetadata(LLVMContext::MD_access_group, AccessMD);
1450 
1451     // We do not propagate the old load's debug location, because the new
1452     // load now lives in a different BB, and we want to avoid a jumpy line
1453     // table.
1454     // FIXME: How do we retain source locations without causing poor debugging
1455     // behavior?
1456 
1457     // Add the newly created load.
1458     ValuesPerBlock.push_back(
1459         AvailableValueInBlock::get(UnavailableBlock, NewLoad));
1460     MD->invalidateCachedPointerInfo(LoadPtr);
1461     LLVM_DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
1462 
1463     // For PredBB in CriticalEdgePredAndLoad we need to replace the uses of old
1464     // load instruction with the new created load instruction.
1465     if (CriticalEdgePredAndLoad) {
1466       auto I = CriticalEdgePredAndLoad->find(UnavailableBlock);
1467       if (I != CriticalEdgePredAndLoad->end()) {
1468         ++NumPRELoadMoved2CEPred;
1469         ICF->insertInstructionTo(NewLoad, UnavailableBlock);
1470         LoadInst *OldLoad = I->second;
1471         combineMetadataForCSE(NewLoad, OldLoad, false);
1472         OldLoad->replaceAllUsesWith(NewLoad);
1473         replaceValuesPerBlockEntry(ValuesPerBlock, OldLoad, NewLoad);
1474         if (uint32_t ValNo = VN.lookup(OldLoad, false))
1475           removeFromLeaderTable(ValNo, OldLoad, OldLoad->getParent());
1476         VN.erase(OldLoad);
1477         removeInstruction(OldLoad);
1478       }
1479     }
1480   }
1481 
1482   // Perform PHI construction.
1483   Value *V = ConstructSSAForLoadSet(Load, ValuesPerBlock, *this);
1484   // ConstructSSAForLoadSet is responsible for combining metadata.
1485   Load->replaceAllUsesWith(V);
1486   if (isa<PHINode>(V))
1487     V->takeName(Load);
1488   if (Instruction *I = dyn_cast<Instruction>(V))
1489     I->setDebugLoc(Load->getDebugLoc());
1490   if (V->getType()->isPtrOrPtrVectorTy())
1491     MD->invalidateCachedPointerInfo(V);
1492   markInstructionForDeletion(Load);
1493   ORE->emit([&]() {
1494     return OptimizationRemark(DEBUG_TYPE, "LoadPRE", Load)
1495            << "load eliminated by PRE";
1496   });
1497 }
1498 
1499 bool GVNPass::PerformLoadPRE(LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
1500                              UnavailBlkVect &UnavailableBlocks) {
1501   // Okay, we have *some* definitions of the value.  This means that the value
1502   // is available in some of our (transitive) predecessors.  Lets think about
1503   // doing PRE of this load.  This will involve inserting a new load into the
1504   // predecessor when it's not available.  We could do this in general, but
1505   // prefer to not increase code size.  As such, we only do this when we know
1506   // that we only have to insert *one* load (which means we're basically moving
1507   // the load, not inserting a new one).
1508 
1509   SmallPtrSet<BasicBlock *, 4> Blockers(UnavailableBlocks.begin(),
1510                                         UnavailableBlocks.end());
1511 
1512   // Let's find the first basic block with more than one predecessor.  Walk
1513   // backwards through predecessors if needed.
1514   BasicBlock *LoadBB = Load->getParent();
1515   BasicBlock *TmpBB = LoadBB;
1516 
1517   // Check that there is no implicit control flow instructions above our load in
1518   // its block. If there is an instruction that doesn't always pass the
1519   // execution to the following instruction, then moving through it may become
1520   // invalid. For example:
1521   //
1522   // int arr[LEN];
1523   // int index = ???;
1524   // ...
1525   // guard(0 <= index && index < LEN);
1526   // use(arr[index]);
1527   //
1528   // It is illegal to move the array access to any point above the guard,
1529   // because if the index is out of bounds we should deoptimize rather than
1530   // access the array.
1531   // Check that there is no guard in this block above our instruction.
1532   bool MustEnsureSafetyOfSpeculativeExecution =
1533       ICF->isDominatedByICFIFromSameBlock(Load);
1534 
1535   while (TmpBB->getSinglePredecessor()) {
1536     TmpBB = TmpBB->getSinglePredecessor();
1537     if (TmpBB == LoadBB) // Infinite (unreachable) loop.
1538       return false;
1539     if (Blockers.count(TmpBB))
1540       return false;
1541 
1542     // If any of these blocks has more than one successor (i.e. if the edge we
1543     // just traversed was critical), then there are other paths through this
1544     // block along which the load may not be anticipated.  Hoisting the load
1545     // above this block would be adding the load to execution paths along
1546     // which it was not previously executed.
1547     if (TmpBB->getTerminator()->getNumSuccessors() != 1)
1548       return false;
1549 
1550     // Check that there is no implicit control flow in a block above.
1551     MustEnsureSafetyOfSpeculativeExecution =
1552         MustEnsureSafetyOfSpeculativeExecution || ICF->hasICF(TmpBB);
1553   }
1554 
1555   assert(TmpBB);
1556   LoadBB = TmpBB;
1557 
1558   // Check to see how many predecessors have the loaded value fully
1559   // available.
1560   MapVector<BasicBlock *, Value *> PredLoads;
1561   DenseMap<BasicBlock *, AvailabilityState> FullyAvailableBlocks;
1562   for (const AvailableValueInBlock &AV : ValuesPerBlock)
1563     FullyAvailableBlocks[AV.BB] = AvailabilityState::Available;
1564   for (BasicBlock *UnavailableBB : UnavailableBlocks)
1565     FullyAvailableBlocks[UnavailableBB] = AvailabilityState::Unavailable;
1566 
1567   // The edge from Pred to LoadBB is a critical edge will be splitted.
1568   SmallVector<BasicBlock *, 4> CriticalEdgePredSplit;
1569   // The edge from Pred to LoadBB is a critical edge, another successor of Pred
1570   // contains a load can be moved to Pred. This data structure maps the Pred to
1571   // the movable load.
1572   MapVector<BasicBlock *, LoadInst *> CriticalEdgePredAndLoad;
1573   for (BasicBlock *Pred : predecessors(LoadBB)) {
1574     // If any predecessor block is an EH pad that does not allow non-PHI
1575     // instructions before the terminator, we can't PRE the load.
1576     if (Pred->getTerminator()->isEHPad()) {
1577       LLVM_DEBUG(
1578           dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
1579                  << Pred->getName() << "': " << *Load << '\n');
1580       return false;
1581     }
1582 
1583     if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) {
1584       continue;
1585     }
1586 
1587     if (Pred->getTerminator()->getNumSuccessors() != 1) {
1588       if (isa<IndirectBrInst>(Pred->getTerminator())) {
1589         LLVM_DEBUG(
1590             dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
1591                    << Pred->getName() << "': " << *Load << '\n');
1592         return false;
1593       }
1594 
1595       if (LoadBB->isEHPad()) {
1596         LLVM_DEBUG(
1597             dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
1598                    << Pred->getName() << "': " << *Load << '\n');
1599         return false;
1600       }
1601 
1602       // Do not split backedge as it will break the canonical loop form.
1603       if (!isLoadPRESplitBackedgeEnabled())
1604         if (DT->dominates(LoadBB, Pred)) {
1605           LLVM_DEBUG(
1606               dbgs()
1607               << "COULD NOT PRE LOAD BECAUSE OF A BACKEDGE CRITICAL EDGE '"
1608               << Pred->getName() << "': " << *Load << '\n');
1609           return false;
1610         }
1611 
1612       if (LoadInst *LI = findLoadToHoistIntoPred(Pred, LoadBB, Load))
1613         CriticalEdgePredAndLoad[Pred] = LI;
1614       else
1615         CriticalEdgePredSplit.push_back(Pred);
1616     } else {
1617       // Only add the predecessors that will not be split for now.
1618       PredLoads[Pred] = nullptr;
1619     }
1620   }
1621 
1622   // Decide whether PRE is profitable for this load.
1623   unsigned NumInsertPreds = PredLoads.size() + CriticalEdgePredSplit.size();
1624   unsigned NumUnavailablePreds = NumInsertPreds +
1625       CriticalEdgePredAndLoad.size();
1626   assert(NumUnavailablePreds != 0 &&
1627          "Fully available value should already be eliminated!");
1628   (void)NumUnavailablePreds;
1629 
1630   // If we need to insert new load in multiple predecessors, reject it.
1631   // FIXME: If we could restructure the CFG, we could make a common pred with
1632   // all the preds that don't have an available Load and insert a new load into
1633   // that one block.
1634   if (NumInsertPreds > 1)
1635       return false;
1636 
1637   // Now we know where we will insert load. We must ensure that it is safe
1638   // to speculatively execute the load at that points.
1639   if (MustEnsureSafetyOfSpeculativeExecution) {
1640     if (CriticalEdgePredSplit.size())
1641       if (!isSafeToSpeculativelyExecute(Load, LoadBB->getFirstNonPHI(), AC, DT))
1642         return false;
1643     for (auto &PL : PredLoads)
1644       if (!isSafeToSpeculativelyExecute(Load, PL.first->getTerminator(), AC,
1645                                         DT))
1646         return false;
1647     for (auto &CEP : CriticalEdgePredAndLoad)
1648       if (!isSafeToSpeculativelyExecute(Load, CEP.first->getTerminator(), AC,
1649                                         DT))
1650         return false;
1651   }
1652 
1653   // Split critical edges, and update the unavailable predecessors accordingly.
1654   for (BasicBlock *OrigPred : CriticalEdgePredSplit) {
1655     BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
1656     assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!");
1657     PredLoads[NewPred] = nullptr;
1658     LLVM_DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
1659                       << LoadBB->getName() << '\n');
1660   }
1661 
1662   for (auto &CEP : CriticalEdgePredAndLoad)
1663     PredLoads[CEP.first] = nullptr;
1664 
1665   // Check if the load can safely be moved to all the unavailable predecessors.
1666   bool CanDoPRE = true;
1667   const DataLayout &DL = Load->getModule()->getDataLayout();
1668   SmallVector<Instruction*, 8> NewInsts;
1669   for (auto &PredLoad : PredLoads) {
1670     BasicBlock *UnavailablePred = PredLoad.first;
1671 
1672     // Do PHI translation to get its value in the predecessor if necessary.  The
1673     // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
1674     // We do the translation for each edge we skipped by going from Load's block
1675     // to LoadBB, otherwise we might miss pieces needing translation.
1676 
1677     // If all preds have a single successor, then we know it is safe to insert
1678     // the load on the pred (?!?), so we can insert code to materialize the
1679     // pointer if it is not available.
1680     Value *LoadPtr = Load->getPointerOperand();
1681     BasicBlock *Cur = Load->getParent();
1682     while (Cur != LoadBB) {
1683       PHITransAddr Address(LoadPtr, DL, AC);
1684       LoadPtr = Address.translateWithInsertion(Cur, Cur->getSinglePredecessor(),
1685                                                *DT, NewInsts);
1686       if (!LoadPtr) {
1687         CanDoPRE = false;
1688         break;
1689       }
1690       Cur = Cur->getSinglePredecessor();
1691     }
1692 
1693     if (LoadPtr) {
1694       PHITransAddr Address(LoadPtr, DL, AC);
1695       LoadPtr = Address.translateWithInsertion(LoadBB, UnavailablePred, *DT,
1696                                                NewInsts);
1697     }
1698     // If we couldn't find or insert a computation of this phi translated value,
1699     // we fail PRE.
1700     if (!LoadPtr) {
1701       LLVM_DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
1702                         << *Load->getPointerOperand() << "\n");
1703       CanDoPRE = false;
1704       break;
1705     }
1706 
1707     PredLoad.second = LoadPtr;
1708   }
1709 
1710   if (!CanDoPRE) {
1711     while (!NewInsts.empty()) {
1712       // Erase instructions generated by the failed PHI translation before
1713       // trying to number them. PHI translation might insert instructions
1714       // in basic blocks other than the current one, and we delete them
1715       // directly, as markInstructionForDeletion only allows removing from the
1716       // current basic block.
1717       NewInsts.pop_back_val()->eraseFromParent();
1718     }
1719     // HINT: Don't revert the edge-splitting as following transformation may
1720     // also need to split these critical edges.
1721     return !CriticalEdgePredSplit.empty();
1722   }
1723 
1724   // Okay, we can eliminate this load by inserting a reload in the predecessor
1725   // and using PHI construction to get the value in the other predecessors, do
1726   // it.
1727   LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *Load << '\n');
1728   LLVM_DEBUG(if (!NewInsts.empty()) dbgs() << "INSERTED " << NewInsts.size()
1729                                            << " INSTS: " << *NewInsts.back()
1730                                            << '\n');
1731 
1732   // Assign value numbers to the new instructions.
1733   for (Instruction *I : NewInsts) {
1734     // Instructions that have been inserted in predecessor(s) to materialize
1735     // the load address do not retain their original debug locations. Doing
1736     // so could lead to confusing (but correct) source attributions.
1737     I->updateLocationAfterHoist();
1738 
1739     // FIXME: We really _ought_ to insert these value numbers into their
1740     // parent's availability map.  However, in doing so, we risk getting into
1741     // ordering issues.  If a block hasn't been processed yet, we would be
1742     // marking a value as AVAIL-IN, which isn't what we intend.
1743     VN.lookupOrAdd(I);
1744   }
1745 
1746   eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, PredLoads,
1747                                   &CriticalEdgePredAndLoad);
1748   ++NumPRELoad;
1749   return true;
1750 }
1751 
1752 bool GVNPass::performLoopLoadPRE(LoadInst *Load,
1753                                  AvailValInBlkVect &ValuesPerBlock,
1754                                  UnavailBlkVect &UnavailableBlocks) {
1755   if (!LI)
1756     return false;
1757 
1758   const Loop *L = LI->getLoopFor(Load->getParent());
1759   // TODO: Generalize to other loop blocks that dominate the latch.
1760   if (!L || L->getHeader() != Load->getParent())
1761     return false;
1762 
1763   BasicBlock *Preheader = L->getLoopPreheader();
1764   BasicBlock *Latch = L->getLoopLatch();
1765   if (!Preheader || !Latch)
1766     return false;
1767 
1768   Value *LoadPtr = Load->getPointerOperand();
1769   // Must be available in preheader.
1770   if (!L->isLoopInvariant(LoadPtr))
1771     return false;
1772 
1773   // We plan to hoist the load to preheader without introducing a new fault.
1774   // In order to do it, we need to prove that we cannot side-exit the loop
1775   // once loop header is first entered before execution of the load.
1776   if (ICF->isDominatedByICFIFromSameBlock(Load))
1777     return false;
1778 
1779   BasicBlock *LoopBlock = nullptr;
1780   for (auto *Blocker : UnavailableBlocks) {
1781     // Blockers from outside the loop are handled in preheader.
1782     if (!L->contains(Blocker))
1783       continue;
1784 
1785     // Only allow one loop block. Loop header is not less frequently executed
1786     // than each loop block, and likely it is much more frequently executed. But
1787     // in case of multiple loop blocks, we need extra information (such as block
1788     // frequency info) to understand whether it is profitable to PRE into
1789     // multiple loop blocks.
1790     if (LoopBlock)
1791       return false;
1792 
1793     // Do not sink into inner loops. This may be non-profitable.
1794     if (L != LI->getLoopFor(Blocker))
1795       return false;
1796 
1797     // Blocks that dominate the latch execute on every single iteration, maybe
1798     // except the last one. So PREing into these blocks doesn't make much sense
1799     // in most cases. But the blocks that do not necessarily execute on each
1800     // iteration are sometimes much colder than the header, and this is when
1801     // PRE is potentially profitable.
1802     if (DT->dominates(Blocker, Latch))
1803       return false;
1804 
1805     // Make sure that the terminator itself doesn't clobber.
1806     if (Blocker->getTerminator()->mayWriteToMemory())
1807       return false;
1808 
1809     LoopBlock = Blocker;
1810   }
1811 
1812   if (!LoopBlock)
1813     return false;
1814 
1815   // Make sure the memory at this pointer cannot be freed, therefore we can
1816   // safely reload from it after clobber.
1817   if (LoadPtr->canBeFreed())
1818     return false;
1819 
1820   // TODO: Support critical edge splitting if blocker has more than 1 successor.
1821   MapVector<BasicBlock *, Value *> AvailableLoads;
1822   AvailableLoads[LoopBlock] = LoadPtr;
1823   AvailableLoads[Preheader] = LoadPtr;
1824 
1825   LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOOP LOAD: " << *Load << '\n');
1826   eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, AvailableLoads,
1827                                   /*CriticalEdgePredAndLoad*/ nullptr);
1828   ++NumPRELoopLoad;
1829   return true;
1830 }
1831 
1832 static void reportLoadElim(LoadInst *Load, Value *AvailableValue,
1833                            OptimizationRemarkEmitter *ORE) {
1834   using namespace ore;
1835 
1836   ORE->emit([&]() {
1837     return OptimizationRemark(DEBUG_TYPE, "LoadElim", Load)
1838            << "load of type " << NV("Type", Load->getType()) << " eliminated"
1839            << setExtraArgs() << " in favor of "
1840            << NV("InfavorOfValue", AvailableValue);
1841   });
1842 }
1843 
1844 /// Attempt to eliminate a load whose dependencies are
1845 /// non-local by performing PHI construction.
1846 bool GVNPass::processNonLocalLoad(LoadInst *Load) {
1847   // non-local speculations are not allowed under asan.
1848   if (Load->getParent()->getParent()->hasFnAttribute(
1849           Attribute::SanitizeAddress) ||
1850       Load->getParent()->getParent()->hasFnAttribute(
1851           Attribute::SanitizeHWAddress))
1852     return false;
1853 
1854   // Step 1: Find the non-local dependencies of the load.
1855   LoadDepVect Deps;
1856   MD->getNonLocalPointerDependency(Load, Deps);
1857 
1858   // If we had to process more than one hundred blocks to find the
1859   // dependencies, this load isn't worth worrying about.  Optimizing
1860   // it will be too expensive.
1861   unsigned NumDeps = Deps.size();
1862   if (NumDeps > MaxNumDeps)
1863     return false;
1864 
1865   // If we had a phi translation failure, we'll have a single entry which is a
1866   // clobber in the current block.  Reject this early.
1867   if (NumDeps == 1 &&
1868       !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
1869     LLVM_DEBUG(dbgs() << "GVN: non-local load "; Load->printAsOperand(dbgs());
1870                dbgs() << " has unknown dependencies\n";);
1871     return false;
1872   }
1873 
1874   bool Changed = false;
1875   // If this load follows a GEP, see if we can PRE the indices before analyzing.
1876   if (GetElementPtrInst *GEP =
1877           dyn_cast<GetElementPtrInst>(Load->getOperand(0))) {
1878     for (Use &U : GEP->indices())
1879       if (Instruction *I = dyn_cast<Instruction>(U.get()))
1880         Changed |= performScalarPRE(I);
1881   }
1882 
1883   // Step 2: Analyze the availability of the load
1884   AvailValInBlkVect ValuesPerBlock;
1885   UnavailBlkVect UnavailableBlocks;
1886   AnalyzeLoadAvailability(Load, Deps, ValuesPerBlock, UnavailableBlocks);
1887 
1888   // If we have no predecessors that produce a known value for this load, exit
1889   // early.
1890   if (ValuesPerBlock.empty())
1891     return Changed;
1892 
1893   // Step 3: Eliminate fully redundancy.
1894   //
1895   // If all of the instructions we depend on produce a known value for this
1896   // load, then it is fully redundant and we can use PHI insertion to compute
1897   // its value.  Insert PHIs and remove the fully redundant value now.
1898   if (UnavailableBlocks.empty()) {
1899     LLVM_DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *Load << '\n');
1900 
1901     // Perform PHI construction.
1902     Value *V = ConstructSSAForLoadSet(Load, ValuesPerBlock, *this);
1903     // ConstructSSAForLoadSet is responsible for combining metadata.
1904     Load->replaceAllUsesWith(V);
1905 
1906     if (isa<PHINode>(V))
1907       V->takeName(Load);
1908     if (Instruction *I = dyn_cast<Instruction>(V))
1909       // If instruction I has debug info, then we should not update it.
1910       // Also, if I has a null DebugLoc, then it is still potentially incorrect
1911       // to propagate Load's DebugLoc because Load may not post-dominate I.
1912       if (Load->getDebugLoc() && Load->getParent() == I->getParent())
1913         I->setDebugLoc(Load->getDebugLoc());
1914     if (V->getType()->isPtrOrPtrVectorTy())
1915       MD->invalidateCachedPointerInfo(V);
1916     markInstructionForDeletion(Load);
1917     ++NumGVNLoad;
1918     reportLoadElim(Load, V, ORE);
1919     return true;
1920   }
1921 
1922   // Step 4: Eliminate partial redundancy.
1923   if (!isPREEnabled() || !isLoadPREEnabled())
1924     return Changed;
1925   if (!isLoadInLoopPREEnabled() && LI && LI->getLoopFor(Load->getParent()))
1926     return Changed;
1927 
1928   if (performLoopLoadPRE(Load, ValuesPerBlock, UnavailableBlocks) ||
1929       PerformLoadPRE(Load, ValuesPerBlock, UnavailableBlocks))
1930     return true;
1931 
1932   return Changed;
1933 }
1934 
1935 static bool impliesEquivalanceIfTrue(CmpInst* Cmp) {
1936   if (Cmp->getPredicate() == CmpInst::Predicate::ICMP_EQ)
1937     return true;
1938 
1939   // Floating point comparisons can be equal, but not equivalent.  Cases:
1940   // NaNs for unordered operators
1941   // +0.0 vs 0.0 for all operators
1942   if (Cmp->getPredicate() == CmpInst::Predicate::FCMP_OEQ ||
1943       (Cmp->getPredicate() == CmpInst::Predicate::FCMP_UEQ &&
1944        Cmp->getFastMathFlags().noNaNs())) {
1945       Value *LHS = Cmp->getOperand(0);
1946       Value *RHS = Cmp->getOperand(1);
1947       // If we can prove either side non-zero, then equality must imply
1948       // equivalence.
1949       // FIXME: We should do this optimization if 'no signed zeros' is
1950       // applicable via an instruction-level fast-math-flag or some other
1951       // indicator that relaxed FP semantics are being used.
1952       if (isa<ConstantFP>(LHS) && !cast<ConstantFP>(LHS)->isZero())
1953         return true;
1954       if (isa<ConstantFP>(RHS) && !cast<ConstantFP>(RHS)->isZero())
1955         return true;
1956       // TODO: Handle vector floating point constants
1957   }
1958   return false;
1959 }
1960 
1961 static bool impliesEquivalanceIfFalse(CmpInst* Cmp) {
1962   if (Cmp->getPredicate() == CmpInst::Predicate::ICMP_NE)
1963     return true;
1964 
1965   // Floating point comparisons can be equal, but not equivelent.  Cases:
1966   // NaNs for unordered operators
1967   // +0.0 vs 0.0 for all operators
1968   if ((Cmp->getPredicate() == CmpInst::Predicate::FCMP_ONE &&
1969        Cmp->getFastMathFlags().noNaNs()) ||
1970       Cmp->getPredicate() == CmpInst::Predicate::FCMP_UNE) {
1971       Value *LHS = Cmp->getOperand(0);
1972       Value *RHS = Cmp->getOperand(1);
1973       // If we can prove either side non-zero, then equality must imply
1974       // equivalence.
1975       // FIXME: We should do this optimization if 'no signed zeros' is
1976       // applicable via an instruction-level fast-math-flag or some other
1977       // indicator that relaxed FP semantics are being used.
1978       if (isa<ConstantFP>(LHS) && !cast<ConstantFP>(LHS)->isZero())
1979         return true;
1980       if (isa<ConstantFP>(RHS) && !cast<ConstantFP>(RHS)->isZero())
1981         return true;
1982       // TODO: Handle vector floating point constants
1983   }
1984   return false;
1985 }
1986 
1987 
1988 static bool hasUsersIn(Value *V, BasicBlock *BB) {
1989   return llvm::any_of(V->users(), [BB](User *U) {
1990     auto *I = dyn_cast<Instruction>(U);
1991     return I && I->getParent() == BB;
1992   });
1993 }
1994 
1995 bool GVNPass::processAssumeIntrinsic(AssumeInst *IntrinsicI) {
1996   Value *V = IntrinsicI->getArgOperand(0);
1997 
1998   if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
1999     if (Cond->isZero()) {
2000       Type *Int8Ty = Type::getInt8Ty(V->getContext());
2001       // Insert a new store to null instruction before the load to indicate that
2002       // this code is not reachable.  FIXME: We could insert unreachable
2003       // instruction directly because we can modify the CFG.
2004       auto *NewS = new StoreInst(PoisonValue::get(Int8Ty),
2005                                  Constant::getNullValue(Int8Ty->getPointerTo()),
2006                                  IntrinsicI);
2007       if (MSSAU) {
2008         const MemoryUseOrDef *FirstNonDom = nullptr;
2009         const auto *AL =
2010             MSSAU->getMemorySSA()->getBlockAccesses(IntrinsicI->getParent());
2011 
2012         // If there are accesses in the current basic block, find the first one
2013         // that does not come before NewS. The new memory access is inserted
2014         // after the found access or before the terminator if no such access is
2015         // found.
2016         if (AL) {
2017           for (const auto &Acc : *AL) {
2018             if (auto *Current = dyn_cast<MemoryUseOrDef>(&Acc))
2019               if (!Current->getMemoryInst()->comesBefore(NewS)) {
2020                 FirstNonDom = Current;
2021                 break;
2022               }
2023           }
2024         }
2025 
2026         // This added store is to null, so it will never executed and we can
2027         // just use the LiveOnEntry def as defining access.
2028         auto *NewDef =
2029             FirstNonDom ? MSSAU->createMemoryAccessBefore(
2030                               NewS, MSSAU->getMemorySSA()->getLiveOnEntryDef(),
2031                               const_cast<MemoryUseOrDef *>(FirstNonDom))
2032                         : MSSAU->createMemoryAccessInBB(
2033                               NewS, MSSAU->getMemorySSA()->getLiveOnEntryDef(),
2034                               NewS->getParent(), MemorySSA::BeforeTerminator);
2035 
2036         MSSAU->insertDef(cast<MemoryDef>(NewDef), /*RenameUses=*/false);
2037       }
2038     }
2039     if (isAssumeWithEmptyBundle(*IntrinsicI)) {
2040       markInstructionForDeletion(IntrinsicI);
2041       return true;
2042     }
2043     return false;
2044   }
2045 
2046   if (isa<Constant>(V)) {
2047     // If it's not false, and constant, it must evaluate to true. This means our
2048     // assume is assume(true), and thus, pointless, and we don't want to do
2049     // anything more here.
2050     return false;
2051   }
2052 
2053   Constant *True = ConstantInt::getTrue(V->getContext());
2054   bool Changed = false;
2055 
2056   for (BasicBlock *Successor : successors(IntrinsicI->getParent())) {
2057     BasicBlockEdge Edge(IntrinsicI->getParent(), Successor);
2058 
2059     // This property is only true in dominated successors, propagateEquality
2060     // will check dominance for us.
2061     Changed |= propagateEquality(V, True, Edge, false);
2062   }
2063 
2064   // We can replace assume value with true, which covers cases like this:
2065   // call void @llvm.assume(i1 %cmp)
2066   // br i1 %cmp, label %bb1, label %bb2 ; will change %cmp to true
2067   ReplaceOperandsWithMap[V] = True;
2068 
2069   // Similarly, after assume(!NotV) we know that NotV == false.
2070   Value *NotV;
2071   if (match(V, m_Not(m_Value(NotV))))
2072     ReplaceOperandsWithMap[NotV] = ConstantInt::getFalse(V->getContext());
2073 
2074   // If we find an equality fact, canonicalize all dominated uses in this block
2075   // to one of the two values.  We heuristically choice the "oldest" of the
2076   // two where age is determined by value number. (Note that propagateEquality
2077   // above handles the cross block case.)
2078   //
2079   // Key case to cover are:
2080   // 1)
2081   // %cmp = fcmp oeq float 3.000000e+00, %0 ; const on lhs could happen
2082   // call void @llvm.assume(i1 %cmp)
2083   // ret float %0 ; will change it to ret float 3.000000e+00
2084   // 2)
2085   // %load = load float, float* %addr
2086   // %cmp = fcmp oeq float %load, %0
2087   // call void @llvm.assume(i1 %cmp)
2088   // ret float %load ; will change it to ret float %0
2089   if (auto *CmpI = dyn_cast<CmpInst>(V)) {
2090     if (impliesEquivalanceIfTrue(CmpI)) {
2091       Value *CmpLHS = CmpI->getOperand(0);
2092       Value *CmpRHS = CmpI->getOperand(1);
2093       // Heuristically pick the better replacement -- the choice of heuristic
2094       // isn't terribly important here, but the fact we canonicalize on some
2095       // replacement is for exposing other simplifications.
2096       // TODO: pull this out as a helper function and reuse w/existing
2097       // (slightly different) logic.
2098       if (isa<Constant>(CmpLHS) && !isa<Constant>(CmpRHS))
2099         std::swap(CmpLHS, CmpRHS);
2100       if (!isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))
2101         std::swap(CmpLHS, CmpRHS);
2102       if ((isa<Argument>(CmpLHS) && isa<Argument>(CmpRHS)) ||
2103           (isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))) {
2104         // Move the 'oldest' value to the right-hand side, using the value
2105         // number as a proxy for age.
2106         uint32_t LVN = VN.lookupOrAdd(CmpLHS);
2107         uint32_t RVN = VN.lookupOrAdd(CmpRHS);
2108         if (LVN < RVN)
2109           std::swap(CmpLHS, CmpRHS);
2110       }
2111 
2112       // Handle degenerate case where we either haven't pruned a dead path or a
2113       // removed a trivial assume yet.
2114       if (isa<Constant>(CmpLHS) && isa<Constant>(CmpRHS))
2115         return Changed;
2116 
2117       LLVM_DEBUG(dbgs() << "Replacing dominated uses of "
2118                  << *CmpLHS << " with "
2119                  << *CmpRHS << " in block "
2120                  << IntrinsicI->getParent()->getName() << "\n");
2121 
2122 
2123       // Setup the replacement map - this handles uses within the same block
2124       if (hasUsersIn(CmpLHS, IntrinsicI->getParent()))
2125         ReplaceOperandsWithMap[CmpLHS] = CmpRHS;
2126 
2127       // NOTE: The non-block local cases are handled by the call to
2128       // propagateEquality above; this block is just about handling the block
2129       // local cases.  TODO: There's a bunch of logic in propagateEqualiy which
2130       // isn't duplicated for the block local case, can we share it somehow?
2131     }
2132   }
2133   return Changed;
2134 }
2135 
2136 static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) {
2137   patchReplacementInstruction(I, Repl);
2138   I->replaceAllUsesWith(Repl);
2139 }
2140 
2141 /// Attempt to eliminate a load, first by eliminating it
2142 /// locally, and then attempting non-local elimination if that fails.
2143 bool GVNPass::processLoad(LoadInst *L) {
2144   if (!MD)
2145     return false;
2146 
2147   // This code hasn't been audited for ordered or volatile memory access
2148   if (!L->isUnordered())
2149     return false;
2150 
2151   if (L->use_empty()) {
2152     markInstructionForDeletion(L);
2153     return true;
2154   }
2155 
2156   // ... to a pointer that has been loaded from before...
2157   MemDepResult Dep = MD->getDependency(L);
2158 
2159   // If it is defined in another block, try harder.
2160   if (Dep.isNonLocal())
2161     return processNonLocalLoad(L);
2162 
2163   // Only handle the local case below
2164   if (!Dep.isLocal()) {
2165     // This might be a NonFuncLocal or an Unknown
2166     LLVM_DEBUG(
2167         // fast print dep, using operator<< on instruction is too slow.
2168         dbgs() << "GVN: load "; L->printAsOperand(dbgs());
2169         dbgs() << " has unknown dependence\n";);
2170     return false;
2171   }
2172 
2173   auto AV = AnalyzeLoadAvailability(L, Dep, L->getPointerOperand());
2174   if (!AV)
2175     return false;
2176 
2177   Value *AvailableValue = AV->MaterializeAdjustedValue(L, L, *this);
2178 
2179   // MaterializeAdjustedValue is responsible for combining metadata.
2180   L->replaceAllUsesWith(AvailableValue);
2181   markInstructionForDeletion(L);
2182   if (MSSAU)
2183     MSSAU->removeMemoryAccess(L);
2184   ++NumGVNLoad;
2185   reportLoadElim(L, AvailableValue, ORE);
2186   // Tell MDA to reexamine the reused pointer since we might have more
2187   // information after forwarding it.
2188   if (MD && AvailableValue->getType()->isPtrOrPtrVectorTy())
2189     MD->invalidateCachedPointerInfo(AvailableValue);
2190   return true;
2191 }
2192 
2193 /// Return a pair the first field showing the value number of \p Exp and the
2194 /// second field showing whether it is a value number newly created.
2195 std::pair<uint32_t, bool>
2196 GVNPass::ValueTable::assignExpNewValueNum(Expression &Exp) {
2197   uint32_t &e = expressionNumbering[Exp];
2198   bool CreateNewValNum = !e;
2199   if (CreateNewValNum) {
2200     Expressions.push_back(Exp);
2201     if (ExprIdx.size() < nextValueNumber + 1)
2202       ExprIdx.resize(nextValueNumber * 2);
2203     e = nextValueNumber;
2204     ExprIdx[nextValueNumber++] = nextExprNumber++;
2205   }
2206   return {e, CreateNewValNum};
2207 }
2208 
2209 /// Return whether all the values related with the same \p num are
2210 /// defined in \p BB.
2211 bool GVNPass::ValueTable::areAllValsInBB(uint32_t Num, const BasicBlock *BB,
2212                                          GVNPass &Gvn) {
2213   LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
2214   while (Vals && Vals->BB == BB)
2215     Vals = Vals->Next;
2216   return !Vals;
2217 }
2218 
2219 /// Wrap phiTranslateImpl to provide caching functionality.
2220 uint32_t GVNPass::ValueTable::phiTranslate(const BasicBlock *Pred,
2221                                            const BasicBlock *PhiBlock,
2222                                            uint32_t Num, GVNPass &Gvn) {
2223   auto FindRes = PhiTranslateTable.find({Num, Pred});
2224   if (FindRes != PhiTranslateTable.end())
2225     return FindRes->second;
2226   uint32_t NewNum = phiTranslateImpl(Pred, PhiBlock, Num, Gvn);
2227   PhiTranslateTable.insert({{Num, Pred}, NewNum});
2228   return NewNum;
2229 }
2230 
2231 // Return true if the value number \p Num and NewNum have equal value.
2232 // Return false if the result is unknown.
2233 bool GVNPass::ValueTable::areCallValsEqual(uint32_t Num, uint32_t NewNum,
2234                                            const BasicBlock *Pred,
2235                                            const BasicBlock *PhiBlock,
2236                                            GVNPass &Gvn) {
2237   CallInst *Call = nullptr;
2238   LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
2239   while (Vals) {
2240     Call = dyn_cast<CallInst>(Vals->Val);
2241     if (Call && Call->getParent() == PhiBlock)
2242       break;
2243     Vals = Vals->Next;
2244   }
2245 
2246   if (AA->doesNotAccessMemory(Call))
2247     return true;
2248 
2249   if (!MD || !AA->onlyReadsMemory(Call))
2250     return false;
2251 
2252   MemDepResult local_dep = MD->getDependency(Call);
2253   if (!local_dep.isNonLocal())
2254     return false;
2255 
2256   const MemoryDependenceResults::NonLocalDepInfo &deps =
2257       MD->getNonLocalCallDependency(Call);
2258 
2259   // Check to see if the Call has no function local clobber.
2260   for (const NonLocalDepEntry &D : deps) {
2261     if (D.getResult().isNonFuncLocal())
2262       return true;
2263   }
2264   return false;
2265 }
2266 
2267 /// Translate value number \p Num using phis, so that it has the values of
2268 /// the phis in BB.
2269 uint32_t GVNPass::ValueTable::phiTranslateImpl(const BasicBlock *Pred,
2270                                                const BasicBlock *PhiBlock,
2271                                                uint32_t Num, GVNPass &Gvn) {
2272   if (PHINode *PN = NumberingPhi[Num]) {
2273     for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
2274       if (PN->getParent() == PhiBlock && PN->getIncomingBlock(i) == Pred)
2275         if (uint32_t TransVal = lookup(PN->getIncomingValue(i), false))
2276           return TransVal;
2277     }
2278     return Num;
2279   }
2280 
2281   // If there is any value related with Num is defined in a BB other than
2282   // PhiBlock, it cannot depend on a phi in PhiBlock without going through
2283   // a backedge. We can do an early exit in that case to save compile time.
2284   if (!areAllValsInBB(Num, PhiBlock, Gvn))
2285     return Num;
2286 
2287   if (Num >= ExprIdx.size() || ExprIdx[Num] == 0)
2288     return Num;
2289   Expression Exp = Expressions[ExprIdx[Num]];
2290 
2291   for (unsigned i = 0; i < Exp.varargs.size(); i++) {
2292     // For InsertValue and ExtractValue, some varargs are index numbers
2293     // instead of value numbers. Those index numbers should not be
2294     // translated.
2295     if ((i > 1 && Exp.opcode == Instruction::InsertValue) ||
2296         (i > 0 && Exp.opcode == Instruction::ExtractValue) ||
2297         (i > 1 && Exp.opcode == Instruction::ShuffleVector))
2298       continue;
2299     Exp.varargs[i] = phiTranslate(Pred, PhiBlock, Exp.varargs[i], Gvn);
2300   }
2301 
2302   if (Exp.commutative) {
2303     assert(Exp.varargs.size() >= 2 && "Unsupported commutative instruction!");
2304     if (Exp.varargs[0] > Exp.varargs[1]) {
2305       std::swap(Exp.varargs[0], Exp.varargs[1]);
2306       uint32_t Opcode = Exp.opcode >> 8;
2307       if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)
2308         Exp.opcode = (Opcode << 8) |
2309                      CmpInst::getSwappedPredicate(
2310                          static_cast<CmpInst::Predicate>(Exp.opcode & 255));
2311     }
2312   }
2313 
2314   if (uint32_t NewNum = expressionNumbering[Exp]) {
2315     if (Exp.opcode == Instruction::Call && NewNum != Num)
2316       return areCallValsEqual(Num, NewNum, Pred, PhiBlock, Gvn) ? NewNum : Num;
2317     return NewNum;
2318   }
2319   return Num;
2320 }
2321 
2322 /// Erase stale entry from phiTranslate cache so phiTranslate can be computed
2323 /// again.
2324 void GVNPass::ValueTable::eraseTranslateCacheEntry(
2325     uint32_t Num, const BasicBlock &CurrBlock) {
2326   for (const BasicBlock *Pred : predecessors(&CurrBlock))
2327     PhiTranslateTable.erase({Num, Pred});
2328 }
2329 
2330 // In order to find a leader for a given value number at a
2331 // specific basic block, we first obtain the list of all Values for that number,
2332 // and then scan the list to find one whose block dominates the block in
2333 // question.  This is fast because dominator tree queries consist of only
2334 // a few comparisons of DFS numbers.
2335 Value *GVNPass::findLeader(const BasicBlock *BB, uint32_t num) {
2336   LeaderTableEntry Vals = LeaderTable[num];
2337   if (!Vals.Val) return nullptr;
2338 
2339   Value *Val = nullptr;
2340   if (DT->dominates(Vals.BB, BB)) {
2341     Val = Vals.Val;
2342     if (isa<Constant>(Val)) return Val;
2343   }
2344 
2345   LeaderTableEntry* Next = Vals.Next;
2346   while (Next) {
2347     if (DT->dominates(Next->BB, BB)) {
2348       if (isa<Constant>(Next->Val)) return Next->Val;
2349       if (!Val) Val = Next->Val;
2350     }
2351 
2352     Next = Next->Next;
2353   }
2354 
2355   return Val;
2356 }
2357 
2358 /// There is an edge from 'Src' to 'Dst'.  Return
2359 /// true if every path from the entry block to 'Dst' passes via this edge.  In
2360 /// particular 'Dst' must not be reachable via another edge from 'Src'.
2361 static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E,
2362                                        DominatorTree *DT) {
2363   // While in theory it is interesting to consider the case in which Dst has
2364   // more than one predecessor, because Dst might be part of a loop which is
2365   // only reachable from Src, in practice it is pointless since at the time
2366   // GVN runs all such loops have preheaders, which means that Dst will have
2367   // been changed to have only one predecessor, namely Src.
2368   const BasicBlock *Pred = E.getEnd()->getSinglePredecessor();
2369   assert((!Pred || Pred == E.getStart()) &&
2370          "No edge between these basic blocks!");
2371   return Pred != nullptr;
2372 }
2373 
2374 void GVNPass::assignBlockRPONumber(Function &F) {
2375   BlockRPONumber.clear();
2376   uint32_t NextBlockNumber = 1;
2377   ReversePostOrderTraversal<Function *> RPOT(&F);
2378   for (BasicBlock *BB : RPOT)
2379     BlockRPONumber[BB] = NextBlockNumber++;
2380   InvalidBlockRPONumbers = false;
2381 }
2382 
2383 bool GVNPass::replaceOperandsForInBlockEquality(Instruction *Instr) const {
2384   bool Changed = false;
2385   for (unsigned OpNum = 0; OpNum < Instr->getNumOperands(); ++OpNum) {
2386     Value *Operand = Instr->getOperand(OpNum);
2387     auto it = ReplaceOperandsWithMap.find(Operand);
2388     if (it != ReplaceOperandsWithMap.end()) {
2389       LLVM_DEBUG(dbgs() << "GVN replacing: " << *Operand << " with "
2390                         << *it->second << " in instruction " << *Instr << '\n');
2391       Instr->setOperand(OpNum, it->second);
2392       Changed = true;
2393     }
2394   }
2395   return Changed;
2396 }
2397 
2398 /// The given values are known to be equal in every block
2399 /// dominated by 'Root'.  Exploit this, for example by replacing 'LHS' with
2400 /// 'RHS' everywhere in the scope.  Returns whether a change was made.
2401 /// If DominatesByEdge is false, then it means that we will propagate the RHS
2402 /// value starting from the end of Root.Start.
2403 bool GVNPass::propagateEquality(Value *LHS, Value *RHS,
2404                                 const BasicBlockEdge &Root,
2405                                 bool DominatesByEdge) {
2406   SmallVector<std::pair<Value*, Value*>, 4> Worklist;
2407   Worklist.push_back(std::make_pair(LHS, RHS));
2408   bool Changed = false;
2409   // For speed, compute a conservative fast approximation to
2410   // DT->dominates(Root, Root.getEnd());
2411   const bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT);
2412 
2413   while (!Worklist.empty()) {
2414     std::pair<Value*, Value*> Item = Worklist.pop_back_val();
2415     LHS = Item.first; RHS = Item.second;
2416 
2417     if (LHS == RHS)
2418       continue;
2419     assert(LHS->getType() == RHS->getType() && "Equality but unequal types!");
2420 
2421     // Don't try to propagate equalities between constants.
2422     if (isa<Constant>(LHS) && isa<Constant>(RHS))
2423       continue;
2424 
2425     // Prefer a constant on the right-hand side, or an Argument if no constants.
2426     if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS)))
2427       std::swap(LHS, RHS);
2428     assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) && "Unexpected value!");
2429 
2430     // If there is no obvious reason to prefer the left-hand side over the
2431     // right-hand side, ensure the longest lived term is on the right-hand side,
2432     // so the shortest lived term will be replaced by the longest lived.
2433     // This tends to expose more simplifications.
2434     uint32_t LVN = VN.lookupOrAdd(LHS);
2435     if ((isa<Argument>(LHS) && isa<Argument>(RHS)) ||
2436         (isa<Instruction>(LHS) && isa<Instruction>(RHS))) {
2437       // Move the 'oldest' value to the right-hand side, using the value number
2438       // as a proxy for age.
2439       uint32_t RVN = VN.lookupOrAdd(RHS);
2440       if (LVN < RVN) {
2441         std::swap(LHS, RHS);
2442         LVN = RVN;
2443       }
2444     }
2445 
2446     // If value numbering later sees that an instruction in the scope is equal
2447     // to 'LHS' then ensure it will be turned into 'RHS'.  In order to preserve
2448     // the invariant that instructions only occur in the leader table for their
2449     // own value number (this is used by removeFromLeaderTable), do not do this
2450     // if RHS is an instruction (if an instruction in the scope is morphed into
2451     // LHS then it will be turned into RHS by the next GVN iteration anyway, so
2452     // using the leader table is about compiling faster, not optimizing better).
2453     // The leader table only tracks basic blocks, not edges. Only add to if we
2454     // have the simple case where the edge dominates the end.
2455     if (RootDominatesEnd && !isa<Instruction>(RHS))
2456       addToLeaderTable(LVN, RHS, Root.getEnd());
2457 
2458     // Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope.  As
2459     // LHS always has at least one use that is not dominated by Root, this will
2460     // never do anything if LHS has only one use.
2461     if (!LHS->hasOneUse()) {
2462       unsigned NumReplacements =
2463           DominatesByEdge
2464               ? replaceDominatedUsesWith(LHS, RHS, *DT, Root)
2465               : replaceDominatedUsesWith(LHS, RHS, *DT, Root.getStart());
2466 
2467       Changed |= NumReplacements > 0;
2468       NumGVNEqProp += NumReplacements;
2469       // Cached information for anything that uses LHS will be invalid.
2470       if (MD)
2471         MD->invalidateCachedPointerInfo(LHS);
2472     }
2473 
2474     // Now try to deduce additional equalities from this one. For example, if
2475     // the known equality was "(A != B)" == "false" then it follows that A and B
2476     // are equal in the scope. Only boolean equalities with an explicit true or
2477     // false RHS are currently supported.
2478     if (!RHS->getType()->isIntegerTy(1))
2479       // Not a boolean equality - bail out.
2480       continue;
2481     ConstantInt *CI = dyn_cast<ConstantInt>(RHS);
2482     if (!CI)
2483       // RHS neither 'true' nor 'false' - bail out.
2484       continue;
2485     // Whether RHS equals 'true'.  Otherwise it equals 'false'.
2486     bool isKnownTrue = CI->isMinusOne();
2487     bool isKnownFalse = !isKnownTrue;
2488 
2489     // If "A && B" is known true then both A and B are known true.  If "A || B"
2490     // is known false then both A and B are known false.
2491     Value *A, *B;
2492     if ((isKnownTrue && match(LHS, m_LogicalAnd(m_Value(A), m_Value(B)))) ||
2493         (isKnownFalse && match(LHS, m_LogicalOr(m_Value(A), m_Value(B))))) {
2494       Worklist.push_back(std::make_pair(A, RHS));
2495       Worklist.push_back(std::make_pair(B, RHS));
2496       continue;
2497     }
2498 
2499     // If we are propagating an equality like "(A == B)" == "true" then also
2500     // propagate the equality A == B.  When propagating a comparison such as
2501     // "(A >= B)" == "true", replace all instances of "A < B" with "false".
2502     if (CmpInst *Cmp = dyn_cast<CmpInst>(LHS)) {
2503       Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);
2504 
2505       // If "A == B" is known true, or "A != B" is known false, then replace
2506       // A with B everywhere in the scope.  For floating point operations, we
2507       // have to be careful since equality does not always imply equivalance.
2508       if ((isKnownTrue && impliesEquivalanceIfTrue(Cmp)) ||
2509           (isKnownFalse && impliesEquivalanceIfFalse(Cmp)))
2510         Worklist.push_back(std::make_pair(Op0, Op1));
2511 
2512       // If "A >= B" is known true, replace "A < B" with false everywhere.
2513       CmpInst::Predicate NotPred = Cmp->getInversePredicate();
2514       Constant *NotVal = ConstantInt::get(Cmp->getType(), isKnownFalse);
2515       // Since we don't have the instruction "A < B" immediately to hand, work
2516       // out the value number that it would have and use that to find an
2517       // appropriate instruction (if any).
2518       uint32_t NextNum = VN.getNextUnusedValueNumber();
2519       uint32_t Num = VN.lookupOrAddCmp(Cmp->getOpcode(), NotPred, Op0, Op1);
2520       // If the number we were assigned was brand new then there is no point in
2521       // looking for an instruction realizing it: there cannot be one!
2522       if (Num < NextNum) {
2523         Value *NotCmp = findLeader(Root.getEnd(), Num);
2524         if (NotCmp && isa<Instruction>(NotCmp)) {
2525           unsigned NumReplacements =
2526               DominatesByEdge
2527                   ? replaceDominatedUsesWith(NotCmp, NotVal, *DT, Root)
2528                   : replaceDominatedUsesWith(NotCmp, NotVal, *DT,
2529                                              Root.getStart());
2530           Changed |= NumReplacements > 0;
2531           NumGVNEqProp += NumReplacements;
2532           // Cached information for anything that uses NotCmp will be invalid.
2533           if (MD)
2534             MD->invalidateCachedPointerInfo(NotCmp);
2535         }
2536       }
2537       // Ensure that any instruction in scope that gets the "A < B" value number
2538       // is replaced with false.
2539       // The leader table only tracks basic blocks, not edges. Only add to if we
2540       // have the simple case where the edge dominates the end.
2541       if (RootDominatesEnd)
2542         addToLeaderTable(Num, NotVal, Root.getEnd());
2543 
2544       continue;
2545     }
2546   }
2547 
2548   return Changed;
2549 }
2550 
2551 /// When calculating availability, handle an instruction
2552 /// by inserting it into the appropriate sets
2553 bool GVNPass::processInstruction(Instruction *I) {
2554   // Ignore dbg info intrinsics.
2555   if (isa<DbgInfoIntrinsic>(I))
2556     return false;
2557 
2558   // If the instruction can be easily simplified then do so now in preference
2559   // to value numbering it.  Value numbering often exposes redundancies, for
2560   // example if it determines that %y is equal to %x then the instruction
2561   // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
2562   const DataLayout &DL = I->getModule()->getDataLayout();
2563   if (Value *V = simplifyInstruction(I, {DL, TLI, DT, AC})) {
2564     bool Changed = false;
2565     if (!I->use_empty()) {
2566       // Simplification can cause a special instruction to become not special.
2567       // For example, devirtualization to a willreturn function.
2568       ICF->removeUsersOf(I);
2569       I->replaceAllUsesWith(V);
2570       Changed = true;
2571     }
2572     if (isInstructionTriviallyDead(I, TLI)) {
2573       markInstructionForDeletion(I);
2574       Changed = true;
2575     }
2576     if (Changed) {
2577       if (MD && V->getType()->isPtrOrPtrVectorTy())
2578         MD->invalidateCachedPointerInfo(V);
2579       ++NumGVNSimpl;
2580       return true;
2581     }
2582   }
2583 
2584   if (auto *Assume = dyn_cast<AssumeInst>(I))
2585     return processAssumeIntrinsic(Assume);
2586 
2587   if (LoadInst *Load = dyn_cast<LoadInst>(I)) {
2588     if (processLoad(Load))
2589       return true;
2590 
2591     unsigned Num = VN.lookupOrAdd(Load);
2592     addToLeaderTable(Num, Load, Load->getParent());
2593     return false;
2594   }
2595 
2596   // For conditional branches, we can perform simple conditional propagation on
2597   // the condition value itself.
2598   if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
2599     if (!BI->isConditional())
2600       return false;
2601 
2602     if (isa<Constant>(BI->getCondition()))
2603       return processFoldableCondBr(BI);
2604 
2605     Value *BranchCond = BI->getCondition();
2606     BasicBlock *TrueSucc = BI->getSuccessor(0);
2607     BasicBlock *FalseSucc = BI->getSuccessor(1);
2608     // Avoid multiple edges early.
2609     if (TrueSucc == FalseSucc)
2610       return false;
2611 
2612     BasicBlock *Parent = BI->getParent();
2613     bool Changed = false;
2614 
2615     Value *TrueVal = ConstantInt::getTrue(TrueSucc->getContext());
2616     BasicBlockEdge TrueE(Parent, TrueSucc);
2617     Changed |= propagateEquality(BranchCond, TrueVal, TrueE, true);
2618 
2619     Value *FalseVal = ConstantInt::getFalse(FalseSucc->getContext());
2620     BasicBlockEdge FalseE(Parent, FalseSucc);
2621     Changed |= propagateEquality(BranchCond, FalseVal, FalseE, true);
2622 
2623     return Changed;
2624   }
2625 
2626   // For switches, propagate the case values into the case destinations.
2627   if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) {
2628     Value *SwitchCond = SI->getCondition();
2629     BasicBlock *Parent = SI->getParent();
2630     bool Changed = false;
2631 
2632     // Remember how many outgoing edges there are to every successor.
2633     SmallDenseMap<BasicBlock *, unsigned, 16> SwitchEdges;
2634     for (unsigned i = 0, n = SI->getNumSuccessors(); i != n; ++i)
2635       ++SwitchEdges[SI->getSuccessor(i)];
2636 
2637     for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
2638          i != e; ++i) {
2639       BasicBlock *Dst = i->getCaseSuccessor();
2640       // If there is only a single edge, propagate the case value into it.
2641       if (SwitchEdges.lookup(Dst) == 1) {
2642         BasicBlockEdge E(Parent, Dst);
2643         Changed |= propagateEquality(SwitchCond, i->getCaseValue(), E, true);
2644       }
2645     }
2646     return Changed;
2647   }
2648 
2649   // Instructions with void type don't return a value, so there's
2650   // no point in trying to find redundancies in them.
2651   if (I->getType()->isVoidTy())
2652     return false;
2653 
2654   uint32_t NextNum = VN.getNextUnusedValueNumber();
2655   unsigned Num = VN.lookupOrAdd(I);
2656 
2657   // Allocations are always uniquely numbered, so we can save time and memory
2658   // by fast failing them.
2659   if (isa<AllocaInst>(I) || I->isTerminator() || isa<PHINode>(I)) {
2660     addToLeaderTable(Num, I, I->getParent());
2661     return false;
2662   }
2663 
2664   // If the number we were assigned was a brand new VN, then we don't
2665   // need to do a lookup to see if the number already exists
2666   // somewhere in the domtree: it can't!
2667   if (Num >= NextNum) {
2668     addToLeaderTable(Num, I, I->getParent());
2669     return false;
2670   }
2671 
2672   // Perform fast-path value-number based elimination of values inherited from
2673   // dominators.
2674   Value *Repl = findLeader(I->getParent(), Num);
2675   if (!Repl) {
2676     // Failure, just remember this instance for future use.
2677     addToLeaderTable(Num, I, I->getParent());
2678     return false;
2679   }
2680 
2681   if (Repl == I) {
2682     // If I was the result of a shortcut PRE, it might already be in the table
2683     // and the best replacement for itself. Nothing to do.
2684     return false;
2685   }
2686 
2687   // Remove it!
2688   patchAndReplaceAllUsesWith(I, Repl);
2689   if (MD && Repl->getType()->isPtrOrPtrVectorTy())
2690     MD->invalidateCachedPointerInfo(Repl);
2691   markInstructionForDeletion(I);
2692   return true;
2693 }
2694 
2695 /// runOnFunction - This is the main transformation entry point for a function.
2696 bool GVNPass::runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT,
2697                       const TargetLibraryInfo &RunTLI, AAResults &RunAA,
2698                       MemoryDependenceResults *RunMD, LoopInfo *LI,
2699                       OptimizationRemarkEmitter *RunORE, MemorySSA *MSSA) {
2700   AC = &RunAC;
2701   DT = &RunDT;
2702   VN.setDomTree(DT);
2703   TLI = &RunTLI;
2704   VN.setAliasAnalysis(&RunAA);
2705   MD = RunMD;
2706   ImplicitControlFlowTracking ImplicitCFT;
2707   ICF = &ImplicitCFT;
2708   this->LI = LI;
2709   VN.setMemDep(MD);
2710   ORE = RunORE;
2711   InvalidBlockRPONumbers = true;
2712   MemorySSAUpdater Updater(MSSA);
2713   MSSAU = MSSA ? &Updater : nullptr;
2714 
2715   bool Changed = false;
2716   bool ShouldContinue = true;
2717 
2718   DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
2719   // Merge unconditional branches, allowing PRE to catch more
2720   // optimization opportunities.
2721   for (BasicBlock &BB : llvm::make_early_inc_range(F)) {
2722     bool removedBlock = MergeBlockIntoPredecessor(&BB, &DTU, LI, MSSAU, MD);
2723     if (removedBlock)
2724       ++NumGVNBlocks;
2725 
2726     Changed |= removedBlock;
2727   }
2728 
2729   unsigned Iteration = 0;
2730   while (ShouldContinue) {
2731     LLVM_DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
2732     (void) Iteration;
2733     ShouldContinue = iterateOnFunction(F);
2734     Changed |= ShouldContinue;
2735     ++Iteration;
2736   }
2737 
2738   if (isPREEnabled()) {
2739     // Fabricate val-num for dead-code in order to suppress assertion in
2740     // performPRE().
2741     assignValNumForDeadCode();
2742     bool PREChanged = true;
2743     while (PREChanged) {
2744       PREChanged = performPRE(F);
2745       Changed |= PREChanged;
2746     }
2747   }
2748 
2749   // FIXME: Should perform GVN again after PRE does something.  PRE can move
2750   // computations into blocks where they become fully redundant.  Note that
2751   // we can't do this until PRE's critical edge splitting updates memdep.
2752   // Actually, when this happens, we should just fully integrate PRE into GVN.
2753 
2754   cleanupGlobalSets();
2755   // Do not cleanup DeadBlocks in cleanupGlobalSets() as it's called for each
2756   // iteration.
2757   DeadBlocks.clear();
2758 
2759   if (MSSA && VerifyMemorySSA)
2760     MSSA->verifyMemorySSA();
2761 
2762   return Changed;
2763 }
2764 
2765 bool GVNPass::processBlock(BasicBlock *BB) {
2766   // FIXME: Kill off InstrsToErase by doing erasing eagerly in a helper function
2767   // (and incrementing BI before processing an instruction).
2768   assert(InstrsToErase.empty() &&
2769          "We expect InstrsToErase to be empty across iterations");
2770   if (DeadBlocks.count(BB))
2771     return false;
2772 
2773   // Clearing map before every BB because it can be used only for single BB.
2774   ReplaceOperandsWithMap.clear();
2775   bool ChangedFunction = false;
2776 
2777   // Since we may not have visited the input blocks of the phis, we can't
2778   // use our normal hash approach for phis.  Instead, simply look for
2779   // obvious duplicates.  The first pass of GVN will tend to create
2780   // identical phis, and the second or later passes can eliminate them.
2781   ChangedFunction |= EliminateDuplicatePHINodes(BB);
2782 
2783   for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
2784        BI != BE;) {
2785     if (!ReplaceOperandsWithMap.empty())
2786       ChangedFunction |= replaceOperandsForInBlockEquality(&*BI);
2787     ChangedFunction |= processInstruction(&*BI);
2788 
2789     if (InstrsToErase.empty()) {
2790       ++BI;
2791       continue;
2792     }
2793 
2794     // If we need some instructions deleted, do it now.
2795     NumGVNInstr += InstrsToErase.size();
2796 
2797     // Avoid iterator invalidation.
2798     bool AtStart = BI == BB->begin();
2799     if (!AtStart)
2800       --BI;
2801 
2802     for (auto *I : InstrsToErase) {
2803       assert(I->getParent() == BB && "Removing instruction from wrong block?");
2804       LLVM_DEBUG(dbgs() << "GVN removed: " << *I << '\n');
2805       salvageKnowledge(I, AC);
2806       salvageDebugInfo(*I);
2807       removeInstruction(I);
2808     }
2809     InstrsToErase.clear();
2810 
2811     if (AtStart)
2812       BI = BB->begin();
2813     else
2814       ++BI;
2815   }
2816 
2817   return ChangedFunction;
2818 }
2819 
2820 // Instantiate an expression in a predecessor that lacked it.
2821 bool GVNPass::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
2822                                         BasicBlock *Curr, unsigned int ValNo) {
2823   // Because we are going top-down through the block, all value numbers
2824   // will be available in the predecessor by the time we need them.  Any
2825   // that weren't originally present will have been instantiated earlier
2826   // in this loop.
2827   bool success = true;
2828   for (unsigned i = 0, e = Instr->getNumOperands(); i != e; ++i) {
2829     Value *Op = Instr->getOperand(i);
2830     if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
2831       continue;
2832     // This could be a newly inserted instruction, in which case, we won't
2833     // find a value number, and should give up before we hurt ourselves.
2834     // FIXME: Rewrite the infrastructure to let it easier to value number
2835     // and process newly inserted instructions.
2836     if (!VN.exists(Op)) {
2837       success = false;
2838       break;
2839     }
2840     uint32_t TValNo =
2841         VN.phiTranslate(Pred, Curr, VN.lookup(Op), *this);
2842     if (Value *V = findLeader(Pred, TValNo)) {
2843       Instr->setOperand(i, V);
2844     } else {
2845       success = false;
2846       break;
2847     }
2848   }
2849 
2850   // Fail out if we encounter an operand that is not available in
2851   // the PRE predecessor.  This is typically because of loads which
2852   // are not value numbered precisely.
2853   if (!success)
2854     return false;
2855 
2856   Instr->insertBefore(Pred->getTerminator());
2857   Instr->setName(Instr->getName() + ".pre");
2858   Instr->setDebugLoc(Instr->getDebugLoc());
2859 
2860   ICF->insertInstructionTo(Instr, Pred);
2861 
2862   unsigned Num = VN.lookupOrAdd(Instr);
2863   VN.add(Instr, Num);
2864 
2865   // Update the availability map to include the new instruction.
2866   addToLeaderTable(Num, Instr, Pred);
2867   return true;
2868 }
2869 
2870 bool GVNPass::performScalarPRE(Instruction *CurInst) {
2871   if (isa<AllocaInst>(CurInst) || CurInst->isTerminator() ||
2872       isa<PHINode>(CurInst) || CurInst->getType()->isVoidTy() ||
2873       CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
2874       isa<DbgInfoIntrinsic>(CurInst))
2875     return false;
2876 
2877   // Don't do PRE on compares. The PHI would prevent CodeGenPrepare from
2878   // sinking the compare again, and it would force the code generator to
2879   // move the i1 from processor flags or predicate registers into a general
2880   // purpose register.
2881   if (isa<CmpInst>(CurInst))
2882     return false;
2883 
2884   // Don't do PRE on GEPs. The inserted PHI would prevent CodeGenPrepare from
2885   // sinking the addressing mode computation back to its uses. Extending the
2886   // GEP's live range increases the register pressure, and therefore it can
2887   // introduce unnecessary spills.
2888   //
2889   // This doesn't prevent Load PRE. PHI translation will make the GEP available
2890   // to the load by moving it to the predecessor block if necessary.
2891   if (isa<GetElementPtrInst>(CurInst))
2892     return false;
2893 
2894   if (auto *CallB = dyn_cast<CallBase>(CurInst)) {
2895     // We don't currently value number ANY inline asm calls.
2896     if (CallB->isInlineAsm())
2897       return false;
2898   }
2899 
2900   uint32_t ValNo = VN.lookup(CurInst);
2901 
2902   // Look for the predecessors for PRE opportunities.  We're
2903   // only trying to solve the basic diamond case, where
2904   // a value is computed in the successor and one predecessor,
2905   // but not the other.  We also explicitly disallow cases
2906   // where the successor is its own predecessor, because they're
2907   // more complicated to get right.
2908   unsigned NumWith = 0;
2909   unsigned NumWithout = 0;
2910   BasicBlock *PREPred = nullptr;
2911   BasicBlock *CurrentBlock = CurInst->getParent();
2912 
2913   // Update the RPO numbers for this function.
2914   if (InvalidBlockRPONumbers)
2915     assignBlockRPONumber(*CurrentBlock->getParent());
2916 
2917   SmallVector<std::pair<Value *, BasicBlock *>, 8> predMap;
2918   for (BasicBlock *P : predecessors(CurrentBlock)) {
2919     // We're not interested in PRE where blocks with predecessors that are
2920     // not reachable.
2921     if (!DT->isReachableFromEntry(P)) {
2922       NumWithout = 2;
2923       break;
2924     }
2925     // It is not safe to do PRE when P->CurrentBlock is a loop backedge.
2926     assert(BlockRPONumber.count(P) && BlockRPONumber.count(CurrentBlock) &&
2927            "Invalid BlockRPONumber map.");
2928     if (BlockRPONumber[P] >= BlockRPONumber[CurrentBlock]) {
2929       NumWithout = 2;
2930       break;
2931     }
2932 
2933     uint32_t TValNo = VN.phiTranslate(P, CurrentBlock, ValNo, *this);
2934     Value *predV = findLeader(P, TValNo);
2935     if (!predV) {
2936       predMap.push_back(std::make_pair(static_cast<Value *>(nullptr), P));
2937       PREPred = P;
2938       ++NumWithout;
2939     } else if (predV == CurInst) {
2940       /* CurInst dominates this predecessor. */
2941       NumWithout = 2;
2942       break;
2943     } else {
2944       predMap.push_back(std::make_pair(predV, P));
2945       ++NumWith;
2946     }
2947   }
2948 
2949   // Don't do PRE when it might increase code size, i.e. when
2950   // we would need to insert instructions in more than one pred.
2951   if (NumWithout > 1 || NumWith == 0)
2952     return false;
2953 
2954   // We may have a case where all predecessors have the instruction,
2955   // and we just need to insert a phi node. Otherwise, perform
2956   // insertion.
2957   Instruction *PREInstr = nullptr;
2958 
2959   if (NumWithout != 0) {
2960     if (!isSafeToSpeculativelyExecute(CurInst)) {
2961       // It is only valid to insert a new instruction if the current instruction
2962       // is always executed. An instruction with implicit control flow could
2963       // prevent us from doing it. If we cannot speculate the execution, then
2964       // PRE should be prohibited.
2965       if (ICF->isDominatedByICFIFromSameBlock(CurInst))
2966         return false;
2967     }
2968 
2969     // Don't do PRE across indirect branch.
2970     if (isa<IndirectBrInst>(PREPred->getTerminator()))
2971       return false;
2972 
2973     // We can't do PRE safely on a critical edge, so instead we schedule
2974     // the edge to be split and perform the PRE the next time we iterate
2975     // on the function.
2976     unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
2977     if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
2978       toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
2979       return false;
2980     }
2981     // We need to insert somewhere, so let's give it a shot
2982     PREInstr = CurInst->clone();
2983     if (!performScalarPREInsertion(PREInstr, PREPred, CurrentBlock, ValNo)) {
2984       // If we failed insertion, make sure we remove the instruction.
2985 #ifndef NDEBUG
2986       verifyRemoved(PREInstr);
2987 #endif
2988       PREInstr->deleteValue();
2989       return false;
2990     }
2991   }
2992 
2993   // Either we should have filled in the PRE instruction, or we should
2994   // not have needed insertions.
2995   assert(PREInstr != nullptr || NumWithout == 0);
2996 
2997   ++NumGVNPRE;
2998 
2999   // Create a PHI to make the value available in this block.
3000   PHINode *Phi =
3001       PHINode::Create(CurInst->getType(), predMap.size(),
3002                       CurInst->getName() + ".pre-phi", &CurrentBlock->front());
3003   for (unsigned i = 0, e = predMap.size(); i != e; ++i) {
3004     if (Value *V = predMap[i].first) {
3005       // If we use an existing value in this phi, we have to patch the original
3006       // value because the phi will be used to replace a later value.
3007       patchReplacementInstruction(CurInst, V);
3008       Phi->addIncoming(V, predMap[i].second);
3009     } else
3010       Phi->addIncoming(PREInstr, PREPred);
3011   }
3012 
3013   VN.add(Phi, ValNo);
3014   // After creating a new PHI for ValNo, the phi translate result for ValNo will
3015   // be changed, so erase the related stale entries in phi translate cache.
3016   VN.eraseTranslateCacheEntry(ValNo, *CurrentBlock);
3017   addToLeaderTable(ValNo, Phi, CurrentBlock);
3018   Phi->setDebugLoc(CurInst->getDebugLoc());
3019   CurInst->replaceAllUsesWith(Phi);
3020   if (MD && Phi->getType()->isPtrOrPtrVectorTy())
3021     MD->invalidateCachedPointerInfo(Phi);
3022   VN.erase(CurInst);
3023   removeFromLeaderTable(ValNo, CurInst, CurrentBlock);
3024 
3025   LLVM_DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
3026   removeInstruction(CurInst);
3027   ++NumGVNInstr;
3028 
3029   return true;
3030 }
3031 
3032 /// Perform a purely local form of PRE that looks for diamond
3033 /// control flow patterns and attempts to perform simple PRE at the join point.
3034 bool GVNPass::performPRE(Function &F) {
3035   bool Changed = false;
3036   for (BasicBlock *CurrentBlock : depth_first(&F.getEntryBlock())) {
3037     // Nothing to PRE in the entry block.
3038     if (CurrentBlock == &F.getEntryBlock())
3039       continue;
3040 
3041     // Don't perform PRE on an EH pad.
3042     if (CurrentBlock->isEHPad())
3043       continue;
3044 
3045     for (BasicBlock::iterator BI = CurrentBlock->begin(),
3046                               BE = CurrentBlock->end();
3047          BI != BE;) {
3048       Instruction *CurInst = &*BI++;
3049       Changed |= performScalarPRE(CurInst);
3050     }
3051   }
3052 
3053   if (splitCriticalEdges())
3054     Changed = true;
3055 
3056   return Changed;
3057 }
3058 
3059 /// Split the critical edge connecting the given two blocks, and return
3060 /// the block inserted to the critical edge.
3061 BasicBlock *GVNPass::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) {
3062   // GVN does not require loop-simplify, do not try to preserve it if it is not
3063   // possible.
3064   BasicBlock *BB = SplitCriticalEdge(
3065       Pred, Succ,
3066       CriticalEdgeSplittingOptions(DT, LI, MSSAU).unsetPreserveLoopSimplify());
3067   if (BB) {
3068     if (MD)
3069       MD->invalidateCachedPredecessors();
3070     InvalidBlockRPONumbers = true;
3071   }
3072   return BB;
3073 }
3074 
3075 /// Split critical edges found during the previous
3076 /// iteration that may enable further optimization.
3077 bool GVNPass::splitCriticalEdges() {
3078   if (toSplit.empty())
3079     return false;
3080 
3081   bool Changed = false;
3082   do {
3083     std::pair<Instruction *, unsigned> Edge = toSplit.pop_back_val();
3084     Changed |= SplitCriticalEdge(Edge.first, Edge.second,
3085                                  CriticalEdgeSplittingOptions(DT, LI, MSSAU)) !=
3086                nullptr;
3087   } while (!toSplit.empty());
3088   if (Changed) {
3089     if (MD)
3090       MD->invalidateCachedPredecessors();
3091     InvalidBlockRPONumbers = true;
3092   }
3093   return Changed;
3094 }
3095 
3096 /// Executes one iteration of GVN
3097 bool GVNPass::iterateOnFunction(Function &F) {
3098   cleanupGlobalSets();
3099 
3100   // Top-down walk of the dominator tree
3101   bool Changed = false;
3102   // Needed for value numbering with phi construction to work.
3103   // RPOT walks the graph in its constructor and will not be invalidated during
3104   // processBlock.
3105   ReversePostOrderTraversal<Function *> RPOT(&F);
3106 
3107   for (BasicBlock *BB : RPOT)
3108     Changed |= processBlock(BB);
3109 
3110   return Changed;
3111 }
3112 
3113 void GVNPass::cleanupGlobalSets() {
3114   VN.clear();
3115   LeaderTable.clear();
3116   BlockRPONumber.clear();
3117   TableAllocator.Reset();
3118   ICF->clear();
3119   InvalidBlockRPONumbers = true;
3120 }
3121 
3122 void GVNPass::removeInstruction(Instruction *I) {
3123   if (MD) MD->removeInstruction(I);
3124   if (MSSAU)
3125     MSSAU->removeMemoryAccess(I);
3126 #ifndef NDEBUG
3127   verifyRemoved(I);
3128 #endif
3129   ICF->removeInstruction(I);
3130   I->eraseFromParent();
3131 }
3132 
3133 /// Verify that the specified instruction does not occur in our
3134 /// internal data structures.
3135 void GVNPass::verifyRemoved(const Instruction *Inst) const {
3136   VN.verifyRemoved(Inst);
3137 
3138   // Walk through the value number scope to make sure the instruction isn't
3139   // ferreted away in it.
3140   for (const auto &I : LeaderTable) {
3141     const LeaderTableEntry *Node = &I.second;
3142     assert(Node->Val != Inst && "Inst still in value numbering scope!");
3143 
3144     while (Node->Next) {
3145       Node = Node->Next;
3146       assert(Node->Val != Inst && "Inst still in value numbering scope!");
3147     }
3148   }
3149 }
3150 
3151 /// BB is declared dead, which implied other blocks become dead as well. This
3152 /// function is to add all these blocks to "DeadBlocks". For the dead blocks'
3153 /// live successors, update their phi nodes by replacing the operands
3154 /// corresponding to dead blocks with UndefVal.
3155 void GVNPass::addDeadBlock(BasicBlock *BB) {
3156   SmallVector<BasicBlock *, 4> NewDead;
3157   SmallSetVector<BasicBlock *, 4> DF;
3158 
3159   NewDead.push_back(BB);
3160   while (!NewDead.empty()) {
3161     BasicBlock *D = NewDead.pop_back_val();
3162     if (DeadBlocks.count(D))
3163       continue;
3164 
3165     // All blocks dominated by D are dead.
3166     SmallVector<BasicBlock *, 8> Dom;
3167     DT->getDescendants(D, Dom);
3168     DeadBlocks.insert(Dom.begin(), Dom.end());
3169 
3170     // Figure out the dominance-frontier(D).
3171     for (BasicBlock *B : Dom) {
3172       for (BasicBlock *S : successors(B)) {
3173         if (DeadBlocks.count(S))
3174           continue;
3175 
3176         bool AllPredDead = true;
3177         for (BasicBlock *P : predecessors(S))
3178           if (!DeadBlocks.count(P)) {
3179             AllPredDead = false;
3180             break;
3181           }
3182 
3183         if (!AllPredDead) {
3184           // S could be proved dead later on. That is why we don't update phi
3185           // operands at this moment.
3186           DF.insert(S);
3187         } else {
3188           // While S is not dominated by D, it is dead by now. This could take
3189           // place if S already have a dead predecessor before D is declared
3190           // dead.
3191           NewDead.push_back(S);
3192         }
3193       }
3194     }
3195   }
3196 
3197   // For the dead blocks' live successors, update their phi nodes by replacing
3198   // the operands corresponding to dead blocks with UndefVal.
3199   for (BasicBlock *B : DF) {
3200     if (DeadBlocks.count(B))
3201       continue;
3202 
3203     // First, split the critical edges. This might also create additional blocks
3204     // to preserve LoopSimplify form and adjust edges accordingly.
3205     SmallVector<BasicBlock *, 4> Preds(predecessors(B));
3206     for (BasicBlock *P : Preds) {
3207       if (!DeadBlocks.count(P))
3208         continue;
3209 
3210       if (llvm::is_contained(successors(P), B) &&
3211           isCriticalEdge(P->getTerminator(), B)) {
3212         if (BasicBlock *S = splitCriticalEdges(P, B))
3213           DeadBlocks.insert(P = S);
3214       }
3215     }
3216 
3217     // Now poison the incoming values from the dead predecessors.
3218     for (BasicBlock *P : predecessors(B)) {
3219       if (!DeadBlocks.count(P))
3220         continue;
3221       for (PHINode &Phi : B->phis()) {
3222         Phi.setIncomingValueForBlock(P, PoisonValue::get(Phi.getType()));
3223         if (MD)
3224           MD->invalidateCachedPointerInfo(&Phi);
3225       }
3226     }
3227   }
3228 }
3229 
3230 // If the given branch is recognized as a foldable branch (i.e. conditional
3231 // branch with constant condition), it will perform following analyses and
3232 // transformation.
3233 //  1) If the dead out-coming edge is a critical-edge, split it. Let
3234 //     R be the target of the dead out-coming edge.
3235 //  1) Identify the set of dead blocks implied by the branch's dead outcoming
3236 //     edge. The result of this step will be {X| X is dominated by R}
3237 //  2) Identify those blocks which haves at least one dead predecessor. The
3238 //     result of this step will be dominance-frontier(R).
3239 //  3) Update the PHIs in DF(R) by replacing the operands corresponding to
3240 //     dead blocks with "UndefVal" in an hope these PHIs will optimized away.
3241 //
3242 // Return true iff *NEW* dead code are found.
3243 bool GVNPass::processFoldableCondBr(BranchInst *BI) {
3244   if (!BI || BI->isUnconditional())
3245     return false;
3246 
3247   // If a branch has two identical successors, we cannot declare either dead.
3248   if (BI->getSuccessor(0) == BI->getSuccessor(1))
3249     return false;
3250 
3251   ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
3252   if (!Cond)
3253     return false;
3254 
3255   BasicBlock *DeadRoot =
3256       Cond->getZExtValue() ? BI->getSuccessor(1) : BI->getSuccessor(0);
3257   if (DeadBlocks.count(DeadRoot))
3258     return false;
3259 
3260   if (!DeadRoot->getSinglePredecessor())
3261     DeadRoot = splitCriticalEdges(BI->getParent(), DeadRoot);
3262 
3263   addDeadBlock(DeadRoot);
3264   return true;
3265 }
3266 
3267 // performPRE() will trigger assert if it comes across an instruction without
3268 // associated val-num. As it normally has far more live instructions than dead
3269 // instructions, it makes more sense just to "fabricate" a val-number for the
3270 // dead code than checking if instruction involved is dead or not.
3271 void GVNPass::assignValNumForDeadCode() {
3272   for (BasicBlock *BB : DeadBlocks) {
3273     for (Instruction &Inst : *BB) {
3274       unsigned ValNum = VN.lookupOrAdd(&Inst);
3275       addToLeaderTable(ValNum, &Inst, BB);
3276     }
3277   }
3278 }
3279 
3280 class llvm::gvn::GVNLegacyPass : public FunctionPass {
3281 public:
3282   static char ID; // Pass identification, replacement for typeid
3283 
3284   explicit GVNLegacyPass(bool NoMemDepAnalysis = !GVNEnableMemDep)
3285       : FunctionPass(ID), Impl(GVNOptions().setMemDep(!NoMemDepAnalysis)) {
3286     initializeGVNLegacyPassPass(*PassRegistry::getPassRegistry());
3287   }
3288 
3289   bool runOnFunction(Function &F) override {
3290     if (skipFunction(F))
3291       return false;
3292 
3293     auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
3294 
3295     auto *MSSAWP = getAnalysisIfAvailable<MemorySSAWrapperPass>();
3296     return Impl.runImpl(
3297         F, getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
3298         getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
3299         getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
3300         getAnalysis<AAResultsWrapperPass>().getAAResults(),
3301         Impl.isMemDepEnabled()
3302             ? &getAnalysis<MemoryDependenceWrapperPass>().getMemDep()
3303             : nullptr,
3304         LIWP ? &LIWP->getLoopInfo() : nullptr,
3305         &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(),
3306         MSSAWP ? &MSSAWP->getMSSA() : nullptr);
3307   }
3308 
3309   void getAnalysisUsage(AnalysisUsage &AU) const override {
3310     AU.addRequired<AssumptionCacheTracker>();
3311     AU.addRequired<DominatorTreeWrapperPass>();
3312     AU.addRequired<TargetLibraryInfoWrapperPass>();
3313     AU.addRequired<LoopInfoWrapperPass>();
3314     if (Impl.isMemDepEnabled())
3315       AU.addRequired<MemoryDependenceWrapperPass>();
3316     AU.addRequired<AAResultsWrapperPass>();
3317     AU.addPreserved<DominatorTreeWrapperPass>();
3318     AU.addPreserved<GlobalsAAWrapperPass>();
3319     AU.addPreserved<TargetLibraryInfoWrapperPass>();
3320     AU.addPreserved<LoopInfoWrapperPass>();
3321     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
3322     AU.addPreserved<MemorySSAWrapperPass>();
3323   }
3324 
3325 private:
3326   GVNPass Impl;
3327 };
3328 
3329 char GVNLegacyPass::ID = 0;
3330 
3331 INITIALIZE_PASS_BEGIN(GVNLegacyPass, "gvn", "Global Value Numbering", false, false)
3332 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
3333 INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
3334 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
3335 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
3336 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
3337 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
3338 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
3339 INITIALIZE_PASS_END(GVNLegacyPass, "gvn", "Global Value Numbering", false, false)
3340 
3341 // The public interface to this file...
3342 FunctionPass *llvm::createGVNPass(bool NoMemDepAnalysis) {
3343   return new GVNLegacyPass(NoMemDepAnalysis);
3344 }
3345