1 //===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass performs global value numbering to eliminate fully redundant
10 // instructions.  It also performs simple dead load elimination.
11 //
12 // Note that this pass does the value numbering itself; it does not use the
13 // ValueNumbering analysis passes.
14 //
15 //===----------------------------------------------------------------------===//
16 
17 #include "llvm/Transforms/Scalar/GVN.h"
18 #include "llvm/ADT/DenseMap.h"
19 #include "llvm/ADT/DepthFirstIterator.h"
20 #include "llvm/ADT/Hashing.h"
21 #include "llvm/ADT/MapVector.h"
22 #include "llvm/ADT/PointerIntPair.h"
23 #include "llvm/ADT/PostOrderIterator.h"
24 #include "llvm/ADT/STLExtras.h"
25 #include "llvm/ADT/SetVector.h"
26 #include "llvm/ADT/SmallPtrSet.h"
27 #include "llvm/ADT/SmallVector.h"
28 #include "llvm/ADT/Statistic.h"
29 #include "llvm/Analysis/AliasAnalysis.h"
30 #include "llvm/Analysis/AssumeBundleQueries.h"
31 #include "llvm/Analysis/AssumptionCache.h"
32 #include "llvm/Analysis/CFG.h"
33 #include "llvm/Analysis/DomTreeUpdater.h"
34 #include "llvm/Analysis/GlobalsModRef.h"
35 #include "llvm/Analysis/InstructionSimplify.h"
36 #include "llvm/Analysis/LoopInfo.h"
37 #include "llvm/Analysis/MemoryBuiltins.h"
38 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
39 #include "llvm/Analysis/MemorySSA.h"
40 #include "llvm/Analysis/MemorySSAUpdater.h"
41 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
42 #include "llvm/Analysis/PHITransAddr.h"
43 #include "llvm/Analysis/TargetLibraryInfo.h"
44 #include "llvm/Analysis/ValueTracking.h"
45 #include "llvm/Config/llvm-config.h"
46 #include "llvm/IR/Attributes.h"
47 #include "llvm/IR/BasicBlock.h"
48 #include "llvm/IR/Constant.h"
49 #include "llvm/IR/Constants.h"
50 #include "llvm/IR/DataLayout.h"
51 #include "llvm/IR/DebugLoc.h"
52 #include "llvm/IR/Dominators.h"
53 #include "llvm/IR/Function.h"
54 #include "llvm/IR/InstrTypes.h"
55 #include "llvm/IR/Instruction.h"
56 #include "llvm/IR/Instructions.h"
57 #include "llvm/IR/IntrinsicInst.h"
58 #include "llvm/IR/Intrinsics.h"
59 #include "llvm/IR/LLVMContext.h"
60 #include "llvm/IR/Metadata.h"
61 #include "llvm/IR/Module.h"
62 #include "llvm/IR/Operator.h"
63 #include "llvm/IR/PassManager.h"
64 #include "llvm/IR/PatternMatch.h"
65 #include "llvm/IR/Type.h"
66 #include "llvm/IR/Use.h"
67 #include "llvm/IR/Value.h"
68 #include "llvm/InitializePasses.h"
69 #include "llvm/Pass.h"
70 #include "llvm/Support/Casting.h"
71 #include "llvm/Support/CommandLine.h"
72 #include "llvm/Support/Compiler.h"
73 #include "llvm/Support/Debug.h"
74 #include "llvm/Support/raw_ostream.h"
75 #include "llvm/Transforms/Utils.h"
76 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
77 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
78 #include "llvm/Transforms/Utils/Local.h"
79 #include "llvm/Transforms/Utils/SSAUpdater.h"
80 #include "llvm/Transforms/Utils/VNCoercion.h"
81 #include <algorithm>
82 #include <cassert>
83 #include <cstdint>
84 #include <utility>
85 #include <vector>
86 
87 using namespace llvm;
88 using namespace llvm::gvn;
89 using namespace llvm::VNCoercion;
90 using namespace PatternMatch;
91 
92 #define DEBUG_TYPE "gvn"
93 
94 STATISTIC(NumGVNInstr,  "Number of instructions deleted");
95 STATISTIC(NumGVNLoad,   "Number of loads deleted");
96 STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
97 STATISTIC(NumGVNBlocks, "Number of blocks merged");
98 STATISTIC(NumGVNSimpl,  "Number of instructions simplified");
99 STATISTIC(NumGVNEqProp, "Number of equalities propagated");
100 STATISTIC(NumPRELoad,   "Number of loads PRE'd");
101 
102 STATISTIC(IsValueFullyAvailableInBlockNumSpeculationsMax,
103           "Number of blocks speculated as available in "
104           "IsValueFullyAvailableInBlock(), max");
105 STATISTIC(MaxBBSpeculationCutoffReachedTimes,
106           "Number of times we we reached gvn-max-block-speculations cut-off "
107           "preventing further exploration");
108 
109 static cl::opt<bool> GVNEnablePRE("enable-pre", cl::init(true), cl::Hidden);
110 static cl::opt<bool> GVNEnableLoadPRE("enable-load-pre", cl::init(true));
111 static cl::opt<bool> GVNEnableLoadInLoopPRE("enable-load-in-loop-pre",
112                                             cl::init(true));
113 static cl::opt<bool>
114 GVNEnableSplitBackedgeInLoadPRE("enable-split-backedge-in-load-pre",
115                                 cl::init(true));
116 static cl::opt<bool> GVNEnableMemDep("enable-gvn-memdep", cl::init(true));
117 
118 static cl::opt<uint32_t> MaxNumDeps(
119     "gvn-max-num-deps", cl::Hidden, cl::init(100), cl::ZeroOrMore,
120     cl::desc("Max number of dependences to attempt Load PRE (default = 100)"));
121 
122 // This is based on IsValueFullyAvailableInBlockNumSpeculationsMax stat.
123 static cl::opt<uint32_t> MaxBBSpeculations(
124     "gvn-max-block-speculations", cl::Hidden, cl::init(600), cl::ZeroOrMore,
125     cl::desc("Max number of blocks we're willing to speculate on (and recurse "
126              "into) when deducing if a value is fully available or not in GVN "
127              "(default = 600)"));
128 
129 struct llvm::GVN::Expression {
130   uint32_t opcode;
131   bool commutative = false;
132   Type *type = nullptr;
133   SmallVector<uint32_t, 4> varargs;
134 
135   Expression(uint32_t o = ~2U) : opcode(o) {}
136 
137   bool operator==(const Expression &other) const {
138     if (opcode != other.opcode)
139       return false;
140     if (opcode == ~0U || opcode == ~1U)
141       return true;
142     if (type != other.type)
143       return false;
144     if (varargs != other.varargs)
145       return false;
146     return true;
147   }
148 
149   friend hash_code hash_value(const Expression &Value) {
150     return hash_combine(
151         Value.opcode, Value.type,
152         hash_combine_range(Value.varargs.begin(), Value.varargs.end()));
153   }
154 };
155 
156 namespace llvm {
157 
158 template <> struct DenseMapInfo<GVN::Expression> {
159   static inline GVN::Expression getEmptyKey() { return ~0U; }
160   static inline GVN::Expression getTombstoneKey() { return ~1U; }
161 
162   static unsigned getHashValue(const GVN::Expression &e) {
163     using llvm::hash_value;
164 
165     return static_cast<unsigned>(hash_value(e));
166   }
167 
168   static bool isEqual(const GVN::Expression &LHS, const GVN::Expression &RHS) {
169     return LHS == RHS;
170   }
171 };
172 
173 } // end namespace llvm
174 
175 /// Represents a particular available value that we know how to materialize.
176 /// Materialization of an AvailableValue never fails.  An AvailableValue is
177 /// implicitly associated with a rematerialization point which is the
178 /// location of the instruction from which it was formed.
179 struct llvm::gvn::AvailableValue {
180   enum ValType {
181     SimpleVal, // A simple offsetted value that is accessed.
182     LoadVal,   // A value produced by a load.
183     MemIntrin, // A memory intrinsic which is loaded from.
184     UndefVal   // A UndefValue representing a value from dead block (which
185                // is not yet physically removed from the CFG).
186   };
187 
188   /// V - The value that is live out of the block.
189   PointerIntPair<Value *, 2, ValType> Val;
190 
191   /// Offset - The byte offset in Val that is interesting for the load query.
192   unsigned Offset = 0;
193 
194   static AvailableValue get(Value *V, unsigned Offset = 0) {
195     AvailableValue Res;
196     Res.Val.setPointer(V);
197     Res.Val.setInt(SimpleVal);
198     Res.Offset = Offset;
199     return Res;
200   }
201 
202   static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset = 0) {
203     AvailableValue Res;
204     Res.Val.setPointer(MI);
205     Res.Val.setInt(MemIntrin);
206     Res.Offset = Offset;
207     return Res;
208   }
209 
210   static AvailableValue getLoad(LoadInst *LI, unsigned Offset = 0) {
211     AvailableValue Res;
212     Res.Val.setPointer(LI);
213     Res.Val.setInt(LoadVal);
214     Res.Offset = Offset;
215     return Res;
216   }
217 
218   static AvailableValue getUndef() {
219     AvailableValue Res;
220     Res.Val.setPointer(nullptr);
221     Res.Val.setInt(UndefVal);
222     Res.Offset = 0;
223     return Res;
224   }
225 
226   bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
227   bool isCoercedLoadValue() const { return Val.getInt() == LoadVal; }
228   bool isMemIntrinValue() const { return Val.getInt() == MemIntrin; }
229   bool isUndefValue() const { return Val.getInt() == UndefVal; }
230 
231   Value *getSimpleValue() const {
232     assert(isSimpleValue() && "Wrong accessor");
233     return Val.getPointer();
234   }
235 
236   LoadInst *getCoercedLoadValue() const {
237     assert(isCoercedLoadValue() && "Wrong accessor");
238     return cast<LoadInst>(Val.getPointer());
239   }
240 
241   MemIntrinsic *getMemIntrinValue() const {
242     assert(isMemIntrinValue() && "Wrong accessor");
243     return cast<MemIntrinsic>(Val.getPointer());
244   }
245 
246   /// Emit code at the specified insertion point to adjust the value defined
247   /// here to the specified type. This handles various coercion cases.
248   Value *MaterializeAdjustedValue(LoadInst *LI, Instruction *InsertPt,
249                                   GVN &gvn) const;
250 };
251 
252 /// Represents an AvailableValue which can be rematerialized at the end of
253 /// the associated BasicBlock.
254 struct llvm::gvn::AvailableValueInBlock {
255   /// BB - The basic block in question.
256   BasicBlock *BB = nullptr;
257 
258   /// AV - The actual available value
259   AvailableValue AV;
260 
261   static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV) {
262     AvailableValueInBlock Res;
263     Res.BB = BB;
264     Res.AV = std::move(AV);
265     return Res;
266   }
267 
268   static AvailableValueInBlock get(BasicBlock *BB, Value *V,
269                                    unsigned Offset = 0) {
270     return get(BB, AvailableValue::get(V, Offset));
271   }
272 
273   static AvailableValueInBlock getUndef(BasicBlock *BB) {
274     return get(BB, AvailableValue::getUndef());
275   }
276 
277   /// Emit code at the end of this block to adjust the value defined here to
278   /// the specified type. This handles various coercion cases.
279   Value *MaterializeAdjustedValue(LoadInst *LI, GVN &gvn) const {
280     return AV.MaterializeAdjustedValue(LI, BB->getTerminator(), gvn);
281   }
282 };
283 
284 //===----------------------------------------------------------------------===//
285 //                     ValueTable Internal Functions
286 //===----------------------------------------------------------------------===//
287 
288 GVN::Expression GVN::ValueTable::createExpr(Instruction *I) {
289   Expression e;
290   e.type = I->getType();
291   e.opcode = I->getOpcode();
292   for (Instruction::op_iterator OI = I->op_begin(), OE = I->op_end();
293        OI != OE; ++OI)
294     e.varargs.push_back(lookupOrAdd(*OI));
295   if (I->isCommutative()) {
296     // Ensure that commutative instructions that only differ by a permutation
297     // of their operands get the same value number by sorting the operand value
298     // numbers.  Since commutative operands are the 1st two operands it is more
299     // efficient to sort by hand rather than using, say, std::sort.
300     assert(I->getNumOperands() >= 2 && "Unsupported commutative instruction!");
301     if (e.varargs[0] > e.varargs[1])
302       std::swap(e.varargs[0], e.varargs[1]);
303     e.commutative = true;
304   }
305 
306   if (auto *C = dyn_cast<CmpInst>(I)) {
307     // Sort the operand value numbers so x<y and y>x get the same value number.
308     CmpInst::Predicate Predicate = C->getPredicate();
309     if (e.varargs[0] > e.varargs[1]) {
310       std::swap(e.varargs[0], e.varargs[1]);
311       Predicate = CmpInst::getSwappedPredicate(Predicate);
312     }
313     e.opcode = (C->getOpcode() << 8) | Predicate;
314     e.commutative = true;
315   } else if (auto *E = dyn_cast<InsertValueInst>(I)) {
316     e.varargs.append(E->idx_begin(), E->idx_end());
317   } else if (auto *SVI = dyn_cast<ShuffleVectorInst>(I)) {
318     ArrayRef<int> ShuffleMask = SVI->getShuffleMask();
319     e.varargs.append(ShuffleMask.begin(), ShuffleMask.end());
320   }
321 
322   return e;
323 }
324 
325 GVN::Expression GVN::ValueTable::createCmpExpr(unsigned Opcode,
326                                                CmpInst::Predicate Predicate,
327                                                Value *LHS, Value *RHS) {
328   assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
329          "Not a comparison!");
330   Expression e;
331   e.type = CmpInst::makeCmpResultType(LHS->getType());
332   e.varargs.push_back(lookupOrAdd(LHS));
333   e.varargs.push_back(lookupOrAdd(RHS));
334 
335   // Sort the operand value numbers so x<y and y>x get the same value number.
336   if (e.varargs[0] > e.varargs[1]) {
337     std::swap(e.varargs[0], e.varargs[1]);
338     Predicate = CmpInst::getSwappedPredicate(Predicate);
339   }
340   e.opcode = (Opcode << 8) | Predicate;
341   e.commutative = true;
342   return e;
343 }
344 
345 GVN::Expression GVN::ValueTable::createExtractvalueExpr(ExtractValueInst *EI) {
346   assert(EI && "Not an ExtractValueInst?");
347   Expression e;
348   e.type = EI->getType();
349   e.opcode = 0;
350 
351   WithOverflowInst *WO = dyn_cast<WithOverflowInst>(EI->getAggregateOperand());
352   if (WO != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
353     // EI is an extract from one of our with.overflow intrinsics. Synthesize
354     // a semantically equivalent expression instead of an extract value
355     // expression.
356     e.opcode = WO->getBinaryOp();
357     e.varargs.push_back(lookupOrAdd(WO->getLHS()));
358     e.varargs.push_back(lookupOrAdd(WO->getRHS()));
359     return e;
360   }
361 
362   // Not a recognised intrinsic. Fall back to producing an extract value
363   // expression.
364   e.opcode = EI->getOpcode();
365   for (Instruction::op_iterator OI = EI->op_begin(), OE = EI->op_end();
366        OI != OE; ++OI)
367     e.varargs.push_back(lookupOrAdd(*OI));
368 
369   append_range(e.varargs, EI->indices());
370 
371   return e;
372 }
373 
374 //===----------------------------------------------------------------------===//
375 //                     ValueTable External Functions
376 //===----------------------------------------------------------------------===//
377 
378 GVN::ValueTable::ValueTable() = default;
379 GVN::ValueTable::ValueTable(const ValueTable &) = default;
380 GVN::ValueTable::ValueTable(ValueTable &&) = default;
381 GVN::ValueTable::~ValueTable() = default;
382 GVN::ValueTable &GVN::ValueTable::operator=(const GVN::ValueTable &Arg) = default;
383 
384 /// add - Insert a value into the table with a specified value number.
385 void GVN::ValueTable::add(Value *V, uint32_t num) {
386   valueNumbering.insert(std::make_pair(V, num));
387   if (PHINode *PN = dyn_cast<PHINode>(V))
388     NumberingPhi[num] = PN;
389 }
390 
391 uint32_t GVN::ValueTable::lookupOrAddCall(CallInst *C) {
392   if (AA->doesNotAccessMemory(C)) {
393     Expression exp = createExpr(C);
394     uint32_t e = assignExpNewValueNum(exp).first;
395     valueNumbering[C] = e;
396     return e;
397   } else if (MD && AA->onlyReadsMemory(C)) {
398     Expression exp = createExpr(C);
399     auto ValNum = assignExpNewValueNum(exp);
400     if (ValNum.second) {
401       valueNumbering[C] = ValNum.first;
402       return ValNum.first;
403     }
404 
405     MemDepResult local_dep = MD->getDependency(C);
406 
407     if (!local_dep.isDef() && !local_dep.isNonLocal()) {
408       valueNumbering[C] =  nextValueNumber;
409       return nextValueNumber++;
410     }
411 
412     if (local_dep.isDef()) {
413       // For masked load/store intrinsics, the local_dep may actully be
414       // a normal load or store instruction.
415       CallInst *local_cdep = dyn_cast<CallInst>(local_dep.getInst());
416 
417       if (!local_cdep ||
418           local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
419         valueNumbering[C] = nextValueNumber;
420         return nextValueNumber++;
421       }
422 
423       for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
424         uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
425         uint32_t cd_vn = lookupOrAdd(local_cdep->getArgOperand(i));
426         if (c_vn != cd_vn) {
427           valueNumbering[C] = nextValueNumber;
428           return nextValueNumber++;
429         }
430       }
431 
432       uint32_t v = lookupOrAdd(local_cdep);
433       valueNumbering[C] = v;
434       return v;
435     }
436 
437     // Non-local case.
438     const MemoryDependenceResults::NonLocalDepInfo &deps =
439         MD->getNonLocalCallDependency(C);
440     // FIXME: Move the checking logic to MemDep!
441     CallInst* cdep = nullptr;
442 
443     // Check to see if we have a single dominating call instruction that is
444     // identical to C.
445     for (unsigned i = 0, e = deps.size(); i != e; ++i) {
446       const NonLocalDepEntry *I = &deps[i];
447       if (I->getResult().isNonLocal())
448         continue;
449 
450       // We don't handle non-definitions.  If we already have a call, reject
451       // instruction dependencies.
452       if (!I->getResult().isDef() || cdep != nullptr) {
453         cdep = nullptr;
454         break;
455       }
456 
457       CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
458       // FIXME: All duplicated with non-local case.
459       if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){
460         cdep = NonLocalDepCall;
461         continue;
462       }
463 
464       cdep = nullptr;
465       break;
466     }
467 
468     if (!cdep) {
469       valueNumbering[C] = nextValueNumber;
470       return nextValueNumber++;
471     }
472 
473     if (cdep->getNumArgOperands() != C->getNumArgOperands()) {
474       valueNumbering[C] = nextValueNumber;
475       return nextValueNumber++;
476     }
477     for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
478       uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
479       uint32_t cd_vn = lookupOrAdd(cdep->getArgOperand(i));
480       if (c_vn != cd_vn) {
481         valueNumbering[C] = nextValueNumber;
482         return nextValueNumber++;
483       }
484     }
485 
486     uint32_t v = lookupOrAdd(cdep);
487     valueNumbering[C] = v;
488     return v;
489   } else {
490     valueNumbering[C] = nextValueNumber;
491     return nextValueNumber++;
492   }
493 }
494 
495 /// Returns true if a value number exists for the specified value.
496 bool GVN::ValueTable::exists(Value *V) const { return valueNumbering.count(V) != 0; }
497 
498 /// lookup_or_add - Returns the value number for the specified value, assigning
499 /// it a new number if it did not have one before.
500 uint32_t GVN::ValueTable::lookupOrAdd(Value *V) {
501   DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
502   if (VI != valueNumbering.end())
503     return VI->second;
504 
505   if (!isa<Instruction>(V)) {
506     valueNumbering[V] = nextValueNumber;
507     return nextValueNumber++;
508   }
509 
510   Instruction* I = cast<Instruction>(V);
511   Expression exp;
512   switch (I->getOpcode()) {
513     case Instruction::Call:
514       return lookupOrAddCall(cast<CallInst>(I));
515     case Instruction::FNeg:
516     case Instruction::Add:
517     case Instruction::FAdd:
518     case Instruction::Sub:
519     case Instruction::FSub:
520     case Instruction::Mul:
521     case Instruction::FMul:
522     case Instruction::UDiv:
523     case Instruction::SDiv:
524     case Instruction::FDiv:
525     case Instruction::URem:
526     case Instruction::SRem:
527     case Instruction::FRem:
528     case Instruction::Shl:
529     case Instruction::LShr:
530     case Instruction::AShr:
531     case Instruction::And:
532     case Instruction::Or:
533     case Instruction::Xor:
534     case Instruction::ICmp:
535     case Instruction::FCmp:
536     case Instruction::Trunc:
537     case Instruction::ZExt:
538     case Instruction::SExt:
539     case Instruction::FPToUI:
540     case Instruction::FPToSI:
541     case Instruction::UIToFP:
542     case Instruction::SIToFP:
543     case Instruction::FPTrunc:
544     case Instruction::FPExt:
545     case Instruction::PtrToInt:
546     case Instruction::IntToPtr:
547     case Instruction::AddrSpaceCast:
548     case Instruction::BitCast:
549     case Instruction::Select:
550     case Instruction::Freeze:
551     case Instruction::ExtractElement:
552     case Instruction::InsertElement:
553     case Instruction::ShuffleVector:
554     case Instruction::InsertValue:
555     case Instruction::GetElementPtr:
556       exp = createExpr(I);
557       break;
558     case Instruction::ExtractValue:
559       exp = createExtractvalueExpr(cast<ExtractValueInst>(I));
560       break;
561     case Instruction::PHI:
562       valueNumbering[V] = nextValueNumber;
563       NumberingPhi[nextValueNumber] = cast<PHINode>(V);
564       return nextValueNumber++;
565     default:
566       valueNumbering[V] = nextValueNumber;
567       return nextValueNumber++;
568   }
569 
570   uint32_t e = assignExpNewValueNum(exp).first;
571   valueNumbering[V] = e;
572   return e;
573 }
574 
575 /// Returns the value number of the specified value. Fails if
576 /// the value has not yet been numbered.
577 uint32_t GVN::ValueTable::lookup(Value *V, bool Verify) const {
578   DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
579   if (Verify) {
580     assert(VI != valueNumbering.end() && "Value not numbered?");
581     return VI->second;
582   }
583   return (VI != valueNumbering.end()) ? VI->second : 0;
584 }
585 
586 /// Returns the value number of the given comparison,
587 /// assigning it a new number if it did not have one before.  Useful when
588 /// we deduced the result of a comparison, but don't immediately have an
589 /// instruction realizing that comparison to hand.
590 uint32_t GVN::ValueTable::lookupOrAddCmp(unsigned Opcode,
591                                          CmpInst::Predicate Predicate,
592                                          Value *LHS, Value *RHS) {
593   Expression exp = createCmpExpr(Opcode, Predicate, LHS, RHS);
594   return assignExpNewValueNum(exp).first;
595 }
596 
597 /// Remove all entries from the ValueTable.
598 void GVN::ValueTable::clear() {
599   valueNumbering.clear();
600   expressionNumbering.clear();
601   NumberingPhi.clear();
602   PhiTranslateTable.clear();
603   nextValueNumber = 1;
604   Expressions.clear();
605   ExprIdx.clear();
606   nextExprNumber = 0;
607 }
608 
609 /// Remove a value from the value numbering.
610 void GVN::ValueTable::erase(Value *V) {
611   uint32_t Num = valueNumbering.lookup(V);
612   valueNumbering.erase(V);
613   // If V is PHINode, V <--> value number is an one-to-one mapping.
614   if (isa<PHINode>(V))
615     NumberingPhi.erase(Num);
616 }
617 
618 /// verifyRemoved - Verify that the value is removed from all internal data
619 /// structures.
620 void GVN::ValueTable::verifyRemoved(const Value *V) const {
621   for (DenseMap<Value*, uint32_t>::const_iterator
622          I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
623     assert(I->first != V && "Inst still occurs in value numbering map!");
624   }
625 }
626 
627 //===----------------------------------------------------------------------===//
628 //                                GVN Pass
629 //===----------------------------------------------------------------------===//
630 
631 bool GVN::isPREEnabled() const {
632   return Options.AllowPRE.getValueOr(GVNEnablePRE);
633 }
634 
635 bool GVN::isLoadPREEnabled() const {
636   return Options.AllowLoadPRE.getValueOr(GVNEnableLoadPRE);
637 }
638 
639 bool GVN::isLoadInLoopPREEnabled() const {
640   return Options.AllowLoadInLoopPRE.getValueOr(GVNEnableLoadInLoopPRE);
641 }
642 
643 bool GVN::isLoadPRESplitBackedgeEnabled() const {
644   return Options.AllowLoadPRESplitBackedge.getValueOr(
645       GVNEnableSplitBackedgeInLoadPRE);
646 }
647 
648 bool GVN::isMemDepEnabled() const {
649   return Options.AllowMemDep.getValueOr(GVNEnableMemDep);
650 }
651 
652 PreservedAnalyses GVN::run(Function &F, FunctionAnalysisManager &AM) {
653   // FIXME: The order of evaluation of these 'getResult' calls is very
654   // significant! Re-ordering these variables will cause GVN when run alone to
655   // be less effective! We should fix memdep and basic-aa to not exhibit this
656   // behavior, but until then don't change the order here.
657   auto &AC = AM.getResult<AssumptionAnalysis>(F);
658   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
659   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
660   auto &AA = AM.getResult<AAManager>(F);
661   auto *MemDep =
662       isMemDepEnabled() ? &AM.getResult<MemoryDependenceAnalysis>(F) : nullptr;
663   auto *LI = AM.getCachedResult<LoopAnalysis>(F);
664   auto *MSSA = AM.getCachedResult<MemorySSAAnalysis>(F);
665   auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
666   bool Changed = runImpl(F, AC, DT, TLI, AA, MemDep, LI, &ORE,
667                          MSSA ? &MSSA->getMSSA() : nullptr);
668   if (!Changed)
669     return PreservedAnalyses::all();
670   PreservedAnalyses PA;
671   PA.preserve<DominatorTreeAnalysis>();
672   PA.preserve<GlobalsAA>();
673   PA.preserve<TargetLibraryAnalysis>();
674   if (MSSA)
675     PA.preserve<MemorySSAAnalysis>();
676   if (LI)
677     PA.preserve<LoopAnalysis>();
678   return PA;
679 }
680 
681 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
682 LLVM_DUMP_METHOD void GVN::dump(DenseMap<uint32_t, Value*>& d) const {
683   errs() << "{\n";
684   for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
685        E = d.end(); I != E; ++I) {
686       errs() << I->first << "\n";
687       I->second->dump();
688   }
689   errs() << "}\n";
690 }
691 #endif
692 
693 enum class AvailabilityState : char {
694   /// We know the block *is not* fully available. This is a fixpoint.
695   Unavailable = 0,
696   /// We know the block *is* fully available. This is a fixpoint.
697   Available = 1,
698   /// We do not know whether the block is fully available or not,
699   /// but we are currently speculating that it will be.
700   /// If it would have turned out that the block was, in fact, not fully
701   /// available, this would have been cleaned up into an Unavailable.
702   SpeculativelyAvailable = 2,
703 };
704 
705 /// Return true if we can prove that the value
706 /// we're analyzing is fully available in the specified block.  As we go, keep
707 /// track of which blocks we know are fully alive in FullyAvailableBlocks.  This
708 /// map is actually a tri-state map with the following values:
709 ///   0) we know the block *is not* fully available.
710 ///   1) we know the block *is* fully available.
711 ///   2) we do not know whether the block is fully available or not, but we are
712 ///      currently speculating that it will be.
713 static bool IsValueFullyAvailableInBlock(
714     BasicBlock *BB,
715     DenseMap<BasicBlock *, AvailabilityState> &FullyAvailableBlocks) {
716   SmallVector<BasicBlock *, 32> Worklist;
717   Optional<BasicBlock *> UnavailableBB;
718 
719   // The number of times we didn't find an entry for a block in a map and
720   // optimistically inserted an entry marking block as speculatively available.
721   unsigned NumNewNewSpeculativelyAvailableBBs = 0;
722 
723 #ifndef NDEBUG
724   SmallSet<BasicBlock *, 32> NewSpeculativelyAvailableBBs;
725   SmallVector<BasicBlock *, 32> AvailableBBs;
726 #endif
727 
728   Worklist.emplace_back(BB);
729   while (!Worklist.empty()) {
730     BasicBlock *CurrBB = Worklist.pop_back_val(); // LIFO - depth-first!
731     // Optimistically assume that the block is Speculatively Available and check
732     // to see if we already know about this block in one lookup.
733     std::pair<DenseMap<BasicBlock *, AvailabilityState>::iterator, bool> IV =
734         FullyAvailableBlocks.try_emplace(
735             CurrBB, AvailabilityState::SpeculativelyAvailable);
736     AvailabilityState &State = IV.first->second;
737 
738     // Did the entry already exist for this block?
739     if (!IV.second) {
740       if (State == AvailabilityState::Unavailable) {
741         UnavailableBB = CurrBB;
742         break; // Backpropagate unavailability info.
743       }
744 
745 #ifndef NDEBUG
746       AvailableBBs.emplace_back(CurrBB);
747 #endif
748       continue; // Don't recurse further, but continue processing worklist.
749     }
750 
751     // No entry found for block.
752     ++NumNewNewSpeculativelyAvailableBBs;
753     bool OutOfBudget = NumNewNewSpeculativelyAvailableBBs > MaxBBSpeculations;
754 
755     // If we have exhausted our budget, mark this block as unavailable.
756     // Also, if this block has no predecessors, the value isn't live-in here.
757     if (OutOfBudget || pred_empty(CurrBB)) {
758       MaxBBSpeculationCutoffReachedTimes += (int)OutOfBudget;
759       State = AvailabilityState::Unavailable;
760       UnavailableBB = CurrBB;
761       break; // Backpropagate unavailability info.
762     }
763 
764     // Tentatively consider this block as speculatively available.
765 #ifndef NDEBUG
766     NewSpeculativelyAvailableBBs.insert(CurrBB);
767 #endif
768     // And further recurse into block's predecessors, in depth-first order!
769     Worklist.append(pred_begin(CurrBB), pred_end(CurrBB));
770   }
771 
772 #if LLVM_ENABLE_STATS
773   IsValueFullyAvailableInBlockNumSpeculationsMax.updateMax(
774       NumNewNewSpeculativelyAvailableBBs);
775 #endif
776 
777   // If the block isn't marked as fixpoint yet
778   // (the Unavailable and Available states are fixpoints)
779   auto MarkAsFixpointAndEnqueueSuccessors =
780       [&](BasicBlock *BB, AvailabilityState FixpointState) {
781         auto It = FullyAvailableBlocks.find(BB);
782         if (It == FullyAvailableBlocks.end())
783           return; // Never queried this block, leave as-is.
784         switch (AvailabilityState &State = It->second) {
785         case AvailabilityState::Unavailable:
786         case AvailabilityState::Available:
787           return; // Don't backpropagate further, continue processing worklist.
788         case AvailabilityState::SpeculativelyAvailable: // Fix it!
789           State = FixpointState;
790 #ifndef NDEBUG
791           assert(NewSpeculativelyAvailableBBs.erase(BB) &&
792                  "Found a speculatively available successor leftover?");
793 #endif
794           // Queue successors for further processing.
795           Worklist.append(succ_begin(BB), succ_end(BB));
796           return;
797         }
798       };
799 
800   if (UnavailableBB) {
801     // Okay, we have encountered an unavailable block.
802     // Mark speculatively available blocks reachable from UnavailableBB as
803     // unavailable as well. Paths are terminated when they reach blocks not in
804     // FullyAvailableBlocks or they are not marked as speculatively available.
805     Worklist.clear();
806     Worklist.append(succ_begin(*UnavailableBB), succ_end(*UnavailableBB));
807     while (!Worklist.empty())
808       MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
809                                          AvailabilityState::Unavailable);
810   }
811 
812 #ifndef NDEBUG
813   Worklist.clear();
814   for (BasicBlock *AvailableBB : AvailableBBs)
815     Worklist.append(succ_begin(AvailableBB), succ_end(AvailableBB));
816   while (!Worklist.empty())
817     MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
818                                        AvailabilityState::Available);
819 
820   assert(NewSpeculativelyAvailableBBs.empty() &&
821          "Must have fixed all the new speculatively available blocks.");
822 #endif
823 
824   return !UnavailableBB;
825 }
826 
827 /// Given a set of loads specified by ValuesPerBlock,
828 /// construct SSA form, allowing us to eliminate LI.  This returns the value
829 /// that should be used at LI's definition site.
830 static Value *ConstructSSAForLoadSet(LoadInst *LI,
831                          SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
832                                      GVN &gvn) {
833   // Check for the fully redundant, dominating load case.  In this case, we can
834   // just use the dominating value directly.
835   if (ValuesPerBlock.size() == 1 &&
836       gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
837                                                LI->getParent())) {
838     assert(!ValuesPerBlock[0].AV.isUndefValue() &&
839            "Dead BB dominate this block");
840     return ValuesPerBlock[0].MaterializeAdjustedValue(LI, gvn);
841   }
842 
843   // Otherwise, we have to construct SSA form.
844   SmallVector<PHINode*, 8> NewPHIs;
845   SSAUpdater SSAUpdate(&NewPHIs);
846   SSAUpdate.Initialize(LI->getType(), LI->getName());
847 
848   for (const AvailableValueInBlock &AV : ValuesPerBlock) {
849     BasicBlock *BB = AV.BB;
850 
851     if (SSAUpdate.HasValueForBlock(BB))
852       continue;
853 
854     // If the value is the load that we will be eliminating, and the block it's
855     // available in is the block that the load is in, then don't add it as
856     // SSAUpdater will resolve the value to the relevant phi which may let it
857     // avoid phi construction entirely if there's actually only one value.
858     if (BB == LI->getParent() &&
859         ((AV.AV.isSimpleValue() && AV.AV.getSimpleValue() == LI) ||
860          (AV.AV.isCoercedLoadValue() && AV.AV.getCoercedLoadValue() == LI)))
861       continue;
862 
863     SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LI, gvn));
864   }
865 
866   // Perform PHI construction.
867   return SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());
868 }
869 
870 Value *AvailableValue::MaterializeAdjustedValue(LoadInst *LI,
871                                                 Instruction *InsertPt,
872                                                 GVN &gvn) const {
873   Value *Res;
874   Type *LoadTy = LI->getType();
875   const DataLayout &DL = LI->getModule()->getDataLayout();
876   if (isSimpleValue()) {
877     Res = getSimpleValue();
878     if (Res->getType() != LoadTy) {
879       Res = getStoreValueForLoad(Res, Offset, LoadTy, InsertPt, DL);
880 
881       LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset
882                         << "  " << *getSimpleValue() << '\n'
883                         << *Res << '\n'
884                         << "\n\n\n");
885     }
886   } else if (isCoercedLoadValue()) {
887     LoadInst *Load = getCoercedLoadValue();
888     if (Load->getType() == LoadTy && Offset == 0) {
889       Res = Load;
890     } else {
891       Res = getLoadValueForLoad(Load, Offset, LoadTy, InsertPt, DL);
892       // We would like to use gvn.markInstructionForDeletion here, but we can't
893       // because the load is already memoized into the leader map table that GVN
894       // tracks.  It is potentially possible to remove the load from the table,
895       // but then there all of the operations based on it would need to be
896       // rehashed.  Just leave the dead load around.
897       gvn.getMemDep().removeInstruction(Load);
898       LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset
899                         << "  " << *getCoercedLoadValue() << '\n'
900                         << *Res << '\n'
901                         << "\n\n\n");
902     }
903   } else if (isMemIntrinValue()) {
904     Res = getMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy,
905                                  InsertPt, DL);
906     LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
907                       << "  " << *getMemIntrinValue() << '\n'
908                       << *Res << '\n'
909                       << "\n\n\n");
910   } else {
911     assert(isUndefValue() && "Should be UndefVal");
912     LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL Undef:\n";);
913     return UndefValue::get(LoadTy);
914   }
915   assert(Res && "failed to materialize?");
916   return Res;
917 }
918 
919 static bool isLifetimeStart(const Instruction *Inst) {
920   if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
921     return II->getIntrinsicID() == Intrinsic::lifetime_start;
922   return false;
923 }
924 
925 /// Try to locate the three instruction involved in a missed
926 /// load-elimination case that is due to an intervening store.
927 static void reportMayClobberedLoad(LoadInst *LI, MemDepResult DepInfo,
928                                    DominatorTree *DT,
929                                    OptimizationRemarkEmitter *ORE) {
930   using namespace ore;
931 
932   User *OtherAccess = nullptr;
933 
934   OptimizationRemarkMissed R(DEBUG_TYPE, "LoadClobbered", LI);
935   R << "load of type " << NV("Type", LI->getType()) << " not eliminated"
936     << setExtraArgs();
937 
938   for (auto *U : LI->getPointerOperand()->users())
939     if (U != LI && (isa<LoadInst>(U) || isa<StoreInst>(U)) &&
940         DT->dominates(cast<Instruction>(U), LI)) {
941       // FIXME: for now give up if there are multiple memory accesses that
942       // dominate the load.  We need further analysis to decide which one is
943       // that we're forwarding from.
944       if (OtherAccess)
945         OtherAccess = nullptr;
946       else
947         OtherAccess = U;
948     }
949 
950   if (OtherAccess)
951     R << " in favor of " << NV("OtherAccess", OtherAccess);
952 
953   R << " because it is clobbered by " << NV("ClobberedBy", DepInfo.getInst());
954 
955   ORE->emit(R);
956 }
957 
958 bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
959                                   Value *Address, AvailableValue &Res) {
960   assert((DepInfo.isDef() || DepInfo.isClobber()) &&
961          "expected a local dependence");
962   assert(LI->isUnordered() && "rules below are incorrect for ordered access");
963 
964   const DataLayout &DL = LI->getModule()->getDataLayout();
965 
966   Instruction *DepInst = DepInfo.getInst();
967   if (DepInfo.isClobber()) {
968     // If the dependence is to a store that writes to a superset of the bits
969     // read by the load, we can extract the bits we need for the load from the
970     // stored value.
971     if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
972       // Can't forward from non-atomic to atomic without violating memory model.
973       if (Address && LI->isAtomic() <= DepSI->isAtomic()) {
974         int Offset =
975           analyzeLoadFromClobberingStore(LI->getType(), Address, DepSI, DL);
976         if (Offset != -1) {
977           Res = AvailableValue::get(DepSI->getValueOperand(), Offset);
978           return true;
979         }
980       }
981     }
982 
983     // Check to see if we have something like this:
984     //    load i32* P
985     //    load i8* (P+1)
986     // if we have this, replace the later with an extraction from the former.
987     if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
988       // If this is a clobber and L is the first instruction in its block, then
989       // we have the first instruction in the entry block.
990       // Can't forward from non-atomic to atomic without violating memory model.
991       if (DepLI != LI && Address && LI->isAtomic() <= DepLI->isAtomic()) {
992         int Offset =
993           analyzeLoadFromClobberingLoad(LI->getType(), Address, DepLI, DL);
994 
995         if (Offset != -1) {
996           Res = AvailableValue::getLoad(DepLI, Offset);
997           return true;
998         }
999       }
1000     }
1001 
1002     // If the clobbering value is a memset/memcpy/memmove, see if we can
1003     // forward a value on from it.
1004     if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
1005       if (Address && !LI->isAtomic()) {
1006         int Offset = analyzeLoadFromClobberingMemInst(LI->getType(), Address,
1007                                                       DepMI, DL);
1008         if (Offset != -1) {
1009           Res = AvailableValue::getMI(DepMI, Offset);
1010           return true;
1011         }
1012       }
1013     }
1014     // Nothing known about this clobber, have to be conservative
1015     LLVM_DEBUG(
1016         // fast print dep, using operator<< on instruction is too slow.
1017         dbgs() << "GVN: load "; LI->printAsOperand(dbgs());
1018         dbgs() << " is clobbered by " << *DepInst << '\n';);
1019     if (ORE->allowExtraAnalysis(DEBUG_TYPE))
1020       reportMayClobberedLoad(LI, DepInfo, DT, ORE);
1021 
1022     return false;
1023   }
1024   assert(DepInfo.isDef() && "follows from above");
1025 
1026   // Loading the allocation -> undef.
1027   if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI) ||
1028       isAlignedAllocLikeFn(DepInst, TLI) ||
1029       // Loading immediately after lifetime begin -> undef.
1030       isLifetimeStart(DepInst)) {
1031     Res = AvailableValue::get(UndefValue::get(LI->getType()));
1032     return true;
1033   }
1034 
1035   // Loading from calloc (which zero initializes memory) -> zero
1036   if (isCallocLikeFn(DepInst, TLI)) {
1037     Res = AvailableValue::get(Constant::getNullValue(LI->getType()));
1038     return true;
1039   }
1040 
1041   if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
1042     // Reject loads and stores that are to the same address but are of
1043     // different types if we have to. If the stored value is convertable to
1044     // the loaded value, we can reuse it.
1045     if (!canCoerceMustAliasedValueToLoad(S->getValueOperand(), LI->getType(),
1046                                          DL))
1047       return false;
1048 
1049     // Can't forward from non-atomic to atomic without violating memory model.
1050     if (S->isAtomic() < LI->isAtomic())
1051       return false;
1052 
1053     Res = AvailableValue::get(S->getValueOperand());
1054     return true;
1055   }
1056 
1057   if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
1058     // If the types mismatch and we can't handle it, reject reuse of the load.
1059     // If the stored value is larger or equal to the loaded value, we can reuse
1060     // it.
1061     if (!canCoerceMustAliasedValueToLoad(LD, LI->getType(), DL))
1062       return false;
1063 
1064     // Can't forward from non-atomic to atomic without violating memory model.
1065     if (LD->isAtomic() < LI->isAtomic())
1066       return false;
1067 
1068     Res = AvailableValue::getLoad(LD);
1069     return true;
1070   }
1071 
1072   // Unknown def - must be conservative
1073   LLVM_DEBUG(
1074       // fast print dep, using operator<< on instruction is too slow.
1075       dbgs() << "GVN: load "; LI->printAsOperand(dbgs());
1076       dbgs() << " has unknown def " << *DepInst << '\n';);
1077   return false;
1078 }
1079 
1080 void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
1081                                   AvailValInBlkVect &ValuesPerBlock,
1082                                   UnavailBlkVect &UnavailableBlocks) {
1083   // Filter out useless results (non-locals, etc).  Keep track of the blocks
1084   // where we have a value available in repl, also keep track of whether we see
1085   // dependencies that produce an unknown value for the load (such as a call
1086   // that could potentially clobber the load).
1087   unsigned NumDeps = Deps.size();
1088   for (unsigned i = 0, e = NumDeps; i != e; ++i) {
1089     BasicBlock *DepBB = Deps[i].getBB();
1090     MemDepResult DepInfo = Deps[i].getResult();
1091 
1092     if (DeadBlocks.count(DepBB)) {
1093       // Dead dependent mem-op disguise as a load evaluating the same value
1094       // as the load in question.
1095       ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB));
1096       continue;
1097     }
1098 
1099     if (!DepInfo.isDef() && !DepInfo.isClobber()) {
1100       UnavailableBlocks.push_back(DepBB);
1101       continue;
1102     }
1103 
1104     // The address being loaded in this non-local block may not be the same as
1105     // the pointer operand of the load if PHI translation occurs.  Make sure
1106     // to consider the right address.
1107     Value *Address = Deps[i].getAddress();
1108 
1109     AvailableValue AV;
1110     if (AnalyzeLoadAvailability(LI, DepInfo, Address, AV)) {
1111       // subtlety: because we know this was a non-local dependency, we know
1112       // it's safe to materialize anywhere between the instruction within
1113       // DepInfo and the end of it's block.
1114       ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
1115                                                           std::move(AV)));
1116     } else {
1117       UnavailableBlocks.push_back(DepBB);
1118     }
1119   }
1120 
1121   assert(NumDeps == ValuesPerBlock.size() + UnavailableBlocks.size() &&
1122          "post condition violation");
1123 }
1124 
1125 bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
1126                          UnavailBlkVect &UnavailableBlocks) {
1127   // Okay, we have *some* definitions of the value.  This means that the value
1128   // is available in some of our (transitive) predecessors.  Lets think about
1129   // doing PRE of this load.  This will involve inserting a new load into the
1130   // predecessor when it's not available.  We could do this in general, but
1131   // prefer to not increase code size.  As such, we only do this when we know
1132   // that we only have to insert *one* load (which means we're basically moving
1133   // the load, not inserting a new one).
1134 
1135   SmallPtrSet<BasicBlock *, 4> Blockers(UnavailableBlocks.begin(),
1136                                         UnavailableBlocks.end());
1137 
1138   // Let's find the first basic block with more than one predecessor.  Walk
1139   // backwards through predecessors if needed.
1140   BasicBlock *LoadBB = LI->getParent();
1141   BasicBlock *TmpBB = LoadBB;
1142 
1143   // Check that there is no implicit control flow instructions above our load in
1144   // its block. If there is an instruction that doesn't always pass the
1145   // execution to the following instruction, then moving through it may become
1146   // invalid. For example:
1147   //
1148   // int arr[LEN];
1149   // int index = ???;
1150   // ...
1151   // guard(0 <= index && index < LEN);
1152   // use(arr[index]);
1153   //
1154   // It is illegal to move the array access to any point above the guard,
1155   // because if the index is out of bounds we should deoptimize rather than
1156   // access the array.
1157   // Check that there is no guard in this block above our instruction.
1158   bool MustEnsureSafetyOfSpeculativeExecution =
1159       ICF->isDominatedByICFIFromSameBlock(LI);
1160 
1161   while (TmpBB->getSinglePredecessor()) {
1162     TmpBB = TmpBB->getSinglePredecessor();
1163     if (TmpBB == LoadBB) // Infinite (unreachable) loop.
1164       return false;
1165     if (Blockers.count(TmpBB))
1166       return false;
1167 
1168     // If any of these blocks has more than one successor (i.e. if the edge we
1169     // just traversed was critical), then there are other paths through this
1170     // block along which the load may not be anticipated.  Hoisting the load
1171     // above this block would be adding the load to execution paths along
1172     // which it was not previously executed.
1173     if (TmpBB->getTerminator()->getNumSuccessors() != 1)
1174       return false;
1175 
1176     // Check that there is no implicit control flow in a block above.
1177     MustEnsureSafetyOfSpeculativeExecution =
1178         MustEnsureSafetyOfSpeculativeExecution || ICF->hasICF(TmpBB);
1179   }
1180 
1181   assert(TmpBB);
1182   LoadBB = TmpBB;
1183 
1184   // Check to see how many predecessors have the loaded value fully
1185   // available.
1186   MapVector<BasicBlock *, Value *> PredLoads;
1187   DenseMap<BasicBlock *, AvailabilityState> FullyAvailableBlocks;
1188   for (const AvailableValueInBlock &AV : ValuesPerBlock)
1189     FullyAvailableBlocks[AV.BB] = AvailabilityState::Available;
1190   for (BasicBlock *UnavailableBB : UnavailableBlocks)
1191     FullyAvailableBlocks[UnavailableBB] = AvailabilityState::Unavailable;
1192 
1193   SmallVector<BasicBlock *, 4> CriticalEdgePred;
1194   for (BasicBlock *Pred : predecessors(LoadBB)) {
1195     // If any predecessor block is an EH pad that does not allow non-PHI
1196     // instructions before the terminator, we can't PRE the load.
1197     if (Pred->getTerminator()->isEHPad()) {
1198       LLVM_DEBUG(
1199           dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
1200                  << Pred->getName() << "': " << *LI << '\n');
1201       return false;
1202     }
1203 
1204     if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) {
1205       continue;
1206     }
1207 
1208     if (Pred->getTerminator()->getNumSuccessors() != 1) {
1209       if (isa<IndirectBrInst>(Pred->getTerminator())) {
1210         LLVM_DEBUG(
1211             dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
1212                    << Pred->getName() << "': " << *LI << '\n');
1213         return false;
1214       }
1215 
1216       // FIXME: Can we support the fallthrough edge?
1217       if (isa<CallBrInst>(Pred->getTerminator())) {
1218         LLVM_DEBUG(
1219             dbgs() << "COULD NOT PRE LOAD BECAUSE OF CALLBR CRITICAL EDGE '"
1220                    << Pred->getName() << "': " << *LI << '\n');
1221         return false;
1222       }
1223 
1224       if (LoadBB->isEHPad()) {
1225         LLVM_DEBUG(
1226             dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
1227                    << Pred->getName() << "': " << *LI << '\n');
1228         return false;
1229       }
1230 
1231       // Do not split backedge as it will break the canonical loop form.
1232       if (!isLoadPRESplitBackedgeEnabled())
1233         if (DT->dominates(LoadBB, Pred)) {
1234           LLVM_DEBUG(
1235               dbgs()
1236               << "COULD NOT PRE LOAD BECAUSE OF A BACKEDGE CRITICAL EDGE '"
1237               << Pred->getName() << "': " << *LI << '\n');
1238           return false;
1239         }
1240 
1241       CriticalEdgePred.push_back(Pred);
1242     } else {
1243       // Only add the predecessors that will not be split for now.
1244       PredLoads[Pred] = nullptr;
1245     }
1246   }
1247 
1248   // Decide whether PRE is profitable for this load.
1249   unsigned NumUnavailablePreds = PredLoads.size() + CriticalEdgePred.size();
1250   assert(NumUnavailablePreds != 0 &&
1251          "Fully available value should already be eliminated!");
1252 
1253   // If this load is unavailable in multiple predecessors, reject it.
1254   // FIXME: If we could restructure the CFG, we could make a common pred with
1255   // all the preds that don't have an available LI and insert a new load into
1256   // that one block.
1257   if (NumUnavailablePreds != 1)
1258       return false;
1259 
1260   // Now we know where we will insert load. We must ensure that it is safe
1261   // to speculatively execute the load at that points.
1262   if (MustEnsureSafetyOfSpeculativeExecution) {
1263     if (CriticalEdgePred.size())
1264       if (!isSafeToSpeculativelyExecute(LI, LoadBB->getFirstNonPHI(), DT))
1265         return false;
1266     for (auto &PL : PredLoads)
1267       if (!isSafeToSpeculativelyExecute(LI, PL.first->getTerminator(), DT))
1268         return false;
1269   }
1270 
1271   // Split critical edges, and update the unavailable predecessors accordingly.
1272   for (BasicBlock *OrigPred : CriticalEdgePred) {
1273     BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
1274     assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!");
1275     PredLoads[NewPred] = nullptr;
1276     LLVM_DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
1277                       << LoadBB->getName() << '\n');
1278   }
1279 
1280   // Check if the load can safely be moved to all the unavailable predecessors.
1281   bool CanDoPRE = true;
1282   const DataLayout &DL = LI->getModule()->getDataLayout();
1283   SmallVector<Instruction*, 8> NewInsts;
1284   for (auto &PredLoad : PredLoads) {
1285     BasicBlock *UnavailablePred = PredLoad.first;
1286 
1287     // Do PHI translation to get its value in the predecessor if necessary.  The
1288     // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
1289     // We do the translation for each edge we skipped by going from LI's block
1290     // to LoadBB, otherwise we might miss pieces needing translation.
1291 
1292     // If all preds have a single successor, then we know it is safe to insert
1293     // the load on the pred (?!?), so we can insert code to materialize the
1294     // pointer if it is not available.
1295     Value *LoadPtr = LI->getPointerOperand();
1296     BasicBlock *Cur = LI->getParent();
1297     while (Cur != LoadBB) {
1298       PHITransAddr Address(LoadPtr, DL, AC);
1299       LoadPtr = Address.PHITranslateWithInsertion(
1300           Cur, Cur->getSinglePredecessor(), *DT, NewInsts);
1301       if (!LoadPtr) {
1302         CanDoPRE = false;
1303         break;
1304       }
1305       Cur = Cur->getSinglePredecessor();
1306     }
1307 
1308     if (LoadPtr) {
1309       PHITransAddr Address(LoadPtr, DL, AC);
1310       LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred, *DT,
1311                                                   NewInsts);
1312     }
1313     // If we couldn't find or insert a computation of this phi translated value,
1314     // we fail PRE.
1315     if (!LoadPtr) {
1316       LLVM_DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
1317                         << *LI->getPointerOperand() << "\n");
1318       CanDoPRE = false;
1319       break;
1320     }
1321 
1322     PredLoad.second = LoadPtr;
1323   }
1324 
1325   if (!CanDoPRE) {
1326     while (!NewInsts.empty()) {
1327       // Erase instructions generated by the failed PHI translation before
1328       // trying to number them. PHI translation might insert instructions
1329       // in basic blocks other than the current one, and we delete them
1330       // directly, as markInstructionForDeletion only allows removing from the
1331       // current basic block.
1332       NewInsts.pop_back_val()->eraseFromParent();
1333     }
1334     // HINT: Don't revert the edge-splitting as following transformation may
1335     // also need to split these critical edges.
1336     return !CriticalEdgePred.empty();
1337   }
1338 
1339   // Okay, we can eliminate this load by inserting a reload in the predecessor
1340   // and using PHI construction to get the value in the other predecessors, do
1341   // it.
1342   LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
1343   LLVM_DEBUG(if (!NewInsts.empty()) dbgs()
1344              << "INSERTED " << NewInsts.size() << " INSTS: " << *NewInsts.back()
1345              << '\n');
1346 
1347   // Assign value numbers to the new instructions.
1348   for (Instruction *I : NewInsts) {
1349     // Instructions that have been inserted in predecessor(s) to materialize
1350     // the load address do not retain their original debug locations. Doing
1351     // so could lead to confusing (but correct) source attributions.
1352     I->updateLocationAfterHoist();
1353 
1354     // FIXME: We really _ought_ to insert these value numbers into their
1355     // parent's availability map.  However, in doing so, we risk getting into
1356     // ordering issues.  If a block hasn't been processed yet, we would be
1357     // marking a value as AVAIL-IN, which isn't what we intend.
1358     VN.lookupOrAdd(I);
1359   }
1360 
1361   for (const auto &PredLoad : PredLoads) {
1362     BasicBlock *UnavailablePred = PredLoad.first;
1363     Value *LoadPtr = PredLoad.second;
1364 
1365     auto *NewLoad = new LoadInst(
1366         LI->getType(), LoadPtr, LI->getName() + ".pre", LI->isVolatile(),
1367         LI->getAlign(), LI->getOrdering(), LI->getSyncScopeID(),
1368         UnavailablePred->getTerminator());
1369     NewLoad->setDebugLoc(LI->getDebugLoc());
1370     if (MSSAU) {
1371       auto *MSSA = MSSAU->getMemorySSA();
1372       // Get the defining access of the original load or use the load if it is a
1373       // MemoryDef (e.g. because it is volatile). The inserted loads are
1374       // guaranteed to load from the same definition.
1375       auto *LIAcc = MSSA->getMemoryAccess(LI);
1376       auto *DefiningAcc =
1377           isa<MemoryDef>(LIAcc) ? LIAcc : LIAcc->getDefiningAccess();
1378       auto *NewAccess = MSSAU->createMemoryAccessInBB(
1379           NewLoad, DefiningAcc, NewLoad->getParent(),
1380           MemorySSA::BeforeTerminator);
1381       if (auto *NewDef = dyn_cast<MemoryDef>(NewAccess))
1382         MSSAU->insertDef(NewDef, /*RenameUses=*/true);
1383       else
1384         MSSAU->insertUse(cast<MemoryUse>(NewAccess), /*RenameUses=*/true);
1385     }
1386 
1387     // Transfer the old load's AA tags to the new load.
1388     AAMDNodes Tags;
1389     LI->getAAMetadata(Tags);
1390     if (Tags)
1391       NewLoad->setAAMetadata(Tags);
1392 
1393     if (auto *MD = LI->getMetadata(LLVMContext::MD_invariant_load))
1394       NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD);
1395     if (auto *InvGroupMD = LI->getMetadata(LLVMContext::MD_invariant_group))
1396       NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD);
1397     if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range))
1398       NewLoad->setMetadata(LLVMContext::MD_range, RangeMD);
1399 
1400     // We do not propagate the old load's debug location, because the new
1401     // load now lives in a different BB, and we want to avoid a jumpy line
1402     // table.
1403     // FIXME: How do we retain source locations without causing poor debugging
1404     // behavior?
1405 
1406     // Add the newly created load.
1407     ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
1408                                                         NewLoad));
1409     MD->invalidateCachedPointerInfo(LoadPtr);
1410     LLVM_DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
1411   }
1412 
1413   // Perform PHI construction.
1414   Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this);
1415   LI->replaceAllUsesWith(V);
1416   if (isa<PHINode>(V))
1417     V->takeName(LI);
1418   if (Instruction *I = dyn_cast<Instruction>(V))
1419     I->setDebugLoc(LI->getDebugLoc());
1420   if (V->getType()->isPtrOrPtrVectorTy())
1421     MD->invalidateCachedPointerInfo(V);
1422   markInstructionForDeletion(LI);
1423   ORE->emit([&]() {
1424     return OptimizationRemark(DEBUG_TYPE, "LoadPRE", LI)
1425            << "load eliminated by PRE";
1426   });
1427   ++NumPRELoad;
1428   return true;
1429 }
1430 
1431 static void reportLoadElim(LoadInst *LI, Value *AvailableValue,
1432                            OptimizationRemarkEmitter *ORE) {
1433   using namespace ore;
1434 
1435   ORE->emit([&]() {
1436     return OptimizationRemark(DEBUG_TYPE, "LoadElim", LI)
1437            << "load of type " << NV("Type", LI->getType()) << " eliminated"
1438            << setExtraArgs() << " in favor of "
1439            << NV("InfavorOfValue", AvailableValue);
1440   });
1441 }
1442 
1443 /// Attempt to eliminate a load whose dependencies are
1444 /// non-local by performing PHI construction.
1445 bool GVN::processNonLocalLoad(LoadInst *LI) {
1446   // non-local speculations are not allowed under asan.
1447   if (LI->getParent()->getParent()->hasFnAttribute(
1448           Attribute::SanitizeAddress) ||
1449       LI->getParent()->getParent()->hasFnAttribute(
1450           Attribute::SanitizeHWAddress))
1451     return false;
1452 
1453   // Step 1: Find the non-local dependencies of the load.
1454   LoadDepVect Deps;
1455   MD->getNonLocalPointerDependency(LI, Deps);
1456 
1457   // If we had to process more than one hundred blocks to find the
1458   // dependencies, this load isn't worth worrying about.  Optimizing
1459   // it will be too expensive.
1460   unsigned NumDeps = Deps.size();
1461   if (NumDeps > MaxNumDeps)
1462     return false;
1463 
1464   // If we had a phi translation failure, we'll have a single entry which is a
1465   // clobber in the current block.  Reject this early.
1466   if (NumDeps == 1 &&
1467       !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
1468     LLVM_DEBUG(dbgs() << "GVN: non-local load "; LI->printAsOperand(dbgs());
1469                dbgs() << " has unknown dependencies\n";);
1470     return false;
1471   }
1472 
1473   bool Changed = false;
1474   // If this load follows a GEP, see if we can PRE the indices before analyzing.
1475   if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0))) {
1476     for (GetElementPtrInst::op_iterator OI = GEP->idx_begin(),
1477                                         OE = GEP->idx_end();
1478          OI != OE; ++OI)
1479       if (Instruction *I = dyn_cast<Instruction>(OI->get()))
1480         Changed |= performScalarPRE(I);
1481   }
1482 
1483   // Step 2: Analyze the availability of the load
1484   AvailValInBlkVect ValuesPerBlock;
1485   UnavailBlkVect UnavailableBlocks;
1486   AnalyzeLoadAvailability(LI, Deps, ValuesPerBlock, UnavailableBlocks);
1487 
1488   // If we have no predecessors that produce a known value for this load, exit
1489   // early.
1490   if (ValuesPerBlock.empty())
1491     return Changed;
1492 
1493   // Step 3: Eliminate fully redundancy.
1494   //
1495   // If all of the instructions we depend on produce a known value for this
1496   // load, then it is fully redundant and we can use PHI insertion to compute
1497   // its value.  Insert PHIs and remove the fully redundant value now.
1498   if (UnavailableBlocks.empty()) {
1499     LLVM_DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');
1500 
1501     // Perform PHI construction.
1502     Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this);
1503     LI->replaceAllUsesWith(V);
1504 
1505     if (isa<PHINode>(V))
1506       V->takeName(LI);
1507     if (Instruction *I = dyn_cast<Instruction>(V))
1508       // If instruction I has debug info, then we should not update it.
1509       // Also, if I has a null DebugLoc, then it is still potentially incorrect
1510       // to propagate LI's DebugLoc because LI may not post-dominate I.
1511       if (LI->getDebugLoc() && LI->getParent() == I->getParent())
1512         I->setDebugLoc(LI->getDebugLoc());
1513     if (V->getType()->isPtrOrPtrVectorTy())
1514       MD->invalidateCachedPointerInfo(V);
1515     markInstructionForDeletion(LI);
1516     ++NumGVNLoad;
1517     reportLoadElim(LI, V, ORE);
1518     return true;
1519   }
1520 
1521   // Step 4: Eliminate partial redundancy.
1522   if (!isPREEnabled() || !isLoadPREEnabled())
1523     return Changed;
1524   if (!isLoadInLoopPREEnabled() && this->LI &&
1525       this->LI->getLoopFor(LI->getParent()))
1526     return Changed;
1527 
1528   return Changed || PerformLoadPRE(LI, ValuesPerBlock, UnavailableBlocks);
1529 }
1530 
1531 static bool impliesEquivalanceIfTrue(CmpInst* Cmp) {
1532   if (Cmp->getPredicate() == CmpInst::Predicate::ICMP_EQ)
1533     return true;
1534 
1535   // Floating point comparisons can be equal, but not equivalent.  Cases:
1536   // NaNs for unordered operators
1537   // +0.0 vs 0.0 for all operators
1538   if (Cmp->getPredicate() == CmpInst::Predicate::FCMP_OEQ ||
1539       (Cmp->getPredicate() == CmpInst::Predicate::FCMP_UEQ &&
1540        Cmp->getFastMathFlags().noNaNs())) {
1541       Value *LHS = Cmp->getOperand(0);
1542       Value *RHS = Cmp->getOperand(1);
1543       // If we can prove either side non-zero, then equality must imply
1544       // equivalence.
1545       // FIXME: We should do this optimization if 'no signed zeros' is
1546       // applicable via an instruction-level fast-math-flag or some other
1547       // indicator that relaxed FP semantics are being used.
1548       if (isa<ConstantFP>(LHS) && !cast<ConstantFP>(LHS)->isZero())
1549         return true;
1550       if (isa<ConstantFP>(RHS) && !cast<ConstantFP>(RHS)->isZero())
1551         return true;;
1552       // TODO: Handle vector floating point constants
1553   }
1554   return false;
1555 }
1556 
1557 static bool impliesEquivalanceIfFalse(CmpInst* Cmp) {
1558   if (Cmp->getPredicate() == CmpInst::Predicate::ICMP_NE)
1559     return true;
1560 
1561   // Floating point comparisons can be equal, but not equivelent.  Cases:
1562   // NaNs for unordered operators
1563   // +0.0 vs 0.0 for all operators
1564   if ((Cmp->getPredicate() == CmpInst::Predicate::FCMP_ONE &&
1565        Cmp->getFastMathFlags().noNaNs()) ||
1566       Cmp->getPredicate() == CmpInst::Predicate::FCMP_UNE) {
1567       Value *LHS = Cmp->getOperand(0);
1568       Value *RHS = Cmp->getOperand(1);
1569       // If we can prove either side non-zero, then equality must imply
1570       // equivalence.
1571       // FIXME: We should do this optimization if 'no signed zeros' is
1572       // applicable via an instruction-level fast-math-flag or some other
1573       // indicator that relaxed FP semantics are being used.
1574       if (isa<ConstantFP>(LHS) && !cast<ConstantFP>(LHS)->isZero())
1575         return true;
1576       if (isa<ConstantFP>(RHS) && !cast<ConstantFP>(RHS)->isZero())
1577         return true;;
1578       // TODO: Handle vector floating point constants
1579   }
1580   return false;
1581 }
1582 
1583 
1584 static bool hasUsersIn(Value *V, BasicBlock *BB) {
1585   for (User *U : V->users())
1586     if (isa<Instruction>(U) &&
1587         cast<Instruction>(U)->getParent() == BB)
1588       return true;
1589   return false;
1590 }
1591 
1592 bool GVN::processAssumeIntrinsic(IntrinsicInst *IntrinsicI) {
1593   assert(IntrinsicI->getIntrinsicID() == Intrinsic::assume &&
1594          "This function can only be called with llvm.assume intrinsic");
1595   Value *V = IntrinsicI->getArgOperand(0);
1596 
1597   if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
1598     if (Cond->isZero()) {
1599       Type *Int8Ty = Type::getInt8Ty(V->getContext());
1600       // Insert a new store to null instruction before the load to indicate that
1601       // this code is not reachable.  FIXME: We could insert unreachable
1602       // instruction directly because we can modify the CFG.
1603       auto *NewS = new StoreInst(UndefValue::get(Int8Ty),
1604                                  Constant::getNullValue(Int8Ty->getPointerTo()),
1605                                  IntrinsicI);
1606       if (MSSAU) {
1607         const MemoryUseOrDef *FirstNonDom = nullptr;
1608         const auto *AL =
1609             MSSAU->getMemorySSA()->getBlockAccesses(IntrinsicI->getParent());
1610 
1611         // If there are accesses in the current basic block, find the first one
1612         // that does not come before NewS. The new memory access is inserted
1613         // after the found access or before the terminator if no such access is
1614         // found.
1615         if (AL) {
1616           for (auto &Acc : *AL) {
1617             if (auto *Current = dyn_cast<MemoryUseOrDef>(&Acc))
1618               if (!Current->getMemoryInst()->comesBefore(NewS)) {
1619                 FirstNonDom = Current;
1620                 break;
1621               }
1622           }
1623         }
1624 
1625         // This added store is to null, so it will never executed and we can
1626         // just use the LiveOnEntry def as defining access.
1627         auto *NewDef =
1628             FirstNonDom ? MSSAU->createMemoryAccessBefore(
1629                               NewS, MSSAU->getMemorySSA()->getLiveOnEntryDef(),
1630                               const_cast<MemoryUseOrDef *>(FirstNonDom))
1631                         : MSSAU->createMemoryAccessInBB(
1632                               NewS, MSSAU->getMemorySSA()->getLiveOnEntryDef(),
1633                               NewS->getParent(), MemorySSA::BeforeTerminator);
1634 
1635         MSSAU->insertDef(cast<MemoryDef>(NewDef), /*RenameUses=*/false);
1636       }
1637     }
1638     if (isAssumeWithEmptyBundle(*IntrinsicI))
1639       markInstructionForDeletion(IntrinsicI);
1640     return false;
1641   } else if (isa<Constant>(V)) {
1642     // If it's not false, and constant, it must evaluate to true. This means our
1643     // assume is assume(true), and thus, pointless, and we don't want to do
1644     // anything more here.
1645     return false;
1646   }
1647 
1648   Constant *True = ConstantInt::getTrue(V->getContext());
1649   bool Changed = false;
1650 
1651   for (BasicBlock *Successor : successors(IntrinsicI->getParent())) {
1652     BasicBlockEdge Edge(IntrinsicI->getParent(), Successor);
1653 
1654     // This property is only true in dominated successors, propagateEquality
1655     // will check dominance for us.
1656     Changed |= propagateEquality(V, True, Edge, false);
1657   }
1658 
1659   // We can replace assume value with true, which covers cases like this:
1660   // call void @llvm.assume(i1 %cmp)
1661   // br i1 %cmp, label %bb1, label %bb2 ; will change %cmp to true
1662   ReplaceOperandsWithMap[V] = True;
1663 
1664   // Similarly, after assume(!NotV) we know that NotV == false.
1665   Value *NotV;
1666   if (match(V, m_Not(m_Value(NotV))))
1667     ReplaceOperandsWithMap[NotV] = ConstantInt::getFalse(V->getContext());
1668 
1669   // If we find an equality fact, canonicalize all dominated uses in this block
1670   // to one of the two values.  We heuristically choice the "oldest" of the
1671   // two where age is determined by value number. (Note that propagateEquality
1672   // above handles the cross block case.)
1673   //
1674   // Key case to cover are:
1675   // 1)
1676   // %cmp = fcmp oeq float 3.000000e+00, %0 ; const on lhs could happen
1677   // call void @llvm.assume(i1 %cmp)
1678   // ret float %0 ; will change it to ret float 3.000000e+00
1679   // 2)
1680   // %load = load float, float* %addr
1681   // %cmp = fcmp oeq float %load, %0
1682   // call void @llvm.assume(i1 %cmp)
1683   // ret float %load ; will change it to ret float %0
1684   if (auto *CmpI = dyn_cast<CmpInst>(V)) {
1685     if (impliesEquivalanceIfTrue(CmpI)) {
1686       Value *CmpLHS = CmpI->getOperand(0);
1687       Value *CmpRHS = CmpI->getOperand(1);
1688       // Heuristically pick the better replacement -- the choice of heuristic
1689       // isn't terribly important here, but the fact we canonicalize on some
1690       // replacement is for exposing other simplifications.
1691       // TODO: pull this out as a helper function and reuse w/existing
1692       // (slightly different) logic.
1693       if (isa<Constant>(CmpLHS) && !isa<Constant>(CmpRHS))
1694         std::swap(CmpLHS, CmpRHS);
1695       if (!isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))
1696         std::swap(CmpLHS, CmpRHS);
1697       if ((isa<Argument>(CmpLHS) && isa<Argument>(CmpRHS)) ||
1698           (isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))) {
1699         // Move the 'oldest' value to the right-hand side, using the value
1700         // number as a proxy for age.
1701         uint32_t LVN = VN.lookupOrAdd(CmpLHS);
1702         uint32_t RVN = VN.lookupOrAdd(CmpRHS);
1703         if (LVN < RVN)
1704           std::swap(CmpLHS, CmpRHS);
1705       }
1706 
1707       // Handle degenerate case where we either haven't pruned a dead path or a
1708       // removed a trivial assume yet.
1709       if (isa<Constant>(CmpLHS) && isa<Constant>(CmpRHS))
1710         return Changed;
1711 
1712       LLVM_DEBUG(dbgs() << "Replacing dominated uses of "
1713                  << *CmpLHS << " with "
1714                  << *CmpRHS << " in block "
1715                  << IntrinsicI->getParent()->getName() << "\n");
1716 
1717 
1718       // Setup the replacement map - this handles uses within the same block
1719       if (hasUsersIn(CmpLHS, IntrinsicI->getParent()))
1720         ReplaceOperandsWithMap[CmpLHS] = CmpRHS;
1721 
1722       // NOTE: The non-block local cases are handled by the call to
1723       // propagateEquality above; this block is just about handling the block
1724       // local cases.  TODO: There's a bunch of logic in propagateEqualiy which
1725       // isn't duplicated for the block local case, can we share it somehow?
1726     }
1727   }
1728   return Changed;
1729 }
1730 
1731 static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) {
1732   patchReplacementInstruction(I, Repl);
1733   I->replaceAllUsesWith(Repl);
1734 }
1735 
1736 /// Attempt to eliminate a load, first by eliminating it
1737 /// locally, and then attempting non-local elimination if that fails.
1738 bool GVN::processLoad(LoadInst *L) {
1739   if (!MD)
1740     return false;
1741 
1742   // This code hasn't been audited for ordered or volatile memory access
1743   if (!L->isUnordered())
1744     return false;
1745 
1746   if (L->use_empty()) {
1747     markInstructionForDeletion(L);
1748     return true;
1749   }
1750 
1751   // ... to a pointer that has been loaded from before...
1752   MemDepResult Dep = MD->getDependency(L);
1753 
1754   // If it is defined in another block, try harder.
1755   if (Dep.isNonLocal())
1756     return processNonLocalLoad(L);
1757 
1758   // Only handle the local case below
1759   if (!Dep.isDef() && !Dep.isClobber()) {
1760     // This might be a NonFuncLocal or an Unknown
1761     LLVM_DEBUG(
1762         // fast print dep, using operator<< on instruction is too slow.
1763         dbgs() << "GVN: load "; L->printAsOperand(dbgs());
1764         dbgs() << " has unknown dependence\n";);
1765     return false;
1766   }
1767 
1768   AvailableValue AV;
1769   if (AnalyzeLoadAvailability(L, Dep, L->getPointerOperand(), AV)) {
1770     Value *AvailableValue = AV.MaterializeAdjustedValue(L, L, *this);
1771 
1772     // Replace the load!
1773     patchAndReplaceAllUsesWith(L, AvailableValue);
1774     markInstructionForDeletion(L);
1775     if (MSSAU)
1776       MSSAU->removeMemoryAccess(L);
1777     ++NumGVNLoad;
1778     reportLoadElim(L, AvailableValue, ORE);
1779     // Tell MDA to rexamine the reused pointer since we might have more
1780     // information after forwarding it.
1781     if (MD && AvailableValue->getType()->isPtrOrPtrVectorTy())
1782       MD->invalidateCachedPointerInfo(AvailableValue);
1783     return true;
1784   }
1785 
1786   return false;
1787 }
1788 
1789 /// Return a pair the first field showing the value number of \p Exp and the
1790 /// second field showing whether it is a value number newly created.
1791 std::pair<uint32_t, bool>
1792 GVN::ValueTable::assignExpNewValueNum(Expression &Exp) {
1793   uint32_t &e = expressionNumbering[Exp];
1794   bool CreateNewValNum = !e;
1795   if (CreateNewValNum) {
1796     Expressions.push_back(Exp);
1797     if (ExprIdx.size() < nextValueNumber + 1)
1798       ExprIdx.resize(nextValueNumber * 2);
1799     e = nextValueNumber;
1800     ExprIdx[nextValueNumber++] = nextExprNumber++;
1801   }
1802   return {e, CreateNewValNum};
1803 }
1804 
1805 /// Return whether all the values related with the same \p num are
1806 /// defined in \p BB.
1807 bool GVN::ValueTable::areAllValsInBB(uint32_t Num, const BasicBlock *BB,
1808                                      GVN &Gvn) {
1809   LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
1810   while (Vals && Vals->BB == BB)
1811     Vals = Vals->Next;
1812   return !Vals;
1813 }
1814 
1815 /// Wrap phiTranslateImpl to provide caching functionality.
1816 uint32_t GVN::ValueTable::phiTranslate(const BasicBlock *Pred,
1817                                        const BasicBlock *PhiBlock, uint32_t Num,
1818                                        GVN &Gvn) {
1819   auto FindRes = PhiTranslateTable.find({Num, Pred});
1820   if (FindRes != PhiTranslateTable.end())
1821     return FindRes->second;
1822   uint32_t NewNum = phiTranslateImpl(Pred, PhiBlock, Num, Gvn);
1823   PhiTranslateTable.insert({{Num, Pred}, NewNum});
1824   return NewNum;
1825 }
1826 
1827 // Return true if the value number \p Num and NewNum have equal value.
1828 // Return false if the result is unknown.
1829 bool GVN::ValueTable::areCallValsEqual(uint32_t Num, uint32_t NewNum,
1830                                        const BasicBlock *Pred,
1831                                        const BasicBlock *PhiBlock, GVN &Gvn) {
1832   CallInst *Call = nullptr;
1833   LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
1834   while (Vals) {
1835     Call = dyn_cast<CallInst>(Vals->Val);
1836     if (Call && Call->getParent() == PhiBlock)
1837       break;
1838     Vals = Vals->Next;
1839   }
1840 
1841   if (AA->doesNotAccessMemory(Call))
1842     return true;
1843 
1844   if (!MD || !AA->onlyReadsMemory(Call))
1845     return false;
1846 
1847   MemDepResult local_dep = MD->getDependency(Call);
1848   if (!local_dep.isNonLocal())
1849     return false;
1850 
1851   const MemoryDependenceResults::NonLocalDepInfo &deps =
1852       MD->getNonLocalCallDependency(Call);
1853 
1854   // Check to see if the Call has no function local clobber.
1855   for (unsigned i = 0; i < deps.size(); i++) {
1856     if (deps[i].getResult().isNonFuncLocal())
1857       return true;
1858   }
1859   return false;
1860 }
1861 
1862 /// Translate value number \p Num using phis, so that it has the values of
1863 /// the phis in BB.
1864 uint32_t GVN::ValueTable::phiTranslateImpl(const BasicBlock *Pred,
1865                                            const BasicBlock *PhiBlock,
1866                                            uint32_t Num, GVN &Gvn) {
1867   if (PHINode *PN = NumberingPhi[Num]) {
1868     for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
1869       if (PN->getParent() == PhiBlock && PN->getIncomingBlock(i) == Pred)
1870         if (uint32_t TransVal = lookup(PN->getIncomingValue(i), false))
1871           return TransVal;
1872     }
1873     return Num;
1874   }
1875 
1876   // If there is any value related with Num is defined in a BB other than
1877   // PhiBlock, it cannot depend on a phi in PhiBlock without going through
1878   // a backedge. We can do an early exit in that case to save compile time.
1879   if (!areAllValsInBB(Num, PhiBlock, Gvn))
1880     return Num;
1881 
1882   if (Num >= ExprIdx.size() || ExprIdx[Num] == 0)
1883     return Num;
1884   Expression Exp = Expressions[ExprIdx[Num]];
1885 
1886   for (unsigned i = 0; i < Exp.varargs.size(); i++) {
1887     // For InsertValue and ExtractValue, some varargs are index numbers
1888     // instead of value numbers. Those index numbers should not be
1889     // translated.
1890     if ((i > 1 && Exp.opcode == Instruction::InsertValue) ||
1891         (i > 0 && Exp.opcode == Instruction::ExtractValue) ||
1892         (i > 1 && Exp.opcode == Instruction::ShuffleVector))
1893       continue;
1894     Exp.varargs[i] = phiTranslate(Pred, PhiBlock, Exp.varargs[i], Gvn);
1895   }
1896 
1897   if (Exp.commutative) {
1898     assert(Exp.varargs.size() >= 2 && "Unsupported commutative instruction!");
1899     if (Exp.varargs[0] > Exp.varargs[1]) {
1900       std::swap(Exp.varargs[0], Exp.varargs[1]);
1901       uint32_t Opcode = Exp.opcode >> 8;
1902       if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)
1903         Exp.opcode = (Opcode << 8) |
1904                      CmpInst::getSwappedPredicate(
1905                          static_cast<CmpInst::Predicate>(Exp.opcode & 255));
1906     }
1907   }
1908 
1909   if (uint32_t NewNum = expressionNumbering[Exp]) {
1910     if (Exp.opcode == Instruction::Call && NewNum != Num)
1911       return areCallValsEqual(Num, NewNum, Pred, PhiBlock, Gvn) ? NewNum : Num;
1912     return NewNum;
1913   }
1914   return Num;
1915 }
1916 
1917 /// Erase stale entry from phiTranslate cache so phiTranslate can be computed
1918 /// again.
1919 void GVN::ValueTable::eraseTranslateCacheEntry(uint32_t Num,
1920                                                const BasicBlock &CurrBlock) {
1921   for (const BasicBlock *Pred : predecessors(&CurrBlock))
1922     PhiTranslateTable.erase({Num, Pred});
1923 }
1924 
1925 // In order to find a leader for a given value number at a
1926 // specific basic block, we first obtain the list of all Values for that number,
1927 // and then scan the list to find one whose block dominates the block in
1928 // question.  This is fast because dominator tree queries consist of only
1929 // a few comparisons of DFS numbers.
1930 Value *GVN::findLeader(const BasicBlock *BB, uint32_t num) {
1931   LeaderTableEntry Vals = LeaderTable[num];
1932   if (!Vals.Val) return nullptr;
1933 
1934   Value *Val = nullptr;
1935   if (DT->dominates(Vals.BB, BB)) {
1936     Val = Vals.Val;
1937     if (isa<Constant>(Val)) return Val;
1938   }
1939 
1940   LeaderTableEntry* Next = Vals.Next;
1941   while (Next) {
1942     if (DT->dominates(Next->BB, BB)) {
1943       if (isa<Constant>(Next->Val)) return Next->Val;
1944       if (!Val) Val = Next->Val;
1945     }
1946 
1947     Next = Next->Next;
1948   }
1949 
1950   return Val;
1951 }
1952 
1953 /// There is an edge from 'Src' to 'Dst'.  Return
1954 /// true if every path from the entry block to 'Dst' passes via this edge.  In
1955 /// particular 'Dst' must not be reachable via another edge from 'Src'.
1956 static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E,
1957                                        DominatorTree *DT) {
1958   // While in theory it is interesting to consider the case in which Dst has
1959   // more than one predecessor, because Dst might be part of a loop which is
1960   // only reachable from Src, in practice it is pointless since at the time
1961   // GVN runs all such loops have preheaders, which means that Dst will have
1962   // been changed to have only one predecessor, namely Src.
1963   const BasicBlock *Pred = E.getEnd()->getSinglePredecessor();
1964   assert((!Pred || Pred == E.getStart()) &&
1965          "No edge between these basic blocks!");
1966   return Pred != nullptr;
1967 }
1968 
1969 void GVN::assignBlockRPONumber(Function &F) {
1970   BlockRPONumber.clear();
1971   uint32_t NextBlockNumber = 1;
1972   ReversePostOrderTraversal<Function *> RPOT(&F);
1973   for (BasicBlock *BB : RPOT)
1974     BlockRPONumber[BB] = NextBlockNumber++;
1975   InvalidBlockRPONumbers = false;
1976 }
1977 
1978 bool GVN::replaceOperandsForInBlockEquality(Instruction *Instr) const {
1979   bool Changed = false;
1980   for (unsigned OpNum = 0; OpNum < Instr->getNumOperands(); ++OpNum) {
1981     Value *Operand = Instr->getOperand(OpNum);
1982     auto it = ReplaceOperandsWithMap.find(Operand);
1983     if (it != ReplaceOperandsWithMap.end()) {
1984       LLVM_DEBUG(dbgs() << "GVN replacing: " << *Operand << " with "
1985                         << *it->second << " in instruction " << *Instr << '\n');
1986       Instr->setOperand(OpNum, it->second);
1987       Changed = true;
1988     }
1989   }
1990   return Changed;
1991 }
1992 
1993 /// The given values are known to be equal in every block
1994 /// dominated by 'Root'.  Exploit this, for example by replacing 'LHS' with
1995 /// 'RHS' everywhere in the scope.  Returns whether a change was made.
1996 /// If DominatesByEdge is false, then it means that we will propagate the RHS
1997 /// value starting from the end of Root.Start.
1998 bool GVN::propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root,
1999                             bool DominatesByEdge) {
2000   SmallVector<std::pair<Value*, Value*>, 4> Worklist;
2001   Worklist.push_back(std::make_pair(LHS, RHS));
2002   bool Changed = false;
2003   // For speed, compute a conservative fast approximation to
2004   // DT->dominates(Root, Root.getEnd());
2005   const bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT);
2006 
2007   while (!Worklist.empty()) {
2008     std::pair<Value*, Value*> Item = Worklist.pop_back_val();
2009     LHS = Item.first; RHS = Item.second;
2010 
2011     if (LHS == RHS)
2012       continue;
2013     assert(LHS->getType() == RHS->getType() && "Equality but unequal types!");
2014 
2015     // Don't try to propagate equalities between constants.
2016     if (isa<Constant>(LHS) && isa<Constant>(RHS))
2017       continue;
2018 
2019     // Prefer a constant on the right-hand side, or an Argument if no constants.
2020     if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS)))
2021       std::swap(LHS, RHS);
2022     assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) && "Unexpected value!");
2023 
2024     // If there is no obvious reason to prefer the left-hand side over the
2025     // right-hand side, ensure the longest lived term is on the right-hand side,
2026     // so the shortest lived term will be replaced by the longest lived.
2027     // This tends to expose more simplifications.
2028     uint32_t LVN = VN.lookupOrAdd(LHS);
2029     if ((isa<Argument>(LHS) && isa<Argument>(RHS)) ||
2030         (isa<Instruction>(LHS) && isa<Instruction>(RHS))) {
2031       // Move the 'oldest' value to the right-hand side, using the value number
2032       // as a proxy for age.
2033       uint32_t RVN = VN.lookupOrAdd(RHS);
2034       if (LVN < RVN) {
2035         std::swap(LHS, RHS);
2036         LVN = RVN;
2037       }
2038     }
2039 
2040     // If value numbering later sees that an instruction in the scope is equal
2041     // to 'LHS' then ensure it will be turned into 'RHS'.  In order to preserve
2042     // the invariant that instructions only occur in the leader table for their
2043     // own value number (this is used by removeFromLeaderTable), do not do this
2044     // if RHS is an instruction (if an instruction in the scope is morphed into
2045     // LHS then it will be turned into RHS by the next GVN iteration anyway, so
2046     // using the leader table is about compiling faster, not optimizing better).
2047     // The leader table only tracks basic blocks, not edges. Only add to if we
2048     // have the simple case where the edge dominates the end.
2049     if (RootDominatesEnd && !isa<Instruction>(RHS))
2050       addToLeaderTable(LVN, RHS, Root.getEnd());
2051 
2052     // Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope.  As
2053     // LHS always has at least one use that is not dominated by Root, this will
2054     // never do anything if LHS has only one use.
2055     if (!LHS->hasOneUse()) {
2056       unsigned NumReplacements =
2057           DominatesByEdge
2058               ? replaceDominatedUsesWith(LHS, RHS, *DT, Root)
2059               : replaceDominatedUsesWith(LHS, RHS, *DT, Root.getStart());
2060 
2061       Changed |= NumReplacements > 0;
2062       NumGVNEqProp += NumReplacements;
2063       // Cached information for anything that uses LHS will be invalid.
2064       if (MD)
2065         MD->invalidateCachedPointerInfo(LHS);
2066     }
2067 
2068     // Now try to deduce additional equalities from this one. For example, if
2069     // the known equality was "(A != B)" == "false" then it follows that A and B
2070     // are equal in the scope. Only boolean equalities with an explicit true or
2071     // false RHS are currently supported.
2072     if (!RHS->getType()->isIntegerTy(1))
2073       // Not a boolean equality - bail out.
2074       continue;
2075     ConstantInt *CI = dyn_cast<ConstantInt>(RHS);
2076     if (!CI)
2077       // RHS neither 'true' nor 'false' - bail out.
2078       continue;
2079     // Whether RHS equals 'true'.  Otherwise it equals 'false'.
2080     bool isKnownTrue = CI->isMinusOne();
2081     bool isKnownFalse = !isKnownTrue;
2082 
2083     // If "A && B" is known true then both A and B are known true.  If "A || B"
2084     // is known false then both A and B are known false.
2085     Value *A, *B;
2086     if ((isKnownTrue && match(LHS, m_LogicalAnd(m_Value(A), m_Value(B)))) ||
2087         (isKnownFalse && match(LHS, m_LogicalOr(m_Value(A), m_Value(B))))) {
2088       Worklist.push_back(std::make_pair(A, RHS));
2089       Worklist.push_back(std::make_pair(B, RHS));
2090       continue;
2091     }
2092 
2093     // If we are propagating an equality like "(A == B)" == "true" then also
2094     // propagate the equality A == B.  When propagating a comparison such as
2095     // "(A >= B)" == "true", replace all instances of "A < B" with "false".
2096     if (CmpInst *Cmp = dyn_cast<CmpInst>(LHS)) {
2097       Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);
2098 
2099       // If "A == B" is known true, or "A != B" is known false, then replace
2100       // A with B everywhere in the scope.  For floating point operations, we
2101       // have to be careful since equality does not always imply equivalance.
2102       if ((isKnownTrue && impliesEquivalanceIfTrue(Cmp)) ||
2103           (isKnownFalse && impliesEquivalanceIfFalse(Cmp)))
2104         Worklist.push_back(std::make_pair(Op0, Op1));
2105 
2106       // If "A >= B" is known true, replace "A < B" with false everywhere.
2107       CmpInst::Predicate NotPred = Cmp->getInversePredicate();
2108       Constant *NotVal = ConstantInt::get(Cmp->getType(), isKnownFalse);
2109       // Since we don't have the instruction "A < B" immediately to hand, work
2110       // out the value number that it would have and use that to find an
2111       // appropriate instruction (if any).
2112       uint32_t NextNum = VN.getNextUnusedValueNumber();
2113       uint32_t Num = VN.lookupOrAddCmp(Cmp->getOpcode(), NotPred, Op0, Op1);
2114       // If the number we were assigned was brand new then there is no point in
2115       // looking for an instruction realizing it: there cannot be one!
2116       if (Num < NextNum) {
2117         Value *NotCmp = findLeader(Root.getEnd(), Num);
2118         if (NotCmp && isa<Instruction>(NotCmp)) {
2119           unsigned NumReplacements =
2120               DominatesByEdge
2121                   ? replaceDominatedUsesWith(NotCmp, NotVal, *DT, Root)
2122                   : replaceDominatedUsesWith(NotCmp, NotVal, *DT,
2123                                              Root.getStart());
2124           Changed |= NumReplacements > 0;
2125           NumGVNEqProp += NumReplacements;
2126           // Cached information for anything that uses NotCmp will be invalid.
2127           if (MD)
2128             MD->invalidateCachedPointerInfo(NotCmp);
2129         }
2130       }
2131       // Ensure that any instruction in scope that gets the "A < B" value number
2132       // is replaced with false.
2133       // The leader table only tracks basic blocks, not edges. Only add to if we
2134       // have the simple case where the edge dominates the end.
2135       if (RootDominatesEnd)
2136         addToLeaderTable(Num, NotVal, Root.getEnd());
2137 
2138       continue;
2139     }
2140   }
2141 
2142   return Changed;
2143 }
2144 
2145 /// When calculating availability, handle an instruction
2146 /// by inserting it into the appropriate sets
2147 bool GVN::processInstruction(Instruction *I) {
2148   // Ignore dbg info intrinsics.
2149   if (isa<DbgInfoIntrinsic>(I))
2150     return false;
2151 
2152   // If the instruction can be easily simplified then do so now in preference
2153   // to value numbering it.  Value numbering often exposes redundancies, for
2154   // example if it determines that %y is equal to %x then the instruction
2155   // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
2156   const DataLayout &DL = I->getModule()->getDataLayout();
2157   if (Value *V = SimplifyInstruction(I, {DL, TLI, DT, AC})) {
2158     bool Changed = false;
2159     if (!I->use_empty()) {
2160       I->replaceAllUsesWith(V);
2161       Changed = true;
2162     }
2163     if (isInstructionTriviallyDead(I, TLI)) {
2164       markInstructionForDeletion(I);
2165       Changed = true;
2166     }
2167     if (Changed) {
2168       if (MD && V->getType()->isPtrOrPtrVectorTy())
2169         MD->invalidateCachedPointerInfo(V);
2170       ++NumGVNSimpl;
2171       return true;
2172     }
2173   }
2174 
2175   if (IntrinsicInst *IntrinsicI = dyn_cast<IntrinsicInst>(I))
2176     if (IntrinsicI->getIntrinsicID() == Intrinsic::assume)
2177       return processAssumeIntrinsic(IntrinsicI);
2178 
2179   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
2180     if (processLoad(LI))
2181       return true;
2182 
2183     unsigned Num = VN.lookupOrAdd(LI);
2184     addToLeaderTable(Num, LI, LI->getParent());
2185     return false;
2186   }
2187 
2188   // For conditional branches, we can perform simple conditional propagation on
2189   // the condition value itself.
2190   if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
2191     if (!BI->isConditional())
2192       return false;
2193 
2194     if (isa<Constant>(BI->getCondition()))
2195       return processFoldableCondBr(BI);
2196 
2197     Value *BranchCond = BI->getCondition();
2198     BasicBlock *TrueSucc = BI->getSuccessor(0);
2199     BasicBlock *FalseSucc = BI->getSuccessor(1);
2200     // Avoid multiple edges early.
2201     if (TrueSucc == FalseSucc)
2202       return false;
2203 
2204     BasicBlock *Parent = BI->getParent();
2205     bool Changed = false;
2206 
2207     Value *TrueVal = ConstantInt::getTrue(TrueSucc->getContext());
2208     BasicBlockEdge TrueE(Parent, TrueSucc);
2209     Changed |= propagateEquality(BranchCond, TrueVal, TrueE, true);
2210 
2211     Value *FalseVal = ConstantInt::getFalse(FalseSucc->getContext());
2212     BasicBlockEdge FalseE(Parent, FalseSucc);
2213     Changed |= propagateEquality(BranchCond, FalseVal, FalseE, true);
2214 
2215     return Changed;
2216   }
2217 
2218   // For switches, propagate the case values into the case destinations.
2219   if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) {
2220     Value *SwitchCond = SI->getCondition();
2221     BasicBlock *Parent = SI->getParent();
2222     bool Changed = false;
2223 
2224     // Remember how many outgoing edges there are to every successor.
2225     SmallDenseMap<BasicBlock *, unsigned, 16> SwitchEdges;
2226     for (unsigned i = 0, n = SI->getNumSuccessors(); i != n; ++i)
2227       ++SwitchEdges[SI->getSuccessor(i)];
2228 
2229     for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
2230          i != e; ++i) {
2231       BasicBlock *Dst = i->getCaseSuccessor();
2232       // If there is only a single edge, propagate the case value into it.
2233       if (SwitchEdges.lookup(Dst) == 1) {
2234         BasicBlockEdge E(Parent, Dst);
2235         Changed |= propagateEquality(SwitchCond, i->getCaseValue(), E, true);
2236       }
2237     }
2238     return Changed;
2239   }
2240 
2241   // Instructions with void type don't return a value, so there's
2242   // no point in trying to find redundancies in them.
2243   if (I->getType()->isVoidTy())
2244     return false;
2245 
2246   uint32_t NextNum = VN.getNextUnusedValueNumber();
2247   unsigned Num = VN.lookupOrAdd(I);
2248 
2249   // Allocations are always uniquely numbered, so we can save time and memory
2250   // by fast failing them.
2251   if (isa<AllocaInst>(I) || I->isTerminator() || isa<PHINode>(I)) {
2252     addToLeaderTable(Num, I, I->getParent());
2253     return false;
2254   }
2255 
2256   // If the number we were assigned was a brand new VN, then we don't
2257   // need to do a lookup to see if the number already exists
2258   // somewhere in the domtree: it can't!
2259   if (Num >= NextNum) {
2260     addToLeaderTable(Num, I, I->getParent());
2261     return false;
2262   }
2263 
2264   // Perform fast-path value-number based elimination of values inherited from
2265   // dominators.
2266   Value *Repl = findLeader(I->getParent(), Num);
2267   if (!Repl) {
2268     // Failure, just remember this instance for future use.
2269     addToLeaderTable(Num, I, I->getParent());
2270     return false;
2271   } else if (Repl == I) {
2272     // If I was the result of a shortcut PRE, it might already be in the table
2273     // and the best replacement for itself. Nothing to do.
2274     return false;
2275   }
2276 
2277   // Remove it!
2278   patchAndReplaceAllUsesWith(I, Repl);
2279   if (MD && Repl->getType()->isPtrOrPtrVectorTy())
2280     MD->invalidateCachedPointerInfo(Repl);
2281   markInstructionForDeletion(I);
2282   return true;
2283 }
2284 
2285 /// runOnFunction - This is the main transformation entry point for a function.
2286 bool GVN::runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT,
2287                   const TargetLibraryInfo &RunTLI, AAResults &RunAA,
2288                   MemoryDependenceResults *RunMD, LoopInfo *LI,
2289                   OptimizationRemarkEmitter *RunORE, MemorySSA *MSSA) {
2290   AC = &RunAC;
2291   DT = &RunDT;
2292   VN.setDomTree(DT);
2293   TLI = &RunTLI;
2294   VN.setAliasAnalysis(&RunAA);
2295   MD = RunMD;
2296   ImplicitControlFlowTracking ImplicitCFT;
2297   ICF = &ImplicitCFT;
2298   this->LI = LI;
2299   VN.setMemDep(MD);
2300   ORE = RunORE;
2301   InvalidBlockRPONumbers = true;
2302   MemorySSAUpdater Updater(MSSA);
2303   MSSAU = MSSA ? &Updater : nullptr;
2304 
2305   bool Changed = false;
2306   bool ShouldContinue = true;
2307 
2308   DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
2309   // Merge unconditional branches, allowing PRE to catch more
2310   // optimization opportunities.
2311   for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
2312     BasicBlock *BB = &*FI++;
2313 
2314     bool removedBlock = MergeBlockIntoPredecessor(BB, &DTU, LI, MSSAU, MD);
2315     if (removedBlock)
2316       ++NumGVNBlocks;
2317 
2318     Changed |= removedBlock;
2319   }
2320 
2321   unsigned Iteration = 0;
2322   while (ShouldContinue) {
2323     LLVM_DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
2324     ShouldContinue = iterateOnFunction(F);
2325     Changed |= ShouldContinue;
2326     ++Iteration;
2327   }
2328 
2329   if (isPREEnabled()) {
2330     // Fabricate val-num for dead-code in order to suppress assertion in
2331     // performPRE().
2332     assignValNumForDeadCode();
2333     bool PREChanged = true;
2334     while (PREChanged) {
2335       PREChanged = performPRE(F);
2336       Changed |= PREChanged;
2337     }
2338   }
2339 
2340   // FIXME: Should perform GVN again after PRE does something.  PRE can move
2341   // computations into blocks where they become fully redundant.  Note that
2342   // we can't do this until PRE's critical edge splitting updates memdep.
2343   // Actually, when this happens, we should just fully integrate PRE into GVN.
2344 
2345   cleanupGlobalSets();
2346   // Do not cleanup DeadBlocks in cleanupGlobalSets() as it's called for each
2347   // iteration.
2348   DeadBlocks.clear();
2349 
2350   if (MSSA && VerifyMemorySSA)
2351     MSSA->verifyMemorySSA();
2352 
2353   return Changed;
2354 }
2355 
2356 bool GVN::processBlock(BasicBlock *BB) {
2357   // FIXME: Kill off InstrsToErase by doing erasing eagerly in a helper function
2358   // (and incrementing BI before processing an instruction).
2359   assert(InstrsToErase.empty() &&
2360          "We expect InstrsToErase to be empty across iterations");
2361   if (DeadBlocks.count(BB))
2362     return false;
2363 
2364   // Clearing map before every BB because it can be used only for single BB.
2365   ReplaceOperandsWithMap.clear();
2366   bool ChangedFunction = false;
2367 
2368   for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
2369        BI != BE;) {
2370     if (!ReplaceOperandsWithMap.empty())
2371       ChangedFunction |= replaceOperandsForInBlockEquality(&*BI);
2372     ChangedFunction |= processInstruction(&*BI);
2373 
2374     if (InstrsToErase.empty()) {
2375       ++BI;
2376       continue;
2377     }
2378 
2379     // If we need some instructions deleted, do it now.
2380     NumGVNInstr += InstrsToErase.size();
2381 
2382     // Avoid iterator invalidation.
2383     bool AtStart = BI == BB->begin();
2384     if (!AtStart)
2385       --BI;
2386 
2387     for (auto *I : InstrsToErase) {
2388       assert(I->getParent() == BB && "Removing instruction from wrong block?");
2389       LLVM_DEBUG(dbgs() << "GVN removed: " << *I << '\n');
2390       salvageKnowledge(I, AC);
2391       salvageDebugInfo(*I);
2392       if (MD) MD->removeInstruction(I);
2393       if (MSSAU)
2394         MSSAU->removeMemoryAccess(I);
2395       LLVM_DEBUG(verifyRemoved(I));
2396       ICF->removeInstruction(I);
2397       I->eraseFromParent();
2398     }
2399     InstrsToErase.clear();
2400 
2401     if (AtStart)
2402       BI = BB->begin();
2403     else
2404       ++BI;
2405   }
2406 
2407   return ChangedFunction;
2408 }
2409 
2410 // Instantiate an expression in a predecessor that lacked it.
2411 bool GVN::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
2412                                     BasicBlock *Curr, unsigned int ValNo) {
2413   // Because we are going top-down through the block, all value numbers
2414   // will be available in the predecessor by the time we need them.  Any
2415   // that weren't originally present will have been instantiated earlier
2416   // in this loop.
2417   bool success = true;
2418   for (unsigned i = 0, e = Instr->getNumOperands(); i != e; ++i) {
2419     Value *Op = Instr->getOperand(i);
2420     if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
2421       continue;
2422     // This could be a newly inserted instruction, in which case, we won't
2423     // find a value number, and should give up before we hurt ourselves.
2424     // FIXME: Rewrite the infrastructure to let it easier to value number
2425     // and process newly inserted instructions.
2426     if (!VN.exists(Op)) {
2427       success = false;
2428       break;
2429     }
2430     uint32_t TValNo =
2431         VN.phiTranslate(Pred, Curr, VN.lookup(Op), *this);
2432     if (Value *V = findLeader(Pred, TValNo)) {
2433       Instr->setOperand(i, V);
2434     } else {
2435       success = false;
2436       break;
2437     }
2438   }
2439 
2440   // Fail out if we encounter an operand that is not available in
2441   // the PRE predecessor.  This is typically because of loads which
2442   // are not value numbered precisely.
2443   if (!success)
2444     return false;
2445 
2446   Instr->insertBefore(Pred->getTerminator());
2447   Instr->setName(Instr->getName() + ".pre");
2448   Instr->setDebugLoc(Instr->getDebugLoc());
2449 
2450   unsigned Num = VN.lookupOrAdd(Instr);
2451   VN.add(Instr, Num);
2452 
2453   // Update the availability map to include the new instruction.
2454   addToLeaderTable(Num, Instr, Pred);
2455   return true;
2456 }
2457 
2458 bool GVN::performScalarPRE(Instruction *CurInst) {
2459   if (isa<AllocaInst>(CurInst) || CurInst->isTerminator() ||
2460       isa<PHINode>(CurInst) || CurInst->getType()->isVoidTy() ||
2461       CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
2462       isa<DbgInfoIntrinsic>(CurInst))
2463     return false;
2464 
2465   // Don't do PRE on compares. The PHI would prevent CodeGenPrepare from
2466   // sinking the compare again, and it would force the code generator to
2467   // move the i1 from processor flags or predicate registers into a general
2468   // purpose register.
2469   if (isa<CmpInst>(CurInst))
2470     return false;
2471 
2472   // Don't do PRE on GEPs. The inserted PHI would prevent CodeGenPrepare from
2473   // sinking the addressing mode computation back to its uses. Extending the
2474   // GEP's live range increases the register pressure, and therefore it can
2475   // introduce unnecessary spills.
2476   //
2477   // This doesn't prevent Load PRE. PHI translation will make the GEP available
2478   // to the load by moving it to the predecessor block if necessary.
2479   if (isa<GetElementPtrInst>(CurInst))
2480     return false;
2481 
2482   if (auto *CallB = dyn_cast<CallBase>(CurInst)) {
2483     // We don't currently value number ANY inline asm calls.
2484     if (CallB->isInlineAsm())
2485       return false;
2486     // Don't do PRE on convergent calls.
2487     if (CallB->isConvergent())
2488       return false;
2489   }
2490 
2491   uint32_t ValNo = VN.lookup(CurInst);
2492 
2493   // Look for the predecessors for PRE opportunities.  We're
2494   // only trying to solve the basic diamond case, where
2495   // a value is computed in the successor and one predecessor,
2496   // but not the other.  We also explicitly disallow cases
2497   // where the successor is its own predecessor, because they're
2498   // more complicated to get right.
2499   unsigned NumWith = 0;
2500   unsigned NumWithout = 0;
2501   BasicBlock *PREPred = nullptr;
2502   BasicBlock *CurrentBlock = CurInst->getParent();
2503 
2504   // Update the RPO numbers for this function.
2505   if (InvalidBlockRPONumbers)
2506     assignBlockRPONumber(*CurrentBlock->getParent());
2507 
2508   SmallVector<std::pair<Value *, BasicBlock *>, 8> predMap;
2509   for (BasicBlock *P : predecessors(CurrentBlock)) {
2510     // We're not interested in PRE where blocks with predecessors that are
2511     // not reachable.
2512     if (!DT->isReachableFromEntry(P)) {
2513       NumWithout = 2;
2514       break;
2515     }
2516     // It is not safe to do PRE when P->CurrentBlock is a loop backedge, and
2517     // when CurInst has operand defined in CurrentBlock (so it may be defined
2518     // by phi in the loop header).
2519     assert(BlockRPONumber.count(P) && BlockRPONumber.count(CurrentBlock) &&
2520            "Invalid BlockRPONumber map.");
2521     if (BlockRPONumber[P] >= BlockRPONumber[CurrentBlock] &&
2522         llvm::any_of(CurInst->operands(), [&](const Use &U) {
2523           if (auto *Inst = dyn_cast<Instruction>(U.get()))
2524             return Inst->getParent() == CurrentBlock;
2525           return false;
2526         })) {
2527       NumWithout = 2;
2528       break;
2529     }
2530 
2531     uint32_t TValNo = VN.phiTranslate(P, CurrentBlock, ValNo, *this);
2532     Value *predV = findLeader(P, TValNo);
2533     if (!predV) {
2534       predMap.push_back(std::make_pair(static_cast<Value *>(nullptr), P));
2535       PREPred = P;
2536       ++NumWithout;
2537     } else if (predV == CurInst) {
2538       /* CurInst dominates this predecessor. */
2539       NumWithout = 2;
2540       break;
2541     } else {
2542       predMap.push_back(std::make_pair(predV, P));
2543       ++NumWith;
2544     }
2545   }
2546 
2547   // Don't do PRE when it might increase code size, i.e. when
2548   // we would need to insert instructions in more than one pred.
2549   if (NumWithout > 1 || NumWith == 0)
2550     return false;
2551 
2552   // We may have a case where all predecessors have the instruction,
2553   // and we just need to insert a phi node. Otherwise, perform
2554   // insertion.
2555   Instruction *PREInstr = nullptr;
2556 
2557   if (NumWithout != 0) {
2558     if (!isSafeToSpeculativelyExecute(CurInst)) {
2559       // It is only valid to insert a new instruction if the current instruction
2560       // is always executed. An instruction with implicit control flow could
2561       // prevent us from doing it. If we cannot speculate the execution, then
2562       // PRE should be prohibited.
2563       if (ICF->isDominatedByICFIFromSameBlock(CurInst))
2564         return false;
2565     }
2566 
2567     // Don't do PRE across indirect branch.
2568     if (isa<IndirectBrInst>(PREPred->getTerminator()))
2569       return false;
2570 
2571     // Don't do PRE across callbr.
2572     // FIXME: Can we do this across the fallthrough edge?
2573     if (isa<CallBrInst>(PREPred->getTerminator()))
2574       return false;
2575 
2576     // We can't do PRE safely on a critical edge, so instead we schedule
2577     // the edge to be split and perform the PRE the next time we iterate
2578     // on the function.
2579     unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
2580     if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
2581       toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
2582       return false;
2583     }
2584     // We need to insert somewhere, so let's give it a shot
2585     PREInstr = CurInst->clone();
2586     if (!performScalarPREInsertion(PREInstr, PREPred, CurrentBlock, ValNo)) {
2587       // If we failed insertion, make sure we remove the instruction.
2588       LLVM_DEBUG(verifyRemoved(PREInstr));
2589       PREInstr->deleteValue();
2590       return false;
2591     }
2592   }
2593 
2594   // Either we should have filled in the PRE instruction, or we should
2595   // not have needed insertions.
2596   assert(PREInstr != nullptr || NumWithout == 0);
2597 
2598   ++NumGVNPRE;
2599 
2600   // Create a PHI to make the value available in this block.
2601   PHINode *Phi =
2602       PHINode::Create(CurInst->getType(), predMap.size(),
2603                       CurInst->getName() + ".pre-phi", &CurrentBlock->front());
2604   for (unsigned i = 0, e = predMap.size(); i != e; ++i) {
2605     if (Value *V = predMap[i].first) {
2606       // If we use an existing value in this phi, we have to patch the original
2607       // value because the phi will be used to replace a later value.
2608       patchReplacementInstruction(CurInst, V);
2609       Phi->addIncoming(V, predMap[i].second);
2610     } else
2611       Phi->addIncoming(PREInstr, PREPred);
2612   }
2613 
2614   VN.add(Phi, ValNo);
2615   // After creating a new PHI for ValNo, the phi translate result for ValNo will
2616   // be changed, so erase the related stale entries in phi translate cache.
2617   VN.eraseTranslateCacheEntry(ValNo, *CurrentBlock);
2618   addToLeaderTable(ValNo, Phi, CurrentBlock);
2619   Phi->setDebugLoc(CurInst->getDebugLoc());
2620   CurInst->replaceAllUsesWith(Phi);
2621   if (MD && Phi->getType()->isPtrOrPtrVectorTy())
2622     MD->invalidateCachedPointerInfo(Phi);
2623   VN.erase(CurInst);
2624   removeFromLeaderTable(ValNo, CurInst, CurrentBlock);
2625 
2626   LLVM_DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
2627   if (MD)
2628     MD->removeInstruction(CurInst);
2629   if (MSSAU)
2630     MSSAU->removeMemoryAccess(CurInst);
2631   LLVM_DEBUG(verifyRemoved(CurInst));
2632   // FIXME: Intended to be markInstructionForDeletion(CurInst), but it causes
2633   // some assertion failures.
2634   ICF->removeInstruction(CurInst);
2635   CurInst->eraseFromParent();
2636   ++NumGVNInstr;
2637 
2638   return true;
2639 }
2640 
2641 /// Perform a purely local form of PRE that looks for diamond
2642 /// control flow patterns and attempts to perform simple PRE at the join point.
2643 bool GVN::performPRE(Function &F) {
2644   bool Changed = false;
2645   for (BasicBlock *CurrentBlock : depth_first(&F.getEntryBlock())) {
2646     // Nothing to PRE in the entry block.
2647     if (CurrentBlock == &F.getEntryBlock())
2648       continue;
2649 
2650     // Don't perform PRE on an EH pad.
2651     if (CurrentBlock->isEHPad())
2652       continue;
2653 
2654     for (BasicBlock::iterator BI = CurrentBlock->begin(),
2655                               BE = CurrentBlock->end();
2656          BI != BE;) {
2657       Instruction *CurInst = &*BI++;
2658       Changed |= performScalarPRE(CurInst);
2659     }
2660   }
2661 
2662   if (splitCriticalEdges())
2663     Changed = true;
2664 
2665   return Changed;
2666 }
2667 
2668 /// Split the critical edge connecting the given two blocks, and return
2669 /// the block inserted to the critical edge.
2670 BasicBlock *GVN::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) {
2671   // GVN does not require loop-simplify, do not try to preserve it if it is not
2672   // possible.
2673   BasicBlock *BB = SplitCriticalEdge(
2674       Pred, Succ,
2675       CriticalEdgeSplittingOptions(DT, LI, MSSAU).unsetPreserveLoopSimplify());
2676   if (BB) {
2677     if (MD)
2678       MD->invalidateCachedPredecessors();
2679     InvalidBlockRPONumbers = true;
2680   }
2681   return BB;
2682 }
2683 
2684 /// Split critical edges found during the previous
2685 /// iteration that may enable further optimization.
2686 bool GVN::splitCriticalEdges() {
2687   if (toSplit.empty())
2688     return false;
2689 
2690   bool Changed = false;
2691   do {
2692     std::pair<Instruction *, unsigned> Edge = toSplit.pop_back_val();
2693     Changed |= SplitCriticalEdge(Edge.first, Edge.second,
2694                                  CriticalEdgeSplittingOptions(DT, LI, MSSAU)) !=
2695                nullptr;
2696   } while (!toSplit.empty());
2697   if (Changed) {
2698     if (MD)
2699       MD->invalidateCachedPredecessors();
2700     InvalidBlockRPONumbers = true;
2701   }
2702   return Changed;
2703 }
2704 
2705 /// Executes one iteration of GVN
2706 bool GVN::iterateOnFunction(Function &F) {
2707   cleanupGlobalSets();
2708 
2709   // Top-down walk of the dominator tree
2710   bool Changed = false;
2711   // Needed for value numbering with phi construction to work.
2712   // RPOT walks the graph in its constructor and will not be invalidated during
2713   // processBlock.
2714   ReversePostOrderTraversal<Function *> RPOT(&F);
2715 
2716   for (BasicBlock *BB : RPOT)
2717     Changed |= processBlock(BB);
2718 
2719   return Changed;
2720 }
2721 
2722 void GVN::cleanupGlobalSets() {
2723   VN.clear();
2724   LeaderTable.clear();
2725   BlockRPONumber.clear();
2726   TableAllocator.Reset();
2727   ICF->clear();
2728   InvalidBlockRPONumbers = true;
2729 }
2730 
2731 /// Verify that the specified instruction does not occur in our
2732 /// internal data structures.
2733 void GVN::verifyRemoved(const Instruction *Inst) const {
2734   VN.verifyRemoved(Inst);
2735 
2736   // Walk through the value number scope to make sure the instruction isn't
2737   // ferreted away in it.
2738   for (DenseMap<uint32_t, LeaderTableEntry>::const_iterator
2739        I = LeaderTable.begin(), E = LeaderTable.end(); I != E; ++I) {
2740     const LeaderTableEntry *Node = &I->second;
2741     assert(Node->Val != Inst && "Inst still in value numbering scope!");
2742 
2743     while (Node->Next) {
2744       Node = Node->Next;
2745       assert(Node->Val != Inst && "Inst still in value numbering scope!");
2746     }
2747   }
2748 }
2749 
2750 /// BB is declared dead, which implied other blocks become dead as well. This
2751 /// function is to add all these blocks to "DeadBlocks". For the dead blocks'
2752 /// live successors, update their phi nodes by replacing the operands
2753 /// corresponding to dead blocks with UndefVal.
2754 void GVN::addDeadBlock(BasicBlock *BB) {
2755   SmallVector<BasicBlock *, 4> NewDead;
2756   SmallSetVector<BasicBlock *, 4> DF;
2757 
2758   NewDead.push_back(BB);
2759   while (!NewDead.empty()) {
2760     BasicBlock *D = NewDead.pop_back_val();
2761     if (DeadBlocks.count(D))
2762       continue;
2763 
2764     // All blocks dominated by D are dead.
2765     SmallVector<BasicBlock *, 8> Dom;
2766     DT->getDescendants(D, Dom);
2767     DeadBlocks.insert(Dom.begin(), Dom.end());
2768 
2769     // Figure out the dominance-frontier(D).
2770     for (BasicBlock *B : Dom) {
2771       for (BasicBlock *S : successors(B)) {
2772         if (DeadBlocks.count(S))
2773           continue;
2774 
2775         bool AllPredDead = true;
2776         for (BasicBlock *P : predecessors(S))
2777           if (!DeadBlocks.count(P)) {
2778             AllPredDead = false;
2779             break;
2780           }
2781 
2782         if (!AllPredDead) {
2783           // S could be proved dead later on. That is why we don't update phi
2784           // operands at this moment.
2785           DF.insert(S);
2786         } else {
2787           // While S is not dominated by D, it is dead by now. This could take
2788           // place if S already have a dead predecessor before D is declared
2789           // dead.
2790           NewDead.push_back(S);
2791         }
2792       }
2793     }
2794   }
2795 
2796   // For the dead blocks' live successors, update their phi nodes by replacing
2797   // the operands corresponding to dead blocks with UndefVal.
2798   for(SmallSetVector<BasicBlock *, 4>::iterator I = DF.begin(), E = DF.end();
2799         I != E; I++) {
2800     BasicBlock *B = *I;
2801     if (DeadBlocks.count(B))
2802       continue;
2803 
2804     // First, split the critical edges. This might also create additional blocks
2805     // to preserve LoopSimplify form and adjust edges accordingly.
2806     SmallVector<BasicBlock *, 4> Preds(predecessors(B));
2807     for (BasicBlock *P : Preds) {
2808       if (!DeadBlocks.count(P))
2809         continue;
2810 
2811       if (llvm::is_contained(successors(P), B) &&
2812           isCriticalEdge(P->getTerminator(), B)) {
2813         if (BasicBlock *S = splitCriticalEdges(P, B))
2814           DeadBlocks.insert(P = S);
2815       }
2816     }
2817 
2818     // Now undef the incoming values from the dead predecessors.
2819     for (BasicBlock *P : predecessors(B)) {
2820       if (!DeadBlocks.count(P))
2821         continue;
2822       for (PHINode &Phi : B->phis()) {
2823         Phi.setIncomingValueForBlock(P, UndefValue::get(Phi.getType()));
2824         if (MD)
2825           MD->invalidateCachedPointerInfo(&Phi);
2826       }
2827     }
2828   }
2829 }
2830 
2831 // If the given branch is recognized as a foldable branch (i.e. conditional
2832 // branch with constant condition), it will perform following analyses and
2833 // transformation.
2834 //  1) If the dead out-coming edge is a critical-edge, split it. Let
2835 //     R be the target of the dead out-coming edge.
2836 //  1) Identify the set of dead blocks implied by the branch's dead outcoming
2837 //     edge. The result of this step will be {X| X is dominated by R}
2838 //  2) Identify those blocks which haves at least one dead predecessor. The
2839 //     result of this step will be dominance-frontier(R).
2840 //  3) Update the PHIs in DF(R) by replacing the operands corresponding to
2841 //     dead blocks with "UndefVal" in an hope these PHIs will optimized away.
2842 //
2843 // Return true iff *NEW* dead code are found.
2844 bool GVN::processFoldableCondBr(BranchInst *BI) {
2845   if (!BI || BI->isUnconditional())
2846     return false;
2847 
2848   // If a branch has two identical successors, we cannot declare either dead.
2849   if (BI->getSuccessor(0) == BI->getSuccessor(1))
2850     return false;
2851 
2852   ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
2853   if (!Cond)
2854     return false;
2855 
2856   BasicBlock *DeadRoot =
2857       Cond->getZExtValue() ? BI->getSuccessor(1) : BI->getSuccessor(0);
2858   if (DeadBlocks.count(DeadRoot))
2859     return false;
2860 
2861   if (!DeadRoot->getSinglePredecessor())
2862     DeadRoot = splitCriticalEdges(BI->getParent(), DeadRoot);
2863 
2864   addDeadBlock(DeadRoot);
2865   return true;
2866 }
2867 
2868 // performPRE() will trigger assert if it comes across an instruction without
2869 // associated val-num. As it normally has far more live instructions than dead
2870 // instructions, it makes more sense just to "fabricate" a val-number for the
2871 // dead code than checking if instruction involved is dead or not.
2872 void GVN::assignValNumForDeadCode() {
2873   for (BasicBlock *BB : DeadBlocks) {
2874     for (Instruction &Inst : *BB) {
2875       unsigned ValNum = VN.lookupOrAdd(&Inst);
2876       addToLeaderTable(ValNum, &Inst, BB);
2877     }
2878   }
2879 }
2880 
2881 class llvm::gvn::GVNLegacyPass : public FunctionPass {
2882 public:
2883   static char ID; // Pass identification, replacement for typeid
2884 
2885   explicit GVNLegacyPass(bool NoMemDepAnalysis = !GVNEnableMemDep)
2886       : FunctionPass(ID), Impl(GVNOptions().setMemDep(!NoMemDepAnalysis)) {
2887     initializeGVNLegacyPassPass(*PassRegistry::getPassRegistry());
2888   }
2889 
2890   bool runOnFunction(Function &F) override {
2891     if (skipFunction(F))
2892       return false;
2893 
2894     auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
2895 
2896     auto *MSSAWP = getAnalysisIfAvailable<MemorySSAWrapperPass>();
2897     return Impl.runImpl(
2898         F, getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
2899         getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
2900         getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
2901         getAnalysis<AAResultsWrapperPass>().getAAResults(),
2902         Impl.isMemDepEnabled()
2903             ? &getAnalysis<MemoryDependenceWrapperPass>().getMemDep()
2904             : nullptr,
2905         LIWP ? &LIWP->getLoopInfo() : nullptr,
2906         &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(),
2907         MSSAWP ? &MSSAWP->getMSSA() : nullptr);
2908   }
2909 
2910   void getAnalysisUsage(AnalysisUsage &AU) const override {
2911     AU.addRequired<AssumptionCacheTracker>();
2912     AU.addRequired<DominatorTreeWrapperPass>();
2913     AU.addRequired<TargetLibraryInfoWrapperPass>();
2914     AU.addRequired<LoopInfoWrapperPass>();
2915     if (Impl.isMemDepEnabled())
2916       AU.addRequired<MemoryDependenceWrapperPass>();
2917     AU.addRequired<AAResultsWrapperPass>();
2918     AU.addPreserved<DominatorTreeWrapperPass>();
2919     AU.addPreserved<GlobalsAAWrapperPass>();
2920     AU.addPreserved<TargetLibraryInfoWrapperPass>();
2921     AU.addPreserved<LoopInfoWrapperPass>();
2922     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2923     AU.addPreserved<MemorySSAWrapperPass>();
2924   }
2925 
2926 private:
2927   GVN Impl;
2928 };
2929 
2930 char GVNLegacyPass::ID = 0;
2931 
2932 INITIALIZE_PASS_BEGIN(GVNLegacyPass, "gvn", "Global Value Numbering", false, false)
2933 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
2934 INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
2935 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
2936 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
2937 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
2938 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
2939 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
2940 INITIALIZE_PASS_END(GVNLegacyPass, "gvn", "Global Value Numbering", false, false)
2941 
2942 // The public interface to this file...
2943 FunctionPass *llvm::createGVNPass(bool NoMemDepAnalysis) {
2944   return new GVNLegacyPass(NoMemDepAnalysis);
2945 }
2946