//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions. It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Transforms/Utils/VNCoercion.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::gvn;
using namespace llvm::VNCoercion;
using namespace PatternMatch;

#define DEBUG_TYPE "gvn"

STATISTIC(NumGVNInstr, "Number of instructions deleted");
STATISTIC(NumGVNLoad, "Number of loads deleted");
STATISTIC(NumGVNPRE, "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumGVNSimpl, "Number of instructions simplified");
STATISTIC(NumGVNEqProp, "Number of equalities propagated");
STATISTIC(NumPRELoad, "Number of loads PRE'd");

static cl::opt<bool> GVNEnablePRE("enable-pre", cl::init(true), cl::Hidden);
static cl::opt<bool> GVNEnableLoadPRE("enable-load-pre", cl::init(true));
static cl::opt<bool> GVNEnableLoadInLoopPRE("enable-load-in-loop-pre",
                                            cl::init(true));
static cl::opt<bool> GVNEnableMemDep("enable-gvn-memdep", cl::init(true));

// Maximum allowed recursion depth.
static cl::opt<uint32_t>
    MaxRecurseDepth("gvn-max-recurse-depth", cl::Hidden, cl::init(1000),
                    cl::ZeroOrMore,
                    cl::desc("Max recurse depth in GVN (default = 1000)"));

static cl::opt<uint32_t> MaxNumDeps(
    "gvn-max-num-deps", cl::Hidden, cl::init(100), cl::ZeroOrMore,
    cl::desc("Max number of dependences to attempt Load PRE (default = 100)"));

struct llvm::GVN::Expression {
  uint32_t opcode;
  bool commutative = false;
  Type *type = nullptr;
  SmallVector<uint32_t, 4> varargs;

  Expression(uint32_t o = ~2U) : opcode(o) {}

  bool operator==(const Expression &other) const {
    if (opcode != other.opcode)
      return false;
    if (opcode == ~0U || opcode == ~1U)
      return true;
    if (type != other.type)
      return false;
    if (varargs != other.varargs)
      return false;
    return true;
  }

  friend hash_code hash_value(const Expression &Value) {
    return hash_combine(
        Value.opcode, Value.type,
        hash_combine_range(Value.varargs.begin(), Value.varargs.end()));
  }
};
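
// Note: the opcodes ~0U and ~1U are reserved as DenseMap sentinel keys (see
// the DenseMapInfo specialization below), which is why operator== above
// compares sentinel expressions by opcode alone, and why a freshly constructed
// Expression defaults to the distinct opcode ~2U.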

namespace llvm {

template <> struct DenseMapInfo<GVN::Expression> {
  static inline GVN::Expression getEmptyKey() { return ~0U; }
  static inline GVN::Expression getTombstoneKey() { return ~1U; }

  static unsigned getHashValue(const GVN::Expression &e) {
    using llvm::hash_value;

    return static_cast<unsigned>(hash_value(e));
  }

  static bool isEqual(const GVN::Expression &LHS, const GVN::Expression &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm

/// Represents a particular available value that we know how to materialize.
/// Materialization of an AvailableValue never fails. An AvailableValue is
/// implicitly associated with a rematerialization point which is the
/// location of the instruction from which it was formed.
struct llvm::gvn::AvailableValue {
  enum ValType {
    SimpleVal, // A simple offsetted value that is accessed.
    LoadVal,   // A value produced by a load.
    MemIntrin, // A memory intrinsic which is loaded from.
    UndefVal   // An UndefValue representing a value from a dead block (which
               // is not yet physically removed from the CFG).
  };
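
  // Note: all four ValType tags fit in the 2-bit integer field of the
  // PointerIntPair holding Val below.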

  /// V - The value that is live out of the block.
  PointerIntPair<Value *, 2, ValType> Val;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset = 0;

  static AvailableValue get(Value *V, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getLoad(LoadInst *LI, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val.setPointer(LI);
    Res.Val.setInt(LoadVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getUndef() {
    AvailableValue Res;
    Res.Val.setPointer(nullptr);
    Res.Val.setInt(UndefVal);
    Res.Offset = 0;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
  bool isCoercedLoadValue() const { return Val.getInt() == LoadVal; }
  bool isMemIntrinValue() const { return Val.getInt() == MemIntrin; }
  bool isUndefValue() const { return Val.getInt() == UndefVal; }

  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  LoadInst *getCoercedLoadValue() const {
    assert(isCoercedLoadValue() && "Wrong accessor");
    return cast<LoadInst>(Val.getPointer());
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(isMemIntrinValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }

  /// Emit code at the specified insertion point to adjust the value defined
  /// here to the specified type. This handles various coercion cases.
  Value *MaterializeAdjustedValue(LoadInst *LI, Instruction *InsertPt,
                                  GVN &gvn) const;
};

/// Represents an AvailableValue which can be rematerialized at the end of
/// the associated BasicBlock.
struct llvm::gvn::AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB = nullptr;

  /// AV - The actual available value.
  AvailableValue AV;

  static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.AV = std::move(AV);
    return Res;
  }

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    return get(BB, AvailableValue::get(V, Offset));
  }

  static AvailableValueInBlock getUndef(BasicBlock *BB) {
    return get(BB, AvailableValue::getUndef());
  }

  /// Emit code at the end of this block to adjust the value defined here to
  /// the specified type. This handles various coercion cases.
  Value *MaterializeAdjustedValue(LoadInst *LI, GVN &gvn) const {
    return AV.MaterializeAdjustedValue(LI, BB->getTerminator(), gvn);
  }
};

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//

GVN::Expression GVN::ValueTable::createExpr(Instruction *I) {
  Expression e;
  e.type = I->getType();
  e.opcode = I->getOpcode();
  for (Instruction::op_iterator OI = I->op_begin(), OE = I->op_end();
       OI != OE; ++OI)
    e.varargs.push_back(lookupOrAdd(*OI));
  if (I->isCommutative()) {
    // Ensure that commutative instructions that only differ by a permutation
    // of their operands get the same value number by sorting the operand value
    // numbers. Since all commutative instructions have two operands it is more
    // efficient to sort by hand rather than using, say, std::sort.
    assert(I->getNumOperands() == 2 && "Unsupported commutative instruction!");
    if (e.varargs[0] > e.varargs[1])
      std::swap(e.varargs[0], e.varargs[1]);
    e.commutative = true;
  }

  if (auto *C = dyn_cast<CmpInst>(I)) {
    // Sort the operand value numbers so x<y and y>x get the same value number.
    CmpInst::Predicate Predicate = C->getPredicate();
    if (e.varargs[0] > e.varargs[1]) {
      std::swap(e.varargs[0], e.varargs[1]);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }
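    // Pack the (possibly swapped) predicate into the low byte. Instruction
    // opcodes and CmpInst predicates are both small integers, so the shifted
    // opcode keeps compare expressions disjoint from plain-opcode expressions.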
    e.opcode = (C->getOpcode() << 8) | Predicate;
    e.commutative = true;
  } else if (auto *E = dyn_cast<InsertValueInst>(I)) {
    e.varargs.append(E->idx_begin(), E->idx_end());
  } else if (auto *SVI = dyn_cast<ShuffleVectorInst>(I)) {
    ArrayRef<int> ShuffleMask = SVI->getShuffleMask();
    e.varargs.append(ShuffleMask.begin(), ShuffleMask.end());
  }

  return e;
}

GVN::Expression GVN::ValueTable::createCmpExpr(unsigned Opcode,
                                               CmpInst::Predicate Predicate,
                                               Value *LHS, Value *RHS) {
  assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
         "Not a comparison!");
  Expression e;
  e.type = CmpInst::makeCmpResultType(LHS->getType());
  e.varargs.push_back(lookupOrAdd(LHS));
  e.varargs.push_back(lookupOrAdd(RHS));

  // Sort the operand value numbers so x<y and y>x get the same value number.
  if (e.varargs[0] > e.varargs[1]) {
    std::swap(e.varargs[0], e.varargs[1]);
    Predicate = CmpInst::getSwappedPredicate(Predicate);
  }
  e.opcode = (Opcode << 8) | Predicate;
  e.commutative = true;
  return e;
}

GVN::Expression GVN::ValueTable::createExtractvalueExpr(ExtractValueInst *EI) {
  assert(EI && "Not an ExtractValueInst?");
  Expression e;
  e.type = EI->getType();
  e.opcode = 0;

  WithOverflowInst *WO = dyn_cast<WithOverflowInst>(EI->getAggregateOperand());
  if (WO != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
    // EI is an extract from one of our with.overflow intrinsics. Synthesize
    // a semantically equivalent expression instead of an extract value
    // expression.
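    // For example, extracting element 0 of (sadd.with.overflow %a, %b) is
    // keyed here as if it were a plain (add %a, %b), so it can share a value
    // number with an equivalent add instruction.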
    e.opcode = WO->getBinaryOp();
    e.varargs.push_back(lookupOrAdd(WO->getLHS()));
    e.varargs.push_back(lookupOrAdd(WO->getRHS()));
    return e;
  }

  // Not a recognised intrinsic. Fall back to producing an extract value
  // expression.
  e.opcode = EI->getOpcode();
  for (Instruction::op_iterator OI = EI->op_begin(), OE = EI->op_end();
       OI != OE; ++OI)
    e.varargs.push_back(lookupOrAdd(*OI));

  for (ExtractValueInst::idx_iterator II = EI->idx_begin(), IE = EI->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);

  return e;
}

//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

GVN::ValueTable::ValueTable() = default;
GVN::ValueTable::ValueTable(const ValueTable &) = default;
GVN::ValueTable::ValueTable(ValueTable &&) = default;
GVN::ValueTable::~ValueTable() = default;
GVN::ValueTable &
GVN::ValueTable::operator=(const GVN::ValueTable &Arg) = default;

/// add - Insert a value into the table with a specified value number.
void GVN::ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
  if (PHINode *PN = dyn_cast<PHINode>(V))
    NumberingPhi[num] = PN;
}

uint32_t GVN::ValueTable::lookupOrAddCall(CallInst *C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = createExpr(C);
    uint32_t e = assignExpNewValueNum(exp).first;
    valueNumbering[C] = e;
    return e;
  } else if (MD && AA->onlyReadsMemory(C)) {
    Expression exp = createExpr(C);
    auto ValNum = assignExpNewValueNum(exp);
    if (ValNum.second) {
      valueNumbering[C] = ValNum.first;
      return ValNum.first;
    }

    MemDepResult local_dep = MD->getDependency(C);

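    // A local clobber or unknown dependence means this call's memory inputs
    // may differ from those of any previously numbered call, so it gets a
    // fresh value number.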
    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst *local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
        uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
        uint32_t cd_vn = lookupOrAdd(local_cdep->getArgOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookupOrAdd(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceResults::NonLocalDepInfo &deps =
        MD->getNonLocalCallDependency(C);
    // FIXME: Move the checking logic to MemDep!
    CallInst *cdep = nullptr;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      if (I->getResult().isNonLocal())
        continue;

      // We don't handle non-definitions. If we already have a call, reject
      // instruction dependencies.
      if (!I->getResult().isDef() || cdep != nullptr) {
        cdep = nullptr;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      // FIXME: All duplicated with non-local case.
      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())) {
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = nullptr;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumArgOperands() != C->getNumArgOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
      uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
      uint32_t cd_vn = lookupOrAdd(cdep->getArgOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookupOrAdd(cdep);
    valueNumbering[C] = v;
    return v;
  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}

/// Returns true if a value number exists for the specified value.
bool GVN::ValueTable::exists(Value *V) const {
  return valueNumbering.count(V) != 0;
}

/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t GVN::ValueTable::lookupOrAdd(Value *V) {
  DenseMap<Value *, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction *I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
  case Instruction::Call:
    return lookupOrAddCall(cast<CallInst>(I));
  case Instruction::FNeg:
  case Instruction::Add:
  case Instruction::FAdd:
  case Instruction::Sub:
  case Instruction::FSub:
  case Instruction::Mul:
  case Instruction::FMul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::FDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::FRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
  case Instruction::FCmp:
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::AddrSpaceCast:
  case Instruction::BitCast:
  case Instruction::Select:
  case Instruction::Freeze:
  case Instruction::ExtractElement:
  case Instruction::InsertElement:
  case Instruction::ShuffleVector:
  case Instruction::InsertValue:
  case Instruction::GetElementPtr:
    exp = createExpr(I);
    break;
  case Instruction::ExtractValue:
    exp = createExtractvalueExpr(cast<ExtractValueInst>(I));
    break;
  case Instruction::PHI:
    valueNumbering[V] = nextValueNumber;
    NumberingPhi[nextValueNumber] = cast<PHINode>(V);
    return nextValueNumber++;
  default:
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  uint32_t e = assignExpNewValueNum(exp).first;
  valueNumbering[V] = e;
  return e;
}

/// Returns the value number of the specified value. Fails if
/// the value has not yet been numbered.
uint32_t GVN::ValueTable::lookup(Value *V, bool Verify) const {
  DenseMap<Value *, uint32_t>::const_iterator VI = valueNumbering.find(V);
  if (Verify) {
    assert(VI != valueNumbering.end() && "Value not numbered?");
    return VI->second;
  }
  return (VI != valueNumbering.end()) ? VI->second : 0;
}

/// Returns the value number of the given comparison,
/// assigning it a new number if it did not have one before. Useful when
/// we deduced the result of a comparison, but don't immediately have an
/// instruction realizing that comparison to hand.
uint32_t GVN::ValueTable::lookupOrAddCmp(unsigned Opcode,
                                         CmpInst::Predicate Predicate,
                                         Value *LHS, Value *RHS) {
  Expression exp = createCmpExpr(Opcode, Predicate, LHS, RHS);
  return assignExpNewValueNum(exp).first;
}

/// Remove all entries from the ValueTable.
void GVN::ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  NumberingPhi.clear();
  PhiTranslateTable.clear();
  nextValueNumber = 1;
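  // Numbering restarts at 1; value number 0 is reserved as the "not numbered"
  // result that lookup(V, /*Verify=*/false) returns for unknown values.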
  Expressions.clear();
  ExprIdx.clear();
  nextExprNumber = 0;
}

/// Remove a value from the value numbering.
void GVN::ValueTable::erase(Value *V) {
  uint32_t Num = valueNumbering.lookup(V);
  valueNumbering.erase(V);
  // If V is a PHINode, V <--> value number is a one-to-one mapping.
  if (isa<PHINode>(V))
    NumberingPhi.erase(Num);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void GVN::ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value *, uint32_t>::const_iterator
           I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

bool GVN::isPREEnabled() const {
  return Options.AllowPRE.getValueOr(GVNEnablePRE);
}

bool GVN::isLoadPREEnabled() const {
  return Options.AllowLoadPRE.getValueOr(GVNEnableLoadPRE);
}

bool GVN::isLoadInLoopPREEnabled() const {
  return Options.AllowLoadInLoopPRE.getValueOr(GVNEnableLoadInLoopPRE);
}

bool GVN::isMemDepEnabled() const {
  return Options.AllowMemDep.getValueOr(GVNEnableMemDep);
}

PreservedAnalyses GVN::run(Function &F, FunctionAnalysisManager &AM) {
  // FIXME: The order of evaluation of these 'getResult' calls is very
  // significant! Re-ordering these variables will cause GVN when run alone to
  // be less effective! We should fix memdep and basic-aa to not exhibit this
  // behavior, but until then don't change the order here.
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto *MemDep =
      isMemDepEnabled() ? &AM.getResult<MemoryDependenceAnalysis>(F) : nullptr;
  auto *LI = AM.getCachedResult<LoopAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  bool Changed = runImpl(F, AC, DT, TLI, AA, MemDep, LI, &ORE);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<GlobalsAA>();
  PA.preserve<TargetLibraryAnalysis>();
  if (LI)
    PA.preserve<LoopAnalysis>();
  return PA;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void GVN::dump(DenseMap<uint32_t, Value *> &d) const {
  errs() << "{\n";
  for (DenseMap<uint32_t, Value *>::iterator I = d.begin(), E = d.end();
       I != E; ++I) {
    errs() << I->first << "\n";
    I->second->dump();
  }
  errs() << "}\n";
}
#endif

/// Return true if we can prove that the value
/// we're analyzing is fully available in the specified block. As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks. This
/// map is actually a four-state map with the following values:
/// 0) we know the block *is not* fully available.
/// 1) we know the block *is* fully available.
/// 2) we do not know whether the block is fully available or not, but we are
///    currently speculating that it will be.
/// 3) we are speculating for this block and have used that to speculate for
///    other blocks.
static bool
IsValueFullyAvailableInBlock(BasicBlock *BB,
                             DenseMap<BasicBlock *, char> &FullyAvailableBlocks,
                             uint32_t RecurseDepth) {
  if (RecurseDepth > MaxRecurseDepth)
    return false;

  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock *, char>::iterator, bool> IV =
      FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either. Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks,
                                      RecurseDepth + 1))
      goto SpeculationFailure;

  return true;

// If we get here, we found out that this is not, after all, a fully-available
// block. We have a problem if we speculated on this and used the speculation
// to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect. Walk the (transitive) successors of this block and mark them as
  // 0 if set to one.
  SmallVector<BasicBlock *, 32> BBWorklist;
  BBWorklist.push_back(BB);

  do {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks. This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue; // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    BBWorklist.append(succ_begin(Entry), succ_end(Entry));
  } while (!BBWorklist.empty());

  return false;
}

/// Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI. This returns the value
/// that should be used at LI's definition site.
static Value *
ConstructSSAForLoadSet(LoadInst *LI,
                       SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                       GVN &gvn) {
  // Check for the fully redundant, dominating load case. In this case, we can
  // just use the dominating value directly.
  if (ValuesPerBlock.size() == 1 &&
      gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
                                               LI->getParent())) {
    assert(!ValuesPerBlock[0].AV.isUndefValue() &&
           "Dead BB dominates this block");
    return ValuesPerBlock[0].MaterializeAdjustedValue(LI, gvn);
  }

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode *, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI->getType(), LI->getName());

  for (const AvailableValueInBlock &AV : ValuesPerBlock) {
    BasicBlock *BB = AV.BB;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    // If the value is the load that we will be eliminating, and the block it's
    // available in is the block that the load is in, then don't add it as
    // SSAUpdater will resolve the value to the relevant phi which may let it
    // avoid phi construction entirely if there's actually only one value.
    if (BB == LI->getParent() &&
        ((AV.AV.isSimpleValue() && AV.AV.getSimpleValue() == LI) ||
         (AV.AV.isCoercedLoadValue() && AV.AV.getCoercedLoadValue() == LI)))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LI, gvn));
  }

  // Perform PHI construction.
  return SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());
}

Value *AvailableValue::MaterializeAdjustedValue(LoadInst *LI,
                                                Instruction *InsertPt,
                                                GVN &gvn) const {
  Value *Res;
  Type *LoadTy = LI->getType();
  const DataLayout &DL = LI->getModule()->getDataLayout();
  if (isSimpleValue()) {
    Res = getSimpleValue();
    if (Res->getType() != LoadTy) {
      Res = getStoreValueForLoad(Res, Offset, LoadTy, InsertPt, DL);

      LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset
                        << " " << *getSimpleValue() << '\n'
                        << *Res << '\n'
                        << "\n\n\n");
    }
  } else if (isCoercedLoadValue()) {
    LoadInst *Load = getCoercedLoadValue();
    if (Load->getType() == LoadTy && Offset == 0) {
      Res = Load;
    } else {
      Res = getLoadValueForLoad(Load, Offset, LoadTy, InsertPt, DL);
      // We would like to use gvn.markInstructionForDeletion here, but we can't
      // because the load is already memoized into the leader map table that
      // GVN tracks. It is potentially possible to remove the load from the
      // table, but then all of the operations based on it would need to be
      // rehashed. Just leave the dead load around.
      gvn.getMemDep().removeInstruction(Load);
      LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset
                        << " " << *getCoercedLoadValue() << '\n'
                        << *Res << '\n'
                        << "\n\n\n");
    }
  } else if (isMemIntrinValue()) {
    Res = getMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy,
                                 InsertPt, DL);
    LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                      << " " << *getMemIntrinValue() << '\n'
                      << *Res << '\n'
                      << "\n\n\n");
  } else {
    assert(isUndefValue() && "Should be UndefVal");
    LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL Undef:\n";);
    return UndefValue::get(LoadTy);
  }
  assert(Res && "failed to materialize?");
  return Res;
}

static bool isLifetimeStart(const Instruction *Inst) {
  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}

/// Try to locate the three instructions involved in a missed
/// load-elimination case that is due to an intervening store.
static void reportMayClobberedLoad(LoadInst *LI, MemDepResult DepInfo,
                                   DominatorTree *DT,
                                   OptimizationRemarkEmitter *ORE) {
  using namespace ore;

  User *OtherAccess = nullptr;

  OptimizationRemarkMissed R(DEBUG_TYPE, "LoadClobbered", LI);
  R << "load of type " << NV("Type", LI->getType()) << " not eliminated"
    << setExtraArgs();

  for (auto *U : LI->getPointerOperand()->users())
    if (U != LI && (isa<LoadInst>(U) || isa<StoreInst>(U)) &&
        DT->dominates(cast<Instruction>(U), LI)) {
      // FIXME: for now give up if there are multiple memory accesses that
      // dominate the load. We need further analysis to decide which one
      // we're forwarding from.
      if (OtherAccess)
        OtherAccess = nullptr;
      else
        OtherAccess = U;
    }

  if (OtherAccess)
    R << " in favor of " << NV("OtherAccess", OtherAccess);

  R << " because it is clobbered by " << NV("ClobberedBy", DepInfo.getInst());

  ORE->emit(R);
}

bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
                                  Value *Address, AvailableValue &Res) {
  assert((DepInfo.isDef() || DepInfo.isClobber()) &&
         "expected a local dependence");
  assert(LI->isUnordered() && "rules below are incorrect for ordered access");

  const DataLayout &DL = LI->getModule()->getDataLayout();

  Instruction *DepInst = DepInfo.getInst();
  if (DepInfo.isClobber()) {
    // If the dependence is to a store that writes to a superset of the bits
    // read by the load, we can extract the bits we need for the load from the
    // stored value.
    if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
      // Can't forward from non-atomic to atomic without violating memory model.
      if (Address && LI->isAtomic() <= DepSI->isAtomic()) {
        int Offset =
            analyzeLoadFromClobberingStore(LI->getType(), Address, DepSI, DL);
        if (Offset != -1) {
          Res = AvailableValue::get(DepSI->getValueOperand(), Offset);
          return true;
        }
      }
    }

    // Check to see if we have something like this:
    //   load i32* P
    //   load i8* (P+1)
    // if we have this, replace the later with an extraction from the former.
    if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
      // If this is a clobber and L is the first instruction in its block, then
      // we have the first instruction in the entry block.
      // Can't forward from non-atomic to atomic without violating memory model.
      if (DepLI != LI && Address && LI->isAtomic() <= DepLI->isAtomic()) {
        int Offset =
            analyzeLoadFromClobberingLoad(LI->getType(), Address, DepLI, DL);

        if (Offset != -1) {
          Res = AvailableValue::getLoad(DepLI, Offset);
          return true;
        }
      }
    }

    // If the clobbering value is a memset/memcpy/memmove, see if we can
    // forward a value on from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
      if (Address && !LI->isAtomic()) {
        int Offset = analyzeLoadFromClobberingMemInst(LI->getType(), Address,
                                                      DepMI, DL);
        if (Offset != -1) {
          Res = AvailableValue::getMI(DepMI, Offset);
          return true;
        }
      }
    }
    // Nothing known about this clobber, have to be conservative.
    LLVM_DEBUG(
        // fast print dep, using operator<< on instruction is too slow.
        dbgs() << "GVN: load "; LI->printAsOperand(dbgs());
        dbgs() << " is clobbered by " << *DepInst << '\n';);
    if (ORE->allowExtraAnalysis(DEBUG_TYPE))
      reportMayClobberedLoad(LI, DepInfo, DT, ORE);

    return false;
  }
  assert(DepInfo.isDef() && "follows from above");

  // Loading the allocation -> undef.
  if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI) ||
      isAlignedAllocLikeFn(DepInst, TLI) ||
      // Loading immediately after lifetime begin -> undef.
      isLifetimeStart(DepInst)) {
    Res = AvailableValue::get(UndefValue::get(LI->getType()));
    return true;
  }

  // Loading from calloc (which zero initializes memory) -> zero.
  if (isCallocLikeFn(DepInst, TLI)) {
    Res = AvailableValue::get(Constant::getNullValue(LI->getType()));
    return true;
  }

  if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
    // Reject loads and stores that are to the same address but are of
    // different types if we have to. If the stored value is larger or equal to
    // the loaded value, we can reuse it.
    if (!canCoerceMustAliasedValueToLoad(S->getValueOperand(), LI->getType(),
                                         DL))
      return false;

    // Can't forward from non-atomic to atomic without violating memory model.
    if (S->isAtomic() < LI->isAtomic())
      return false;

    Res = AvailableValue::get(S->getValueOperand());
    return true;
  }

  if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
    // If the types mismatch and we can't handle it, reject reuse of the load.
    // If the stored value is larger or equal to the loaded value, we can reuse
    // it.
    if (!canCoerceMustAliasedValueToLoad(LD, LI->getType(), DL))
      return false;

    // Can't forward from non-atomic to atomic without violating memory model.
    if (LD->isAtomic() < LI->isAtomic())
      return false;

    Res = AvailableValue::getLoad(LD);
    return true;
  }

  // Unknown def - must be conservative.
  LLVM_DEBUG(
      // fast print dep, using operator<< on instruction is too slow.
      dbgs() << "GVN: load "; LI->printAsOperand(dbgs());
      dbgs() << " has unknown def " << *DepInst << '\n';);
  return false;
}
void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
                                  AvailValInBlkVect &ValuesPerBlock,
                                  UnavailBlkVect &UnavailableBlocks) {
  // Filter out useless results (non-locals, etc). Keep track of the blocks
  // where we have a value available in repl, also keep track of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
  unsigned NumDeps = Deps.size();
  for (unsigned i = 0, e = NumDeps; i != e; ++i) {
    BasicBlock *DepBB = Deps[i].getBB();
    MemDepResult DepInfo = Deps[i].getResult();

    if (DeadBlocks.count(DepBB)) {
      // A dead dependent mem-op is disguised as a load evaluating the same
      // value as the load in question.
      ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB));
      continue;
    }

    if (!DepInfo.isDef() && !DepInfo.isClobber()) {
      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    // The address being loaded in this non-local block may not be the same as
    // the pointer operand of the load if PHI translation occurs. Make sure
    // to consider the right address.
    Value *Address = Deps[i].getAddress();

    AvailableValue AV;
    if (AnalyzeLoadAvailability(LI, DepInfo, Address, AV)) {
      // Subtlety: because we know this was a non-local dependency, we know
      // it's safe to materialize anywhere between the instruction within
      // DepInfo and the end of its block.
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                          std::move(AV)));
    } else {
      UnavailableBlocks.push_back(DepBB);
    }
  }

  assert(NumDeps == ValuesPerBlock.size() + UnavailableBlocks.size() &&
         "post condition violation");
}

bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
                         UnavailBlkVect &UnavailableBlocks) {
  // Okay, we have *some* definitions of the value. This means that the value
  // is available in some of our (transitive) predecessors. Let's think about
  // doing PRE of this load. This will involve inserting a new load into the
  // predecessor when it's not available. We could do this in general, but
  // prefer to not increase code size. As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).

  SmallPtrSet<BasicBlock *, 4> Blockers(UnavailableBlocks.begin(),
                                        UnavailableBlocks.end());

  // Let's find the first basic block with more than one predecessor. Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;
  bool IsSafeToSpeculativelyExecute = isSafeToSpeculativelyExecute(LI);

  // Check that there are no implicit control flow instructions above our load
  // in its block. If there is an instruction that doesn't always pass the
  // execution to the following instruction, then moving through it may become
  // invalid. For example:
  //
  // int arr[LEN];
  // int index = ???;
  // ...
  // guard(0 <= index && index < LEN);
  // use(arr[index]);
  //
  // It is illegal to move the array access to any point above the guard,
  // because if the index is out of bounds we should deoptimize rather than
  // access the array.
  // Check that there is no guard in this block above our instruction.
  if (!IsSafeToSpeculativelyExecute && ICF->isDominatedByICFIFromSameBlock(LI))
    return false;
  while (TmpBB->getSinglePredecessor()) {
    TmpBB = TmpBB->getSinglePredecessor();
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;

    // If any of these blocks has more than one successor (i.e. if the edge we
    // just traversed was critical), then there are other paths through this
    // block along which the load may not be anticipated. Hoisting the load
    // above this block would be adding the load to execution paths along
    // which it was not previously executed.
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      return false;

    // Check that there is no implicit control flow in a block above.
    if (!IsSafeToSpeculativelyExecute && ICF->hasICF(TmpBB))
      return false;
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // Check to see how many predecessors have the loaded value fully
  // available.
  MapVector<BasicBlock *, Value *> PredLoads;
  DenseMap<BasicBlock *, char> FullyAvailableBlocks;
  for (const AvailableValueInBlock &AV : ValuesPerBlock)
    FullyAvailableBlocks[AV.BB] = true;
  for (BasicBlock *UnavailableBB : UnavailableBlocks)
    FullyAvailableBlocks[UnavailableBB] = false;
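
  // At this point the map holds only the definitely-available (1) and
  // definitely-unavailable (0) blocks; IsValueFullyAvailableInBlock fills in
  // the speculative states (2 and 3) as it walks predecessors below.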

  SmallVector<BasicBlock *, 4> CriticalEdgePred;
  for (BasicBlock *Pred : predecessors(LoadBB)) {
    // If any predecessor block is an EH pad that does not allow non-PHI
    // instructions before the terminator, we can't PRE the load.
    if (Pred->getTerminator()->isEHPad()) {
      LLVM_DEBUG(
          dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
                 << Pred->getName() << "': " << *LI << '\n');
      return false;
    }

    if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks, 0)) {
      continue;
    }

    if (Pred->getTerminator()->getNumSuccessors() != 1) {
      if (isa<IndirectBrInst>(Pred->getTerminator())) {
        LLVM_DEBUG(
            dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
                   << Pred->getName() << "': " << *LI << '\n');
        return false;
      }

      // FIXME: Can we support the fallthrough edge?
      if (isa<CallBrInst>(Pred->getTerminator())) {
        LLVM_DEBUG(
            dbgs() << "COULD NOT PRE LOAD BECAUSE OF CALLBR CRITICAL EDGE '"
                   << Pred->getName() << "': " << *LI << '\n');
        return false;
      }

      if (LoadBB->isEHPad()) {
        LLVM_DEBUG(
            dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
                   << Pred->getName() << "': " << *LI << '\n');
        return false;
      }

      CriticalEdgePred.push_back(Pred);
    } else {
      // Only add the predecessors that will not be split for now.
      PredLoads[Pred] = nullptr;
    }
  }

  // Decide whether PRE is profitable for this load.
  unsigned NumUnavailablePreds = PredLoads.size() + CriticalEdgePred.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should already be eliminated!");

  // If this load is unavailable in multiple predecessors, reject it.
  // FIXME: If we could restructure the CFG, we could make a common pred with
  // all the preds that don't have an available LI and insert a new load into
  // that one block.
  if (NumUnavailablePreds != 1)
    return false;

  // Split critical edges, and update the unavailable predecessors accordingly.
  for (BasicBlock *OrigPred : CriticalEdgePred) {
    BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
    assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!");
    PredLoads[NewPred] = nullptr;
    LLVM_DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
                      << LoadBB->getName() << '\n');
  }

  // Check if the load can safely be moved to all the unavailable predecessors.
  bool CanDoPRE = true;
  const DataLayout &DL = LI->getModule()->getDataLayout();
  SmallVector<Instruction *, 8> NewInsts;
  for (auto &PredLoad : PredLoads) {
    BasicBlock *UnavailablePred = PredLoad.first;

    // Do PHI translation to get its value in the predecessor if necessary. The
    // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
    // We do the translation for each edge we skipped by going from LI's block
    // to LoadBB, otherwise we might miss pieces needing translation.

    // If all preds have a single successor, then we know it is safe to insert
    // the load on the pred (?!?), so we can insert code to materialize the
    // pointer if it is not available.
    Value *LoadPtr = LI->getPointerOperand();
    BasicBlock *Cur = LI->getParent();
    while (Cur != LoadBB) {
      PHITransAddr Address(LoadPtr, DL, AC);
      LoadPtr = Address.PHITranslateWithInsertion(
          Cur, Cur->getSinglePredecessor(), *DT, NewInsts);
      if (!LoadPtr) {
        CanDoPRE = false;
        break;
      }
      Cur = Cur->getSinglePredecessor();
    }

    if (LoadPtr) {
      PHITransAddr Address(LoadPtr, DL, AC);
      LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred, *DT,
                                                  NewInsts);
    }
    // If we couldn't find or insert a computation of this phi translated
    // value, we fail PRE.
    if (!LoadPtr) {
      LLVM_DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
                        << *LI->getPointerOperand() << "\n");
      CanDoPRE = false;
      break;
    }

    PredLoad.second = LoadPtr;
  }

  if (!CanDoPRE) {
    while (!NewInsts.empty()) {
      // Erase instructions generated by the failed PHI translation before
      // trying to number them. PHI translation might insert instructions
      // in basic blocks other than the current one, and we delete them
      // directly, as markInstructionForDeletion only allows removing from the
      // current basic block.
      NewInsts.pop_back_val()->eraseFromParent();
    }
    // HINT: Don't revert the edge-splitting as the following transformations
    // may also need to split these critical edges.
    return !CriticalEdgePred.empty();
  }

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors, do
  // it.
  LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
  LLVM_DEBUG(if (!NewInsts.empty()) dbgs()
             << "INSERTED " << NewInsts.size() << " INSTS: " << *NewInsts.back()
             << '\n');

  // Assign value numbers to the new instructions.
  for (Instruction *I : NewInsts) {
    // Instructions that have been inserted in predecessor(s) to materialize
    // the load address do not retain their original debug locations. Doing
    // so could lead to confusing (but correct) source attributions.
    if (const DebugLoc &DL = I->getDebugLoc())
      I->setDebugLoc(DebugLoc::get(0, 0, DL.getScope(), DL.getInlinedAt()));

    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map. However, in doing so, we risk getting into
    // ordering issues. If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.
    VN.lookupOrAdd(I);
  }

  for (const auto &PredLoad : PredLoads) {
    BasicBlock *UnavailablePred = PredLoad.first;
    Value *LoadPtr = PredLoad.second;

    auto *NewLoad = new LoadInst(
        LI->getType(), LoadPtr, LI->getName() + ".pre", LI->isVolatile(),
        LI->getAlign(), LI->getOrdering(), LI->getSyncScopeID(),
        UnavailablePred->getTerminator());
    NewLoad->setDebugLoc(LI->getDebugLoc());

    // Transfer the old load's AA tags to the new load.
    AAMDNodes Tags;
    LI->getAAMetadata(Tags);
    if (Tags)
      NewLoad->setAAMetadata(Tags);

    if (auto *MD = LI->getMetadata(LLVMContext::MD_invariant_load))
      NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD);
    if (auto *InvGroupMD = LI->getMetadata(LLVMContext::MD_invariant_group))
      NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD);
    if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range))
      NewLoad->setMetadata(LLVMContext::MD_range, RangeMD);

    // We do not propagate the old load's debug location, because the new
    // load now lives in a different BB, and we want to avoid a jumpy line
    // table.
    // FIXME: How do we retain source locations without causing poor debugging
    // behavior?

    // Add the newly created load.
    ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
                                                        NewLoad));
    MD->invalidateCachedPointerInfo(LoadPtr);
    LLVM_DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
  }

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this);
  LI->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(LI);
  if (Instruction *I = dyn_cast<Instruction>(V))
    I->setDebugLoc(LI->getDebugLoc());
  if (V->getType()->isPtrOrPtrVectorTy())
    MD->invalidateCachedPointerInfo(V);
  markInstructionForDeletion(LI);
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "LoadPRE", LI)
           << "load eliminated by PRE";
  });
  ++NumPRELoad;
  return true;
}

static void reportLoadElim(LoadInst *LI, Value *AvailableValue,
                           OptimizationRemarkEmitter *ORE) {
  using namespace ore;

  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "LoadElim", LI)
           << "load of type " << NV("Type", LI->getType()) << " eliminated"
           << setExtraArgs() << " in favor of "
           << NV("InfavorOfValue", AvailableValue);
  });
}

/// Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI) {
  // Non-local speculations are not allowed under asan.
  if (LI->getParent()->getParent()->hasFnAttribute(
          Attribute::SanitizeAddress) ||
      LI->getParent()->getParent()->hasFnAttribute(
          Attribute::SanitizeHWAddress))
    return false;

  // Step 1: Find the non-local dependencies of the load.
  LoadDepVect Deps;
  MD->getNonLocalPointerDependency(LI, Deps);

  // If we had to process more than MaxNumDeps (one hundred by default)
  // dependencies, this load isn't worth worrying about. Optimizing it will be
  // too expensive.
  unsigned NumDeps = Deps.size();
  if (NumDeps > MaxNumDeps)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block. Reject this early.
  if (NumDeps == 1 &&
      !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
    LLVM_DEBUG(dbgs() << "GVN: non-local load "; LI->printAsOperand(dbgs());
               dbgs() << " has unknown dependencies\n";);
    return false;
  }

  // If this load follows a GEP, see if we can PRE the indices before analyzing.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0))) {
    for (GetElementPtrInst::op_iterator OI = GEP->idx_begin(),
                                        OE = GEP->idx_end();
         OI != OE; ++OI)
      if (Instruction *I = dyn_cast<Instruction>(OI->get()))
        performScalarPRE(I);
  }

  // Step 2: Analyze the availability of the load.
  AvailValInBlkVect ValuesPerBlock;
  UnavailBlkVect UnavailableBlocks;
  AnalyzeLoadAvailability(LI, Deps, ValuesPerBlock, UnavailableBlocks);

  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty())
    return false;

  // Step 3: Eliminate full redundancy.
  //
  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value. Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    LLVM_DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this);
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (Instruction *I = dyn_cast<Instruction>(V))
      // If instruction I has debug info, then we should not update it.
      // Also, if I has a null DebugLoc, then it is still potentially incorrect
      // to propagate LI's DebugLoc because LI may not post-dominate I.
      if (LI->getDebugLoc() && LI->getParent() == I->getParent())
        I->setDebugLoc(LI->getDebugLoc());
    if (V->getType()->isPtrOrPtrVectorTy())
      MD->invalidateCachedPointerInfo(V);
    markInstructionForDeletion(LI);
    ++NumGVNLoad;
    reportLoadElim(LI, V, ORE);
    return true;
  }

  // Step 4: Eliminate partial redundancy.
  if (!isPREEnabled() || !isLoadPREEnabled())
    return false;
  if (!isLoadInLoopPREEnabled() && this->LI &&
      this->LI->getLoopFor(LI->getParent()))
    return false;

  return PerformLoadPRE(LI, ValuesPerBlock, UnavailableBlocks);
}

static bool impliesEquivalanceIfTrue(CmpInst *Cmp) {
  if (Cmp->getPredicate() == CmpInst::Predicate::ICMP_EQ)
    return true;

  // Floating point comparisons can be equal, but not equivalent. Cases:
  // NaNs for unordered operators
  // +0.0 vs -0.0 for all operators
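  // For example, "fcmp oeq +0.0, -0.0" is true even though the two operands
  // behave differently in other contexts (e.g. 1.0/+0.0 is +inf while
  // 1.0/-0.0 is -inf), so oeq alone does not justify replacing one operand
  // with the other.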
  if (Cmp->getPredicate() == CmpInst::Predicate::FCMP_OEQ ||
      (Cmp->getPredicate() == CmpInst::Predicate::FCMP_UEQ &&
       Cmp->getFastMathFlags().noNaNs())) {
    Value *LHS = Cmp->getOperand(0);
    Value *RHS = Cmp->getOperand(1);
    // If we can prove either side non-zero, then equality must imply
    // equivalence.
    // FIXME: We should do this optimization if 'no signed zeros' is
    // applicable via an instruction-level fast-math-flag or some other
    // indicator that relaxed FP semantics are being used.
    if (isa<ConstantFP>(LHS) && !cast<ConstantFP>(LHS)->isZero())
      return true;
    if (isa<ConstantFP>(RHS) && !cast<ConstantFP>(RHS)->isZero())
      return true;
    // TODO: Handle vector floating point constants
  }
  return false;
}
1442
impliesEquivalanceIfFalse(CmpInst * Cmp)1443 static bool impliesEquivalanceIfFalse(CmpInst* Cmp) {
1444 if (Cmp->getPredicate() == CmpInst::Predicate::ICMP_NE)
1445 return true;
1446
1447 // Floating point comparisons can be equal, but not equivelent. Cases:
1448 // NaNs for unordered operators
1449 // +0.0 vs 0.0 for all operators
  if ((Cmp->getPredicate() == CmpInst::Predicate::FCMP_ONE &&
       Cmp->getFastMathFlags().noNaNs()) ||
      Cmp->getPredicate() == CmpInst::Predicate::FCMP_UNE) {
    Value *LHS = Cmp->getOperand(0);
    Value *RHS = Cmp->getOperand(1);
    // If we can prove either side non-zero, then equality must imply
    // equivalence.
    // FIXME: We should do this optimization if 'no signed zeros' is
    // applicable via an instruction-level fast-math-flag or some other
    // indicator that relaxed FP semantics are being used.
    if (isa<ConstantFP>(LHS) && !cast<ConstantFP>(LHS)->isZero())
      return true;
    if (isa<ConstantFP>(RHS) && !cast<ConstantFP>(RHS)->isZero())
      return true;
    // TODO: Handle vector floating point constants
  }
  return false;
}

static bool hasUsersIn(Value *V, BasicBlock *BB) {
  for (User *U : V->users())
    if (isa<Instruction>(U) &&
        cast<Instruction>(U)->getParent() == BB)
      return true;
  return false;
}

bool GVN::processAssumeIntrinsic(IntrinsicInst *IntrinsicI) {
  assert(IntrinsicI->getIntrinsicID() == Intrinsic::assume &&
         "This function can only be called with llvm.assume intrinsic");
  Value *V = IntrinsicI->getArgOperand(0);

  if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
    if (Cond->isZero()) {
      Type *Int8Ty = Type::getInt8Ty(V->getContext());
      // Insert a new store-to-null instruction before the assume to indicate
      // that this code is not reachable. FIXME: We could insert an unreachable
      // instruction directly because we can modify the CFG.
      new StoreInst(UndefValue::get(Int8Ty),
                    Constant::getNullValue(Int8Ty->getPointerTo()),
                    IntrinsicI);
    }
    if (isAssumeWithEmptyBundle(*IntrinsicI))
      markInstructionForDeletion(IntrinsicI);
    return false;
  } else if (isa<Constant>(V)) {
    // If it's a constant and not false, it must evaluate to true. This means
    // our assume is assume(true), and thus pointless, so we don't want to do
    // anything more here.
    return false;
  }

  Constant *True = ConstantInt::getTrue(V->getContext());
  bool Changed = false;

  for (BasicBlock *Successor : successors(IntrinsicI->getParent())) {
    BasicBlockEdge Edge(IntrinsicI->getParent(), Successor);

    // This property is only true in dominated successors; propagateEquality
    // will check dominance for us.
    Changed |= propagateEquality(V, True, Edge, false);
  }

  // We can replace the assume value with true, which covers cases like this:
  // call void @llvm.assume(i1 %cmp)
  // br i1 %cmp, label %bb1, label %bb2 ; will change %cmp to true
  ReplaceOperandsWithMap[V] = True;

  // If we find an equality fact, canonicalize all dominated uses in this block
  // to one of the two values. We heuristically choose the "oldest" of the
  // two, where age is determined by value number. (Note that propagateEquality
  // above handles the cross-block case.)
  //
  // Key cases to cover are:
  // 1)
  // %cmp = fcmp oeq float 3.000000e+00, %0 ; const on lhs could happen
  // call void @llvm.assume(i1 %cmp)
  // ret float %0 ; will change it to ret float 3.000000e+00
  // 2)
  // %load = load float, float* %addr
  // %cmp = fcmp oeq float %load, %0
  // call void @llvm.assume(i1 %cmp)
  // ret float %load ; will change it to ret float %0
  if (auto *CmpI = dyn_cast<CmpInst>(V)) {
    if (impliesEquivalanceIfTrue(CmpI)) {
      Value *CmpLHS = CmpI->getOperand(0);
      Value *CmpRHS = CmpI->getOperand(1);
      // Heuristically pick the better replacement -- the choice of heuristic
      // isn't terribly important here, but the fact that we canonicalize on
      // some replacement is key for exposing other simplifications.
      // TODO: pull this out as a helper function and reuse w/ existing
      // (slightly different) logic.
      if (isa<Constant>(CmpLHS) && !isa<Constant>(CmpRHS))
        std::swap(CmpLHS, CmpRHS);
      if (!isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))
        std::swap(CmpLHS, CmpRHS);
      if ((isa<Argument>(CmpLHS) && isa<Argument>(CmpRHS)) ||
          (isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))) {
        // Move the 'oldest' value to the right-hand side, using the value
        // number as a proxy for age.
        uint32_t LVN = VN.lookupOrAdd(CmpLHS);
        uint32_t RVN = VN.lookupOrAdd(CmpRHS);
        if (LVN < RVN)
          std::swap(CmpLHS, CmpRHS);
      }

      // Handle the degenerate case where we either haven't pruned a dead path
      // or removed a trivial assume yet.
      if (isa<Constant>(CmpLHS) && isa<Constant>(CmpRHS))
        return Changed;

      LLVM_DEBUG(dbgs() << "Replacing dominated uses of "
                        << *CmpLHS << " with "
                        << *CmpRHS << " in block "
                        << IntrinsicI->getParent()->getName() << "\n");

      // Set up the replacement map - this handles uses within the same block.
      if (hasUsersIn(CmpLHS, IntrinsicI->getParent()))
        ReplaceOperandsWithMap[CmpLHS] = CmpRHS;

      // NOTE: The non-block-local cases are handled by the call to
      // propagateEquality above; this block is just about handling the
      // block-local cases. TODO: There's a bunch of logic in propagateEquality
      // which isn't duplicated for the block-local case; can we share it
      // somehow?
    }
  }
  return Changed;
}

static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) {
  patchReplacementInstruction(I, Repl);
  I->replaceAllUsesWith(Repl);
}

/// Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L) {
  if (!MD)
    return false;

  // This code hasn't been audited for ordered or volatile memory access.
  if (!L->isUnordered())
    return false;

  if (L->use_empty()) {
    markInstructionForDeletion(L);
    return true;
  }

  // ... to a pointer that has been loaded from before...
  MemDepResult Dep = MD->getDependency(L);

  // If it is defined in another block, try harder.
  if (Dep.isNonLocal())
    return processNonLocalLoad(L);

  // Only handle the local case below.
  if (!Dep.isDef() && !Dep.isClobber()) {
    // This might be a NonFuncLocal or an Unknown.
    LLVM_DEBUG(
        // Fast-print dep; using operator<< on instruction is too slow.
        dbgs() << "GVN: load "; L->printAsOperand(dbgs());
        dbgs() << " has unknown dependence\n";);
    return false;
  }

  AvailableValue AV;
  if (AnalyzeLoadAvailability(L, Dep, L->getPointerOperand(), AV)) {
    Value *AvailableValue = AV.MaterializeAdjustedValue(L, L, *this);

    // Replace the load!
    patchAndReplaceAllUsesWith(L, AvailableValue);
    markInstructionForDeletion(L);
    ++NumGVNLoad;
    reportLoadElim(L, AvailableValue, ORE);
    // Tell MDA to re-examine the reused pointer since we might have more
    // information after forwarding it.
    if (MD && AvailableValue->getType()->isPtrOrPtrVectorTy())
      MD->invalidateCachedPointerInfo(AvailableValue);
    return true;
  }

  return false;
}

/// Return a pair whose first field is the value number of \p Exp and whose
/// second field indicates whether that value number was newly created.
std::pair<uint32_t, bool>
GVN::ValueTable::assignExpNewValueNum(Expression &Exp) {
  uint32_t &e = expressionNumbering[Exp];
  bool CreateNewValNum = !e;
  if (CreateNewValNum) {
    Expressions.push_back(Exp);
    if (ExprIdx.size() < nextValueNumber + 1)
      ExprIdx.resize(nextValueNumber * 2);
    e = nextValueNumber;
    ExprIdx[nextValueNumber++] = nextExprNumber++;
  }
  return {e, CreateNewValNum};
}

/// Return whether all the values related to the same \p Num are
/// defined in \p BB.
bool GVN::ValueTable::areAllValsInBB(uint32_t Num, const BasicBlock *BB,
                                     GVN &Gvn) {
  LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
  while (Vals && Vals->BB == BB)
    Vals = Vals->Next;
  return !Vals;
}

/// Wrap phiTranslateImpl to provide caching functionality.
uint32_t GVN::ValueTable::phiTranslate(const BasicBlock *Pred,
                                       const BasicBlock *PhiBlock, uint32_t Num,
                                       GVN &Gvn) {
  auto FindRes = PhiTranslateTable.find({Num, Pred});
  if (FindRes != PhiTranslateTable.end())
    return FindRes->second;
  uint32_t NewNum = phiTranslateImpl(Pred, PhiBlock, Num, Gvn);
  PhiTranslateTable.insert({{Num, Pred}, NewNum});
  return NewNum;
}

// Return true if the value numbers \p Num and NewNum represent equal values.
// Return false if the result is unknown.
bool GVN::ValueTable::areCallValsEqual(uint32_t Num, uint32_t NewNum,
                                       const BasicBlock *Pred,
                                       const BasicBlock *PhiBlock, GVN &Gvn) {
  CallInst *Call = nullptr;
  LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
  while (Vals) {
    Call = dyn_cast<CallInst>(Vals->Val);
    if (Call && Call->getParent() == PhiBlock)
      break;
    Vals = Vals->Next;
  }

  if (AA->doesNotAccessMemory(Call))
    return true;

  if (!MD || !AA->onlyReadsMemory(Call))
    return false;

  MemDepResult local_dep = MD->getDependency(Call);
  if (!local_dep.isNonLocal())
    return false;

  const MemoryDependenceResults::NonLocalDepInfo &deps =
      MD->getNonLocalCallDependency(Call);

  // Check to see if the Call has no function-local clobber.
  for (unsigned i = 0; i < deps.size(); i++) {
    if (deps[i].getResult().isNonFuncLocal())
      return true;
  }
  return false;
}

/// Translate value number \p Num using phis, so that it has the values of
/// the phis in BB.
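/// As an illustrative sketch (names are hypothetical): given
///   %p = phi i32 [ %a, %Pred1 ], [ %b, %Pred2 ]
///   %x = add i32 %p, 1
/// translating the value number of %x through the edge from %Pred1 yields
/// the value number of "add i32 %a, 1".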
uint32_t GVN::ValueTable::phiTranslateImpl(const BasicBlock *Pred,
                                           const BasicBlock *PhiBlock,
                                           uint32_t Num, GVN &Gvn) {
  if (PHINode *PN = NumberingPhi[Num]) {
    for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
      if (PN->getParent() == PhiBlock && PN->getIncomingBlock(i) == Pred)
        if (uint32_t TransVal = lookup(PN->getIncomingValue(i), false))
          return TransVal;
    }
    return Num;
  }

  // If any value related to Num is defined in a BB other than PhiBlock, it
  // cannot depend on a phi in PhiBlock without going through a backedge. We
  // can do an early exit in that case to save compile time.
  if (!areAllValsInBB(Num, PhiBlock, Gvn))
    return Num;

  if (Num >= ExprIdx.size() || ExprIdx[Num] == 0)
    return Num;
  Expression Exp = Expressions[ExprIdx[Num]];

  for (unsigned i = 0; i < Exp.varargs.size(); i++) {
    // For InsertValue and ExtractValue, some varargs are index numbers
    // instead of value numbers. Those index numbers should not be
    // translated.
    if ((i > 1 && Exp.opcode == Instruction::InsertValue) ||
        (i > 0 && Exp.opcode == Instruction::ExtractValue) ||
        (i > 1 && Exp.opcode == Instruction::ShuffleVector))
      continue;
    Exp.varargs[i] = phiTranslate(Pred, PhiBlock, Exp.varargs[i], Gvn);
  }

  if (Exp.commutative) {
    assert(Exp.varargs.size() == 2 && "Unsupported commutative expression!");
    if (Exp.varargs[0] > Exp.varargs[1]) {
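      // Canonicalize by operand order; note that swapping the operands of a
      // comparison also requires flipping its predicate, e.g. (illustrative)
      // "icmp slt %a, %b" is equivalent to "icmp sgt %b, %a".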
      std::swap(Exp.varargs[0], Exp.varargs[1]);
      uint32_t Opcode = Exp.opcode >> 8;
      if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)
        Exp.opcode = (Opcode << 8) |
                     CmpInst::getSwappedPredicate(
                         static_cast<CmpInst::Predicate>(Exp.opcode & 255));
    }
  }

  if (uint32_t NewNum = expressionNumbering[Exp]) {
    if (Exp.opcode == Instruction::Call && NewNum != Num)
      return areCallValsEqual(Num, NewNum, Pred, PhiBlock, Gvn) ? NewNum : Num;
    return NewNum;
  }
  return Num;
}

/// Erase stale entries from the phiTranslate cache so that phiTranslate can
/// be computed again.
void GVN::ValueTable::eraseTranslateCacheEntry(uint32_t Num,
                                               const BasicBlock &CurrBlock) {
  for (const BasicBlock *Pred : predecessors(&CurrBlock)) {
    auto FindRes = PhiTranslateTable.find({Num, Pred});
    if (FindRes != PhiTranslateTable.end())
      PhiTranslateTable.erase(FindRes);
  }
}

// In order to find a leader for a given value number at a
// specific basic block, we first obtain the list of all Values for that
// number, and then scan the list to find one whose block dominates the block
// in question. This is fast because dominator tree queries consist of only
// a few comparisons of DFS numbers.
Value *GVN::findLeader(const BasicBlock *BB, uint32_t num) {
  LeaderTableEntry Vals = LeaderTable[num];
  if (!Vals.Val) return nullptr;

  Value *Val = nullptr;
  if (DT->dominates(Vals.BB, BB)) {
    Val = Vals.Val;
    if (isa<Constant>(Val)) return Val;
  }

  LeaderTableEntry *Next = Vals.Next;
  while (Next) {
    if (DT->dominates(Next->BB, BB)) {
      if (isa<Constant>(Next->Val)) return Next->Val;
      if (!Val) Val = Next->Val;
    }

    Next = Next->Next;
  }

  return Val;
}

/// There is an edge from 'Src' to 'Dst'. Return
/// true if every path from the entry block to 'Dst' passes via this edge. In
/// particular 'Dst' must not be reachable via another edge from 'Src'.
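/// E.g. (illustrative) for "br i1 %c, label %T, label %F" in Src, the edge
/// Src->T satisfies this when Src is T's single predecessor, since every
/// path from the entry to T must then use that edge.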
static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E,
                                       DominatorTree *DT) {
  // While in theory it is interesting to consider the case in which Dst has
  // more than one predecessor, because Dst might be part of a loop which is
  // only reachable from Src, in practice it is pointless since at the time
  // GVN runs all such loops have preheaders, which means that Dst will have
  // been changed to have only one predecessor, namely Src.
  const BasicBlock *Pred = E.getEnd()->getSinglePredecessor();
  assert((!Pred || Pred == E.getStart()) &&
         "No edge between these basic blocks!");
  return Pred != nullptr;
}

void GVN::assignBlockRPONumber(Function &F) {
  BlockRPONumber.clear();
  uint32_t NextBlockNumber = 1;
  ReversePostOrderTraversal<Function *> RPOT(&F);
  for (BasicBlock *BB : RPOT)
    BlockRPONumber[BB] = NextBlockNumber++;
  InvalidBlockRPONumbers = false;
}

bool GVN::replaceOperandsForInBlockEquality(Instruction *Instr) const {
  bool Changed = false;
  for (unsigned OpNum = 0; OpNum < Instr->getNumOperands(); ++OpNum) {
    Value *Operand = Instr->getOperand(OpNum);
    auto it = ReplaceOperandsWithMap.find(Operand);
    if (it != ReplaceOperandsWithMap.end()) {
      LLVM_DEBUG(dbgs() << "GVN replacing: " << *Operand << " with "
                        << *it->second << " in instruction " << *Instr << '\n');
      Instr->setOperand(OpNum, it->second);
      Changed = true;
    }
  }
  return Changed;
}

/// The given values are known to be equal in every block
/// dominated by 'Root'. Exploit this, for example by replacing 'LHS' with
/// 'RHS' everywhere in the scope. Returns whether a change was made.
/// If DominatesByEdge is false, then it means that we will propagate the RHS
/// value starting from the end of Root.Start.
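/// As an illustrative sketch (names are hypothetical): given
///   %cmp = icmp eq i32 %x, 7
///   br i1 %cmp, label %taken, label %other
/// propagating "%cmp == true" along the %taken edge lets every dominated use
/// of %x be rewritten to the constant 7.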
bool GVN::propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root,
                            bool DominatesByEdge) {
  SmallVector<std::pair<Value*, Value*>, 4> Worklist;
  Worklist.push_back(std::make_pair(LHS, RHS));
  bool Changed = false;
  // For speed, compute a conservative fast approximation to
  // DT->dominates(Root, Root.getEnd());
  const bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT);

  while (!Worklist.empty()) {
    std::pair<Value*, Value*> Item = Worklist.pop_back_val();
    LHS = Item.first; RHS = Item.second;

    if (LHS == RHS)
      continue;
    assert(LHS->getType() == RHS->getType() && "Equality but unequal types!");

    // Don't try to propagate equalities between constants.
    if (isa<Constant>(LHS) && isa<Constant>(RHS))
      continue;

    // Prefer a constant on the right-hand side, or an Argument if no
    // constants.
    if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS)))
      std::swap(LHS, RHS);
    assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) && "Unexpected value!");

    // If there is no obvious reason to prefer the left-hand side over the
    // right-hand side, ensure the longest lived term is on the right-hand
    // side, so the shortest lived term will be replaced by the longest lived.
    // This tends to expose more simplifications.
    uint32_t LVN = VN.lookupOrAdd(LHS);
    if ((isa<Argument>(LHS) && isa<Argument>(RHS)) ||
        (isa<Instruction>(LHS) && isa<Instruction>(RHS))) {
      // Move the 'oldest' value to the right-hand side, using the value number
      // as a proxy for age.
      uint32_t RVN = VN.lookupOrAdd(RHS);
      if (LVN < RVN) {
        std::swap(LHS, RHS);
        LVN = RVN;
      }
    }

    // If value numbering later sees that an instruction in the scope is equal
    // to 'LHS' then ensure it will be turned into 'RHS'. In order to preserve
    // the invariant that instructions only occur in the leader table for their
    // own value number (this is used by removeFromLeaderTable), do not do this
    // if RHS is an instruction (if an instruction in the scope is morphed into
    // LHS then it will be turned into RHS by the next GVN iteration anyway, so
    // using the leader table is about compiling faster, not optimizing
    // better). The leader table only tracks basic blocks, not edges, so only
    // add to it in the simple case where the edge dominates the end.
    if (RootDominatesEnd && !isa<Instruction>(RHS))
      addToLeaderTable(LVN, RHS, Root.getEnd());

    // Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope. As
    // LHS always has at least one use that is not dominated by Root, this will
    // never do anything if LHS has only one use.
    if (!LHS->hasOneUse()) {
      unsigned NumReplacements =
          DominatesByEdge
              ? replaceDominatedUsesWith(LHS, RHS, *DT, Root)
              : replaceDominatedUsesWith(LHS, RHS, *DT, Root.getStart());

      Changed |= NumReplacements > 0;
      NumGVNEqProp += NumReplacements;
      // Cached information for anything that uses LHS will be invalid.
      if (MD)
        MD->invalidateCachedPointerInfo(LHS);
    }

    // Now try to deduce additional equalities from this one. For example, if
    // the known equality was "(A != B)" == "false" then it follows that A and
    // B are equal in the scope. Only boolean equalities with an explicit true
    // or false RHS are currently supported.
    if (!RHS->getType()->isIntegerTy(1))
      // Not a boolean equality - bail out.
      continue;
    ConstantInt *CI = dyn_cast<ConstantInt>(RHS);
    if (!CI)
      // RHS neither 'true' nor 'false' - bail out.
      continue;
    // Whether RHS equals 'true'. Otherwise it equals 'false'.
    bool isKnownTrue = CI->isMinusOne();
    bool isKnownFalse = !isKnownTrue;

    // If "A && B" is known true then both A and B are known true. If "A || B"
    // is known false then both A and B are known false.
    Value *A, *B;
    if ((isKnownTrue && match(LHS, m_And(m_Value(A), m_Value(B)))) ||
        (isKnownFalse && match(LHS, m_Or(m_Value(A), m_Value(B))))) {
      Worklist.push_back(std::make_pair(A, RHS));
      Worklist.push_back(std::make_pair(B, RHS));
      continue;
    }

    // If we are propagating an equality like "(A == B)" == "true" then also
    // propagate the equality A == B. When propagating a comparison such as
    // "(A >= B)" == "true", replace all instances of "A < B" with "false".
    if (CmpInst *Cmp = dyn_cast<CmpInst>(LHS)) {
      Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);

      // If "A == B" is known true, or "A != B" is known false, then replace
      // A with B everywhere in the scope. For floating point operations, we
      // have to be careful since equality does not always imply equivalence.
      if ((isKnownTrue && impliesEquivalanceIfTrue(Cmp)) ||
          (isKnownFalse && impliesEquivalanceIfFalse(Cmp)))
        Worklist.push_back(std::make_pair(Op0, Op1));

      // If "A >= B" is known true, replace "A < B" with false everywhere.
      CmpInst::Predicate NotPred = Cmp->getInversePredicate();
      Constant *NotVal = ConstantInt::get(Cmp->getType(), isKnownFalse);
      // Since we don't have the instruction "A < B" immediately to hand, work
      // out the value number that it would have and use that to find an
      // appropriate instruction (if any).
      uint32_t NextNum = VN.getNextUnusedValueNumber();
      uint32_t Num = VN.lookupOrAddCmp(Cmp->getOpcode(), NotPred, Op0, Op1);
      // If the number we were assigned was brand new then there is no point in
      // looking for an instruction realizing it: there cannot be one!
      if (Num < NextNum) {
        Value *NotCmp = findLeader(Root.getEnd(), Num);
        if (NotCmp && isa<Instruction>(NotCmp)) {
          unsigned NumReplacements =
              DominatesByEdge
                  ? replaceDominatedUsesWith(NotCmp, NotVal, *DT, Root)
                  : replaceDominatedUsesWith(NotCmp, NotVal, *DT,
                                             Root.getStart());
          Changed |= NumReplacements > 0;
          NumGVNEqProp += NumReplacements;
          // Cached information for anything that uses NotCmp will be invalid.
          if (MD)
            MD->invalidateCachedPointerInfo(NotCmp);
        }
      }
      // Ensure that any instruction in scope that gets the "A < B" value
      // number is replaced with false.
      // The leader table only tracks basic blocks, not edges, so only add to
      // it in the simple case where the edge dominates the end.
      if (RootDominatesEnd)
        addToLeaderTable(Num, NotVal, Root.getEnd());

      continue;
    }
  }

  return Changed;
}

/// When calculating availability, handle an instruction
/// by inserting it into the appropriate sets.
bool GVN::processInstruction(Instruction *I) {
  // Ignore dbg info intrinsics.
  if (isa<DbgInfoIntrinsic>(I))
    return false;

  // If the instruction can be easily simplified then do so now in preference
  // to value numbering it. Value numbering often exposes redundancies, for
  // example if it determines that %y is equal to %x then the instruction
  // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
  const DataLayout &DL = I->getModule()->getDataLayout();
  if (Value *V = SimplifyInstruction(I, {DL, TLI, DT, AC})) {
    bool Changed = false;
    if (!I->use_empty()) {
      I->replaceAllUsesWith(V);
      Changed = true;
    }
    if (isInstructionTriviallyDead(I, TLI)) {
      markInstructionForDeletion(I);
      Changed = true;
    }
    if (Changed) {
      if (MD && V->getType()->isPtrOrPtrVectorTy())
        MD->invalidateCachedPointerInfo(V);
      ++NumGVNSimpl;
      return true;
    }
  }

  if (IntrinsicInst *IntrinsicI = dyn_cast<IntrinsicInst>(I))
    if (IntrinsicI->getIntrinsicID() == Intrinsic::assume)
      return processAssumeIntrinsic(IntrinsicI);

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (processLoad(LI))
      return true;

    unsigned Num = VN.lookupOrAdd(LI);
    addToLeaderTable(Num, LI, LI->getParent());
    return false;
  }

  // For conditional branches, we can perform simple conditional propagation on
  // the condition value itself.
  if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
    if (!BI->isConditional())
      return false;

    if (isa<Constant>(BI->getCondition()))
      return processFoldableCondBr(BI);

    Value *BranchCond = BI->getCondition();
    BasicBlock *TrueSucc = BI->getSuccessor(0);
    BasicBlock *FalseSucc = BI->getSuccessor(1);
    // Avoid multiple edges early.
    if (TrueSucc == FalseSucc)
      return false;

    BasicBlock *Parent = BI->getParent();
    bool Changed = false;

    Value *TrueVal = ConstantInt::getTrue(TrueSucc->getContext());
    BasicBlockEdge TrueE(Parent, TrueSucc);
    Changed |= propagateEquality(BranchCond, TrueVal, TrueE, true);

    Value *FalseVal = ConstantInt::getFalse(FalseSucc->getContext());
    BasicBlockEdge FalseE(Parent, FalseSucc);
    Changed |= propagateEquality(BranchCond, FalseVal, FalseE, true);

    return Changed;
  }

  // For switches, propagate the case values into the case destinations.
  if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) {
    Value *SwitchCond = SI->getCondition();
    BasicBlock *Parent = SI->getParent();
    bool Changed = false;

    // Remember how many outgoing edges there are to every successor.
    SmallDenseMap<BasicBlock *, unsigned, 16> SwitchEdges;
    for (unsigned i = 0, n = SI->getNumSuccessors(); i != n; ++i)
      ++SwitchEdges[SI->getSuccessor(i)];

    for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
         i != e; ++i) {
      BasicBlock *Dst = i->getCaseSuccessor();
      // If there is only a single edge, propagate the case value into it.
      if (SwitchEdges.lookup(Dst) == 1) {
        BasicBlockEdge E(Parent, Dst);
        Changed |= propagateEquality(SwitchCond, i->getCaseValue(), E, true);
      }
    }
    return Changed;
  }

  // Instructions with void type don't return a value, so there's
  // no point in trying to find redundancies in them.
  if (I->getType()->isVoidTy())
    return false;

  uint32_t NextNum = VN.getNextUnusedValueNumber();
  unsigned Num = VN.lookupOrAdd(I);

  // Allocations are always uniquely numbered, so we can save time and memory
  // by fast-failing them. Terminators and PHI nodes take the same fast path.
  if (isa<AllocaInst>(I) || I->isTerminator() || isa<PHINode>(I)) {
    addToLeaderTable(Num, I, I->getParent());
    return false;
  }

  // If the number we were assigned was a brand new VN, then we don't
  // need to do a lookup to see if the number already exists
  // somewhere in the domtree: it can't!
  if (Num >= NextNum) {
    addToLeaderTable(Num, I, I->getParent());
    return false;
  }

  // Perform fast-path value-number-based elimination of values inherited from
  // dominators.
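  // E.g. (illustrative) if a dominating block already computed
  // "%a = add i32 %x, %y", a later "%b = add i32 %x, %y" receives the same
  // value number, and %a is found here as its leader and replacement.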
  Value *Repl = findLeader(I->getParent(), Num);
  if (!Repl) {
    // Failure, just remember this instance for future use.
    addToLeaderTable(Num, I, I->getParent());
    return false;
  } else if (Repl == I) {
    // If I was the result of a shortcut PRE, it might already be in the table
    // and the best replacement for itself. Nothing to do.
    return false;
  }

  // Remove it!
  patchAndReplaceAllUsesWith(I, Repl);
  if (MD && Repl->getType()->isPtrOrPtrVectorTy())
    MD->invalidateCachedPointerInfo(Repl);
  markInstructionForDeletion(I);
  return true;
}

/// runImpl - This is the main transformation entry point for a function.
bool GVN::runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT,
                  const TargetLibraryInfo &RunTLI, AAResults &RunAA,
                  MemoryDependenceResults *RunMD, LoopInfo *LI,
                  OptimizationRemarkEmitter *RunORE) {
  AC = &RunAC;
  DT = &RunDT;
  VN.setDomTree(DT);
  TLI = &RunTLI;
  VN.setAliasAnalysis(&RunAA);
  MD = RunMD;
  ImplicitControlFlowTracking ImplicitCFT;
  ICF = &ImplicitCFT;
  this->LI = LI;
  VN.setMemDep(MD);
  ORE = RunORE;
  InvalidBlockRPONumbers = true;

  bool Changed = false;
  bool ShouldContinue = true;

  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
  // Merge unconditional branches, allowing PRE to catch more
  // optimization opportunities.
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
    BasicBlock *BB = &*FI++;

    bool removedBlock = MergeBlockIntoPredecessor(BB, &DTU, LI, nullptr, MD);
    if (removedBlock)
      ++NumGVNBlocks;

    Changed |= removedBlock;
  }

  unsigned Iteration = 0;
  while (ShouldContinue) {
    LLVM_DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
    ShouldContinue = iterateOnFunction(F);
    Changed |= ShouldContinue;
    ++Iteration;
  }

  if (isPREEnabled()) {
    // Fabricate val-nums for dead code in order to suppress an assertion in
    // performPRE().
    assignValNumForDeadCode();
    bool PREChanged = true;
    while (PREChanged) {
      PREChanged = performPRE(F);
      Changed |= PREChanged;
    }
  }

  // FIXME: Should perform GVN again after PRE does something. PRE can move
  // computations into blocks where they become fully redundant. Note that
  // we can't do this until PRE's critical edge splitting updates memdep.
  // Actually, when this happens, we should just fully integrate PRE into GVN.

  cleanupGlobalSets();
  // Do not clean up DeadBlocks in cleanupGlobalSets() as it's called for each
  // iteration.
  DeadBlocks.clear();

  return Changed;
}

bool GVN::processBlock(BasicBlock *BB) {
  // FIXME: Kill off InstrsToErase by doing the erasing eagerly in a helper
  // function (and incrementing BI before processing an instruction).
  assert(InstrsToErase.empty() &&
         "We expect InstrsToErase to be empty across iterations");
  if (DeadBlocks.count(BB))
    return false;

  // Clear the map before every BB because it can be used only for a single BB.
  ReplaceOperandsWithMap.clear();
  bool ChangedFunction = false;

  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
       BI != BE;) {
    if (!ReplaceOperandsWithMap.empty())
      ChangedFunction |= replaceOperandsForInBlockEquality(&*BI);
    ChangedFunction |= processInstruction(&*BI);

    if (InstrsToErase.empty()) {
      ++BI;
      continue;
    }

    // If we need some instructions deleted, do it now.
    NumGVNInstr += InstrsToErase.size();

    // Avoid iterator invalidation.
    bool AtStart = BI == BB->begin();
    if (!AtStart)
      --BI;

    for (auto *I : InstrsToErase) {
      assert(I->getParent() == BB && "Removing instruction from wrong block?");
      LLVM_DEBUG(dbgs() << "GVN removed: " << *I << '\n');
      salvageKnowledge(I, AC);
      salvageDebugInfo(*I);
      if (MD) MD->removeInstruction(I);
      LLVM_DEBUG(verifyRemoved(I));
      ICF->removeInstruction(I);
      I->eraseFromParent();
    }
    InstrsToErase.clear();

    if (AtStart)
      BI = BB->begin();
    else
      ++BI;
  }

  return ChangedFunction;
}

// Instantiate an expression in a predecessor that lacked it.
bool GVN::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
                                    BasicBlock *Curr, unsigned int ValNo) {
  // Because we are going top-down through the block, all value numbers
  // will be available in the predecessor by the time we need them. Any
  // that weren't originally present will have been instantiated earlier
  // in this loop.
  bool success = true;
  for (unsigned i = 0, e = Instr->getNumOperands(); i != e; ++i) {
    Value *Op = Instr->getOperand(i);
    if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
      continue;
    // This could be a newly inserted instruction, in which case, we won't
    // find a value number, and should give up before we hurt ourselves.
    // FIXME: Rewrite the infrastructure to make it easier to value number
    // and process newly inserted instructions.
    if (!VN.exists(Op)) {
      success = false;
      break;
    }
    uint32_t TValNo =
        VN.phiTranslate(Pred, Curr, VN.lookup(Op), *this);
    if (Value *V = findLeader(Pred, TValNo)) {
      Instr->setOperand(i, V);
    } else {
      success = false;
      break;
    }
  }

  // Fail out if we encounter an operand that is not available in
  // the PRE predecessor. This is typically because of loads which
  // are not value numbered precisely.
  if (!success)
    return false;

  Instr->insertBefore(Pred->getTerminator());
  Instr->setName(Instr->getName() + ".pre");
  Instr->setDebugLoc(Instr->getDebugLoc());

  unsigned Num = VN.lookupOrAdd(Instr);
  VN.add(Instr, Num);

  // Update the availability map to include the new instruction.
  addToLeaderTable(Num, Instr, Pred);
  return true;
}

bool GVN::performScalarPRE(Instruction *CurInst) {
  if (isa<AllocaInst>(CurInst) || CurInst->isTerminator() ||
      isa<PHINode>(CurInst) || CurInst->getType()->isVoidTy() ||
      CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
      isa<DbgInfoIntrinsic>(CurInst))
    return false;

  // Don't do PRE on compares. The PHI would prevent CodeGenPrepare from
  // sinking the compare again, and it would force the code generator to
  // move the i1 from processor flags or predicate registers into a general
  // purpose register.
  if (isa<CmpInst>(CurInst))
    return false;

  // Don't do PRE on GEPs. The inserted PHI would prevent CodeGenPrepare from
  // sinking the addressing mode computation back to its uses. Extending the
  // GEP's live range increases the register pressure, and therefore it can
  // introduce unnecessary spills.
  //
  // This doesn't prevent Load PRE. PHI translation will make the GEP available
  // to the load by moving it to the predecessor block if necessary.
  if (isa<GetElementPtrInst>(CurInst))
    return false;

  // We don't currently value number ANY inline asm calls.
  if (auto *CallB = dyn_cast<CallBase>(CurInst))
    if (CallB->isInlineAsm())
      return false;

  uint32_t ValNo = VN.lookup(CurInst);

  // Look for the predecessors for PRE opportunities. We're
  // only trying to solve the basic diamond case, where
  // a value is computed in the successor and one predecessor,
  // but not the other. We also explicitly disallow cases
  // where the successor is its own predecessor, because they're
  // more complicated to get right.
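  // As an illustrative sketch of the diamond (names are hypothetical):
  //           [ A ]
  //          /     \
  //   [ B: %v = add i32 %x, %y ]   [ C: no %v available ]
  //          \     /
  //   [ D: add i32 %x, %y ]   <- redundant on the path through B
  // We insert the add into C and merge the two copies with a phi in D.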
  unsigned NumWith = 0;
  unsigned NumWithout = 0;
  BasicBlock *PREPred = nullptr;
  BasicBlock *CurrentBlock = CurInst->getParent();

  // Update the RPO numbers for this function.
  if (InvalidBlockRPONumbers)
    assignBlockRPONumber(*CurrentBlock->getParent());

  SmallVector<std::pair<Value *, BasicBlock *>, 8> predMap;
  for (BasicBlock *P : predecessors(CurrentBlock)) {
    // We're not interested in PRE where predecessors are not reachable.
    if (!DT->isReachableFromEntry(P)) {
      NumWithout = 2;
      break;
    }
    // It is not safe to do PRE when P->CurrentBlock is a loop backedge and
    // CurInst has an operand defined in CurrentBlock (so it may be defined
    // by a phi in the loop header).
    assert(BlockRPONumber.count(P) && BlockRPONumber.count(CurrentBlock) &&
           "Invalid BlockRPONumber map.");
    if (BlockRPONumber[P] >= BlockRPONumber[CurrentBlock] &&
        llvm::any_of(CurInst->operands(), [&](const Use &U) {
          if (auto *Inst = dyn_cast<Instruction>(U.get()))
            return Inst->getParent() == CurrentBlock;
          return false;
        })) {
      NumWithout = 2;
      break;
    }

    uint32_t TValNo = VN.phiTranslate(P, CurrentBlock, ValNo, *this);
    Value *predV = findLeader(P, TValNo);
    if (!predV) {
      predMap.push_back(std::make_pair(static_cast<Value *>(nullptr), P));
      PREPred = P;
      ++NumWithout;
    } else if (predV == CurInst) {
      /* CurInst dominates this predecessor. */
      NumWithout = 2;
      break;
    } else {
      predMap.push_back(std::make_pair(predV, P));
      ++NumWith;
    }
  }

  // Don't do PRE when it might increase code size, i.e. when
  // we would need to insert instructions in more than one pred.
  if (NumWithout > 1 || NumWith == 0)
    return false;

  // We may have a case where all predecessors have the instruction,
  // and we just need to insert a phi node. Otherwise, perform
  // insertion.
  Instruction *PREInstr = nullptr;

  if (NumWithout != 0) {
    if (!isSafeToSpeculativelyExecute(CurInst)) {
      // It is only valid to insert a new instruction if the current
      // instruction is always executed. An instruction with implicit control
      // flow could prevent us from doing it. If we cannot speculate the
      // execution, then PRE should be prohibited.
      if (ICF->isDominatedByICFIFromSameBlock(CurInst))
        return false;
    }

    // Don't do PRE across an indirect branch.
    if (isa<IndirectBrInst>(PREPred->getTerminator()))
      return false;

    // Don't do PRE across a callbr.
    // FIXME: Can we do this across the fallthrough edge?
    if (isa<CallBrInst>(PREPred->getTerminator()))
      return false;

    // We can't do PRE safely on a critical edge, so instead we schedule
    // the edge to be split and perform the PRE the next time we iterate
    // on the function.
    unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
    if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
      toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
      return false;
    }
    // We need to insert somewhere, so let's give it a shot.
    PREInstr = CurInst->clone();
    if (!performScalarPREInsertion(PREInstr, PREPred, CurrentBlock, ValNo)) {
      // If we failed insertion, make sure we remove the instruction.
      LLVM_DEBUG(verifyRemoved(PREInstr));
      PREInstr->deleteValue();
      return false;
    }
  }

  // Either we should have filled in the PRE instruction, or we should
  // not have needed insertions.
  assert(PREInstr != nullptr || NumWithout == 0);

  ++NumGVNPRE;

  // Create a PHI to make the value available in this block.
  PHINode *Phi =
      PHINode::Create(CurInst->getType(), predMap.size(),
                      CurInst->getName() + ".pre-phi", &CurrentBlock->front());
  for (unsigned i = 0, e = predMap.size(); i != e; ++i) {
    if (Value *V = predMap[i].first) {
      // If we use an existing value in this phi, we have to patch the original
      // value because the phi will be used to replace a later value.
      patchReplacementInstruction(CurInst, V);
      Phi->addIncoming(V, predMap[i].second);
    } else
      Phi->addIncoming(PREInstr, PREPred);
  }

  VN.add(Phi, ValNo);
  // After creating a new PHI for ValNo, the phi translate result for ValNo
  // will be changed, so erase the related stale entries in the phi translate
  // cache.
  VN.eraseTranslateCacheEntry(ValNo, *CurrentBlock);
  addToLeaderTable(ValNo, Phi, CurrentBlock);
  Phi->setDebugLoc(CurInst->getDebugLoc());
  CurInst->replaceAllUsesWith(Phi);
  if (MD && Phi->getType()->isPtrOrPtrVectorTy())
    MD->invalidateCachedPointerInfo(Phi);
  VN.erase(CurInst);
  removeFromLeaderTable(ValNo, CurInst, CurrentBlock);

  LLVM_DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
  if (MD)
    MD->removeInstruction(CurInst);
  LLVM_DEBUG(verifyRemoved(CurInst));
  // FIXME: Intended to be markInstructionForDeletion(CurInst), but it causes
  // some assertion failures.
  ICF->removeInstruction(CurInst);
  CurInst->eraseFromParent();
  ++NumGVNInstr;

  return true;
}

/// Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
bool GVN::performPRE(Function &F) {
  bool Changed = false;
  for (BasicBlock *CurrentBlock : depth_first(&F.getEntryBlock())) {
    // Nothing to PRE in the entry block.
    if (CurrentBlock == &F.getEntryBlock())
      continue;

    // Don't perform PRE on an EH pad.
    if (CurrentBlock->isEHPad())
      continue;

    for (BasicBlock::iterator BI = CurrentBlock->begin(),
                              BE = CurrentBlock->end();
         BI != BE;) {
      Instruction *CurInst = &*BI++;
      Changed |= performScalarPRE(CurInst);
    }
  }

  if (splitCriticalEdges())
    Changed = true;

  return Changed;
}

/// Split the critical edge connecting the given two blocks, and return
/// the block inserted on the edge.
BasicBlock *GVN::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) {
  // GVN does not require loop-simplify; do not try to preserve it if that is
  // not possible.
  BasicBlock *BB = SplitCriticalEdge(
      Pred, Succ,
      CriticalEdgeSplittingOptions(DT, LI).unsetPreserveLoopSimplify());
  if (MD)
    MD->invalidateCachedPredecessors();
  InvalidBlockRPONumbers = true;
  return BB;
}

/// Split critical edges found during the previous
/// iteration that may enable further optimization.
bool GVN::splitCriticalEdges() {
  if (toSplit.empty())
    return false;
  do {
    std::pair<Instruction *, unsigned> Edge = toSplit.pop_back_val();
    SplitCriticalEdge(Edge.first, Edge.second,
                      CriticalEdgeSplittingOptions(DT, LI));
  } while (!toSplit.empty());
  if (MD) MD->invalidateCachedPredecessors();
  InvalidBlockRPONumbers = true;
  return true;
}

/// Executes one iteration of GVN.
bool GVN::iterateOnFunction(Function &F) {
  cleanupGlobalSets();

  // Top-down walk of the dominator tree.
  bool Changed = false;
  // Needed for value numbering with phi construction to work.
  // RPOT walks the graph in its constructor and will not be invalidated during
  // processBlock.
  ReversePostOrderTraversal<Function *> RPOT(&F);

  for (BasicBlock *BB : RPOT)
    Changed |= processBlock(BB);

  return Changed;
}

void GVN::cleanupGlobalSets() {
  VN.clear();
  LeaderTable.clear();
  BlockRPONumber.clear();
  TableAllocator.Reset();
  ICF->clear();
  InvalidBlockRPONumbers = true;
}

/// Verify that the specified instruction does not occur in our
/// internal data structures.
void GVN::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);

  // Walk through the value number scope to make sure the instruction isn't
  // ferreted away in it.
  for (DenseMap<uint32_t, LeaderTableEntry>::const_iterator
           I = LeaderTable.begin(), E = LeaderTable.end(); I != E; ++I) {
    const LeaderTableEntry *Node = &I->second;
    assert(Node->Val != Inst && "Inst still in value numbering scope!");

    while (Node->Next) {
      Node = Node->Next;
      assert(Node->Val != Inst && "Inst still in value numbering scope!");
    }
  }
}

/// BB is declared dead, which implies other blocks become dead as well. This
/// function adds all these blocks to "DeadBlocks". For the dead blocks'
/// live successors, update their phi nodes by replacing the operands
/// corresponding to dead blocks with UndefVal.
void GVN::addDeadBlock(BasicBlock *BB) {
  SmallVector<BasicBlock *, 4> NewDead;
  SmallSetVector<BasicBlock *, 4> DF;

  NewDead.push_back(BB);
  while (!NewDead.empty()) {
    BasicBlock *D = NewDead.pop_back_val();
    if (DeadBlocks.count(D))
      continue;

    // All blocks dominated by D are dead.
    SmallVector<BasicBlock *, 8> Dom;
    DT->getDescendants(D, Dom);
    DeadBlocks.insert(Dom.begin(), Dom.end());

    // Figure out the dominance frontier of D.
    for (BasicBlock *B : Dom) {
      for (BasicBlock *S : successors(B)) {
        if (DeadBlocks.count(S))
          continue;

        bool AllPredDead = true;
        for (BasicBlock *P : predecessors(S))
          if (!DeadBlocks.count(P)) {
            AllPredDead = false;
            break;
          }

        if (!AllPredDead) {
          // S could be proved dead later on. That is why we don't update phi
          // operands at this moment.
          DF.insert(S);
        } else {
          // Even though S is not dominated by D, it is dead by now. This
          // could happen if S already had a dead predecessor before D was
          // declared dead.
          NewDead.push_back(S);
        }
      }
    }
  }

  // For the dead blocks' live successors, update their phi nodes by replacing
  // the operands corresponding to dead blocks with UndefVal.
  for (SmallSetVector<BasicBlock *, 4>::iterator I = DF.begin(), E = DF.end();
       I != E; I++) {
    BasicBlock *B = *I;
    if (DeadBlocks.count(B))
      continue;

    // First, split the critical edges. This might also create additional
    // blocks to preserve LoopSimplify form and adjust edges accordingly.
    SmallVector<BasicBlock *, 4> Preds(pred_begin(B), pred_end(B));
    for (BasicBlock *P : Preds) {
      if (!DeadBlocks.count(P))
        continue;

      if (llvm::any_of(successors(P),
                       [B](BasicBlock *Succ) { return Succ == B; }) &&
          isCriticalEdge(P->getTerminator(), B)) {
        if (BasicBlock *S = splitCriticalEdges(P, B))
          DeadBlocks.insert(P = S);
      }
    }

    // Now undef the incoming values from the dead predecessors.
    for (BasicBlock *P : predecessors(B)) {
      if (!DeadBlocks.count(P))
        continue;
      for (PHINode &Phi : B->phis()) {
        Phi.setIncomingValueForBlock(P, UndefValue::get(Phi.getType()));
        if (MD)
          MD->invalidateCachedPointerInfo(&Phi);
      }
    }
  }
}

// If the given branch is recognized as a foldable branch (i.e. a conditional
// branch with a constant condition), it will perform the following analyses
// and transformation.
//  1) If the dead outgoing edge is a critical edge, split it. Let
//     R be the target of the dead outgoing edge.
//  2) Identify the set of dead blocks implied by the branch's dead outgoing
//     edge. The result of this step will be {X | X is dominated by R}.
//  3) Identify those blocks which have at least one dead predecessor. The
//     result of this step will be dominance-frontier(R).
//  4) Update the PHIs in DF(R) by replacing the operands corresponding to
//     dead blocks with "UndefVal", in the hope that these PHIs will be
//     optimized away.
//
// Return true iff *NEW* dead code is found.
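//
// E.g. (illustrative) for "br i1 true, label %live, label %dead", the %dead
// successor is the root R: it and every block it dominates are marked dead,
// and phis in the live blocks on dominance-frontier(R) get undef operands
// from the dead predecessors.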
bool GVN::processFoldableCondBr(BranchInst *BI) {
  if (!BI || BI->isUnconditional())
    return false;

  // If a branch has two identical successors, we cannot declare either dead.
  if (BI->getSuccessor(0) == BI->getSuccessor(1))
    return false;

  ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
  if (!Cond)
    return false;

  BasicBlock *DeadRoot =
      Cond->getZExtValue() ? BI->getSuccessor(1) : BI->getSuccessor(0);
  if (DeadBlocks.count(DeadRoot))
    return false;

  if (!DeadRoot->getSinglePredecessor())
    DeadRoot = splitCriticalEdges(BI->getParent(), DeadRoot);

  addDeadBlock(DeadRoot);
  return true;
}

// performPRE() will trigger an assert if it comes across an instruction
// without an associated val-num. As there are normally far more live
// instructions than dead instructions, it makes more sense just to
// "fabricate" a val-number for the dead code than to check whether each
// instruction involved is dead.
void GVN::assignValNumForDeadCode() {
  for (BasicBlock *BB : DeadBlocks) {
    for (Instruction &Inst : *BB) {
      unsigned ValNum = VN.lookupOrAdd(&Inst);
      addToLeaderTable(ValNum, &Inst, BB);
    }
  }
}

class llvm::gvn::GVNLegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  explicit GVNLegacyPass(bool NoMemDepAnalysis = !GVNEnableMemDep)
      : FunctionPass(ID), Impl(GVNOptions().setMemDep(!NoMemDepAnalysis)) {
    initializeGVNLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();

    return Impl.runImpl(
        F, getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
        getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
        getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
        getAnalysis<AAResultsWrapperPass>().getAAResults(),
        Impl.isMemDepEnabled()
            ? &getAnalysis<MemoryDependenceWrapperPass>().getMemDep()
            : nullptr,
        LIWP ? &LIWP->getLoopInfo() : nullptr,
        &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    if (Impl.isMemDepEnabled())
      AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();

    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
  }

private:
  GVN Impl;
};

char GVNLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(GVNLegacyPass, "gvn", "Global Value Numbering", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(GVNLegacyPass, "gvn", "Global Value Numbering", false,
                    false)

// The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoMemDepAnalysis) {
  return new GVNLegacyPass(NoMemDepAnalysis);
}