1 //===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass performs global value numbering to eliminate fully redundant
10 // instructions. It also performs simple dead load elimination.
11 //
12 // Note that this pass does the value numbering itself; it does not use the
13 // ValueNumbering analysis passes.
14 //
15 //===----------------------------------------------------------------------===//
16
17 #include "llvm/Transforms/Scalar/GVN.h"
18 #include "llvm/ADT/DenseMap.h"
19 #include "llvm/ADT/DepthFirstIterator.h"
20 #include "llvm/ADT/Hashing.h"
21 #include "llvm/ADT/MapVector.h"
22 #include "llvm/ADT/PointerIntPair.h"
23 #include "llvm/ADT/PostOrderIterator.h"
24 #include "llvm/ADT/STLExtras.h"
25 #include "llvm/ADT/SetVector.h"
26 #include "llvm/ADT/SmallPtrSet.h"
27 #include "llvm/ADT/SmallVector.h"
28 #include "llvm/ADT/Statistic.h"
29 #include "llvm/Analysis/AliasAnalysis.h"
30 #include "llvm/Analysis/AssumeBundleQueries.h"
31 #include "llvm/Analysis/AssumptionCache.h"
32 #include "llvm/Analysis/CFG.h"
33 #include "llvm/Analysis/DomTreeUpdater.h"
34 #include "llvm/Analysis/GlobalsModRef.h"
35 #include "llvm/Analysis/InstructionSimplify.h"
36 #include "llvm/Analysis/LoopInfo.h"
37 #include "llvm/Analysis/MemoryBuiltins.h"
38 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
39 #include "llvm/Analysis/MemorySSA.h"
40 #include "llvm/Analysis/MemorySSAUpdater.h"
41 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
42 #include "llvm/Analysis/PHITransAddr.h"
43 #include "llvm/Analysis/TargetLibraryInfo.h"
44 #include "llvm/Analysis/ValueTracking.h"
45 #include "llvm/Config/llvm-config.h"
46 #include "llvm/IR/Attributes.h"
47 #include "llvm/IR/BasicBlock.h"
48 #include "llvm/IR/Constant.h"
49 #include "llvm/IR/Constants.h"
50 #include "llvm/IR/DataLayout.h"
51 #include "llvm/IR/DebugLoc.h"
52 #include "llvm/IR/Dominators.h"
53 #include "llvm/IR/Function.h"
54 #include "llvm/IR/InstrTypes.h"
55 #include "llvm/IR/Instruction.h"
56 #include "llvm/IR/Instructions.h"
57 #include "llvm/IR/IntrinsicInst.h"
58 #include "llvm/IR/Intrinsics.h"
59 #include "llvm/IR/LLVMContext.h"
60 #include "llvm/IR/Metadata.h"
61 #include "llvm/IR/Module.h"
62 #include "llvm/IR/Operator.h"
63 #include "llvm/IR/PassManager.h"
64 #include "llvm/IR/PatternMatch.h"
65 #include "llvm/IR/Type.h"
66 #include "llvm/IR/Use.h"
67 #include "llvm/IR/Value.h"
68 #include "llvm/InitializePasses.h"
69 #include "llvm/Pass.h"
70 #include "llvm/Support/Casting.h"
71 #include "llvm/Support/CommandLine.h"
72 #include "llvm/Support/Compiler.h"
73 #include "llvm/Support/Debug.h"
74 #include "llvm/Support/raw_ostream.h"
75 #include "llvm/Transforms/Utils.h"
76 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
77 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
78 #include "llvm/Transforms/Utils/Local.h"
79 #include "llvm/Transforms/Utils/SSAUpdater.h"
80 #include "llvm/Transforms/Utils/VNCoercion.h"
81 #include <algorithm>
82 #include <cassert>
83 #include <cstdint>
84 #include <utility>
85
86 using namespace llvm;
87 using namespace llvm::gvn;
88 using namespace llvm::VNCoercion;
89 using namespace PatternMatch;
90
91 #define DEBUG_TYPE "gvn"
92
93 STATISTIC(NumGVNInstr, "Number of instructions deleted");
94 STATISTIC(NumGVNLoad, "Number of loads deleted");
95 STATISTIC(NumGVNPRE, "Number of instructions PRE'd");
96 STATISTIC(NumGVNBlocks, "Number of blocks merged");
97 STATISTIC(NumGVNSimpl, "Number of instructions simplified");
98 STATISTIC(NumGVNEqProp, "Number of equalities propagated");
99 STATISTIC(NumPRELoad, "Number of loads PRE'd");
100 STATISTIC(NumPRELoopLoad, "Number of loop loads PRE'd");
101
102 STATISTIC(IsValueFullyAvailableInBlockNumSpeculationsMax,
103 "Number of blocks speculated as available in "
104 "IsValueFullyAvailableInBlock(), max");
105 STATISTIC(MaxBBSpeculationCutoffReachedTimes,
106 "Number of times we we reached gvn-max-block-speculations cut-off "
107 "preventing further exploration");
108
109 static cl::opt<bool> GVNEnablePRE("enable-pre", cl::init(true), cl::Hidden);
110 static cl::opt<bool> GVNEnableLoadPRE("enable-load-pre", cl::init(true));
111 static cl::opt<bool> GVNEnableLoadInLoopPRE("enable-load-in-loop-pre",
112 cl::init(true));
113 static cl::opt<bool>
114 GVNEnableSplitBackedgeInLoadPRE("enable-split-backedge-in-load-pre",
115 cl::init(true));
116 static cl::opt<bool> GVNEnableMemDep("enable-gvn-memdep", cl::init(true));
117
118 static cl::opt<uint32_t> MaxNumDeps(
119 "gvn-max-num-deps", cl::Hidden, cl::init(100), cl::ZeroOrMore,
120 cl::desc("Max number of dependences to attempt Load PRE (default = 100)"));
121
122 // This is based on IsValueFullyAvailableInBlockNumSpeculationsMax stat.
123 static cl::opt<uint32_t> MaxBBSpeculations(
124 "gvn-max-block-speculations", cl::Hidden, cl::init(600), cl::ZeroOrMore,
125 cl::desc("Max number of blocks we're willing to speculate on (and recurse "
126 "into) when deducing if a value is fully available or not in GVN "
127 "(default = 600)"));
128
129 struct llvm::GVN::Expression {
130 uint32_t opcode;
131 bool commutative = false;
132 Type *type = nullptr;
133 SmallVector<uint32_t, 4> varargs;
134
135 Expression(uint32_t o = ~2U) : opcode(o) {}
136
137 bool operator==(const Expression &other) const {
138 if (opcode != other.opcode)
139 return false;
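// Note: ~0U and ~1U are the opcodes of the DenseMap empty and tombstone
// keys (see DenseMapInfo<GVN::Expression> below); those keys compare
// equal on opcode alone.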
140 if (opcode == ~0U || opcode == ~1U)
141 return true;
142 if (type != other.type)
143 return false;
144 if (varargs != other.varargs)
145 return false;
146 return true;
147 }
148
149 friend hash_code hash_value(const Expression &Value) {
150 return hash_combine(
151 Value.opcode, Value.type,
152 hash_combine_range(Value.varargs.begin(), Value.varargs.end()));
153 }
154 };
155
156 namespace llvm {
157
158 template <> struct DenseMapInfo<GVN::Expression> {
159 static inline GVN::Expression getEmptyKey() { return ~0U; }
160 static inline GVN::Expression getTombstoneKey() { return ~1U; }
161
162 static unsigned getHashValue(const GVN::Expression &e) {
163 using llvm::hash_value;
164
165 return static_cast<unsigned>(hash_value(e));
166 }
167
168 static bool isEqual(const GVN::Expression &LHS, const GVN::Expression &RHS) {
169 return LHS == RHS;
170 }
171 };
172
173 } // end namespace llvm
174
175 /// Represents a particular available value that we know how to materialize.
176 /// Materialization of an AvailableValue never fails. An AvailableValue is
177 /// implicitly associated with a rematerialization point which is the
178 /// location of the instruction from which it was formed.
179 struct llvm::gvn::AvailableValue {
180 enum ValType {
181 SimpleVal, // A simple offsetted value that is accessed.
182 LoadVal, // A value produced by a load.
183 MemIntrin, // A memory intrinsic which is loaded from.
184 UndefVal // An UndefValue representing a value from a dead block (which
185 // is not yet physically removed from the CFG).
186 };
187
188 /// V - The value that is live out of the block.
189 PointerIntPair<Value *, 2, ValType> Val;
190
191 /// Offset - The byte offset in Val that is interesting for the load query.
192 unsigned Offset = 0;
193
194 static AvailableValue get(Value *V, unsigned Offset = 0) {
195 AvailableValue Res;
196 Res.Val.setPointer(V);
197 Res.Val.setInt(SimpleVal);
198 Res.Offset = Offset;
199 return Res;
200 }
201
202 static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset = 0) {
203 AvailableValue Res;
204 Res.Val.setPointer(MI);
205 Res.Val.setInt(MemIntrin);
206 Res.Offset = Offset;
207 return Res;
208 }
209
210 static AvailableValue getLoad(LoadInst *Load, unsigned Offset = 0) {
211 AvailableValue Res;
212 Res.Val.setPointer(Load);
213 Res.Val.setInt(LoadVal);
214 Res.Offset = Offset;
215 return Res;
216 }
217
218 static AvailableValue getUndef() {
219 AvailableValue Res;
220 Res.Val.setPointer(nullptr);
221 Res.Val.setInt(UndefVal);
222 Res.Offset = 0;
223 return Res;
224 }
225
226 bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
227 bool isCoercedLoadValue() const { return Val.getInt() == LoadVal; }
228 bool isMemIntrinValue() const { return Val.getInt() == MemIntrin; }
229 bool isUndefValue() const { return Val.getInt() == UndefVal; }
230
231 Value *getSimpleValue() const {
232 assert(isSimpleValue() && "Wrong accessor");
233 return Val.getPointer();
234 }
235
236 LoadInst *getCoercedLoadValue() const {
237 assert(isCoercedLoadValue() && "Wrong accessor");
238 return cast<LoadInst>(Val.getPointer());
239 }
240
241 MemIntrinsic *getMemIntrinValue() const {
242 assert(isMemIntrinValue() && "Wrong accessor");
243 return cast<MemIntrinsic>(Val.getPointer());
244 }
245
246 /// Emit code at the specified insertion point to adjust the value defined
247 /// here to the specified type. This handles various coercion cases.
248 Value *MaterializeAdjustedValue(LoadInst *Load, Instruction *InsertPt,
249 GVN &gvn) const;
250 };
251
252 /// Represents an AvailableValue which can be rematerialized at the end of
253 /// the associated BasicBlock.
254 struct llvm::gvn::AvailableValueInBlock {
255 /// BB - The basic block in question.
256 BasicBlock *BB = nullptr;
257
258 /// AV - The actual available value
259 AvailableValue AV;
260
261 static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV) {
262 AvailableValueInBlock Res;
263 Res.BB = BB;
264 Res.AV = std::move(AV);
265 return Res;
266 }
267
268 static AvailableValueInBlock get(BasicBlock *BB, Value *V,
269 unsigned Offset = 0) {
270 return get(BB, AvailableValue::get(V, Offset));
271 }
272
273 static AvailableValueInBlock getUndef(BasicBlock *BB) {
274 return get(BB, AvailableValue::getUndef());
275 }
276
277 /// Emit code at the end of this block to adjust the value defined here to
278 /// the specified type. This handles various coercion cases.
279 Value *MaterializeAdjustedValue(LoadInst *Load, GVN &gvn) const {
280 return AV.MaterializeAdjustedValue(Load, BB->getTerminator(), gvn);
281 }
282 };
283
284 //===----------------------------------------------------------------------===//
285 // ValueTable Internal Functions
286 //===----------------------------------------------------------------------===//
287
288 GVN::Expression GVN::ValueTable::createExpr(Instruction *I) {
289 Expression e;
290 e.type = I->getType();
291 e.opcode = I->getOpcode();
292 if (const GCRelocateInst *GCR = dyn_cast<GCRelocateInst>(I)) {
293 // gc.relocate is a 'special' call: its second and third operands are
294 // not real values, but indices into statepoint's argument list.
295 // Use the referred-to values for purposes of identity.
296 e.varargs.push_back(lookupOrAdd(GCR->getOperand(0)));
297 e.varargs.push_back(lookupOrAdd(GCR->getBasePtr()));
298 e.varargs.push_back(lookupOrAdd(GCR->getDerivedPtr()));
299 } else {
300 for (Use &Op : I->operands())
301 e.varargs.push_back(lookupOrAdd(Op));
302 }
303 if (I->isCommutative()) {
304 // Ensure that commutative instructions that only differ by a permutation
305 // of their operands get the same value number by sorting the operand value
306 // numbers. Since the commutative operands are the first two operands, it is
307 // more efficient to sort by hand rather than using, say, std::sort.
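// For example, "add %b, %a" canonicalizes to the same expression as
// "add %a, %b", so both instructions receive a single value number.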
308 assert(I->getNumOperands() >= 2 && "Unsupported commutative instruction!");
309 if (e.varargs[0] > e.varargs[1])
310 std::swap(e.varargs[0], e.varargs[1]);
311 e.commutative = true;
312 }
313
314 if (auto *C = dyn_cast<CmpInst>(I)) {
315 // Sort the operand value numbers so x<y and y>x get the same value number.
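// For example, "icmp slt %x, %y" and "icmp sgt %y, %x" normalize to one
// expression: the operands are ordered by value number and the predicate
// is swapped to compensate.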
316 CmpInst::Predicate Predicate = C->getPredicate();
317 if (e.varargs[0] > e.varargs[1]) {
318 std::swap(e.varargs[0], e.varargs[1]);
319 Predicate = CmpInst::getSwappedPredicate(Predicate);
320 }
321 e.opcode = (C->getOpcode() << 8) | Predicate;
322 e.commutative = true;
323 } else if (auto *E = dyn_cast<InsertValueInst>(I)) {
324 e.varargs.append(E->idx_begin(), E->idx_end());
325 } else if (auto *SVI = dyn_cast<ShuffleVectorInst>(I)) {
326 ArrayRef<int> ShuffleMask = SVI->getShuffleMask();
327 e.varargs.append(ShuffleMask.begin(), ShuffleMask.end());
328 }
329
330 return e;
331 }
332
333 GVN::Expression GVN::ValueTable::createCmpExpr(unsigned Opcode,
334 CmpInst::Predicate Predicate,
335 Value *LHS, Value *RHS) {
336 assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
337 "Not a comparison!");
338 Expression e;
339 e.type = CmpInst::makeCmpResultType(LHS->getType());
340 e.varargs.push_back(lookupOrAdd(LHS));
341 e.varargs.push_back(lookupOrAdd(RHS));
342
343 // Sort the operand value numbers so x<y and y>x get the same value number.
344 if (e.varargs[0] > e.varargs[1]) {
345 std::swap(e.varargs[0], e.varargs[1]);
346 Predicate = CmpInst::getSwappedPredicate(Predicate);
347 }
348 e.opcode = (Opcode << 8) | Predicate;
349 e.commutative = true;
350 return e;
351 }
352
353 GVN::Expression GVN::ValueTable::createExtractvalueExpr(ExtractValueInst *EI) {
354 assert(EI && "Not an ExtractValueInst?");
355 Expression e;
356 e.type = EI->getType();
357 e.opcode = 0;
358
359 WithOverflowInst *WO = dyn_cast<WithOverflowInst>(EI->getAggregateOperand());
360 if (WO != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
361 // EI is an extract from one of our with.overflow intrinsics. Synthesize
362 // a semantically equivalent expression instead of an extract value
363 // expression.
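// For example, "extractvalue %wo, 0" where %wo is the result of
// "sadd.with.overflow(%a, %b)" is numbered as if it were a plain
// "add %a, %b".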
364 e.opcode = WO->getBinaryOp();
365 e.varargs.push_back(lookupOrAdd(WO->getLHS()));
366 e.varargs.push_back(lookupOrAdd(WO->getRHS()));
367 return e;
368 }
369
370 // Not a recognised intrinsic. Fall back to producing an extract value
371 // expression.
372 e.opcode = EI->getOpcode();
373 for (Use &Op : EI->operands())
374 e.varargs.push_back(lookupOrAdd(Op));
375
376 append_range(e.varargs, EI->indices());
377
378 return e;
379 }
380
381 //===----------------------------------------------------------------------===//
382 // ValueTable External Functions
383 //===----------------------------------------------------------------------===//
384
385 GVN::ValueTable::ValueTable() = default;
386 GVN::ValueTable::ValueTable(const ValueTable &) = default;
387 GVN::ValueTable::ValueTable(ValueTable &&) = default;
388 GVN::ValueTable::~ValueTable() = default;
389 GVN::ValueTable &GVN::ValueTable::operator=(const GVN::ValueTable &Arg) = default;
390
391 /// add - Insert a value into the table with a specified value number.
392 void GVN::ValueTable::add(Value *V, uint32_t num) {
393 valueNumbering.insert(std::make_pair(V, num));
394 if (PHINode *PN = dyn_cast<PHINode>(V))
395 NumberingPhi[num] = PN;
396 }
397
398 uint32_t GVN::ValueTable::lookupOrAddCall(CallInst *C) {
399 if (AA->doesNotAccessMemory(C)) {
400 Expression exp = createExpr(C);
401 uint32_t e = assignExpNewValueNum(exp).first;
402 valueNumbering[C] = e;
403 return e;
404 } else if (MD && AA->onlyReadsMemory(C)) {
405 Expression exp = createExpr(C);
406 auto ValNum = assignExpNewValueNum(exp);
407 if (ValNum.second) {
408 valueNumbering[C] = ValNum.first;
409 return ValNum.first;
410 }
411
412 MemDepResult local_dep = MD->getDependency(C);
413
414 if (!local_dep.isDef() && !local_dep.isNonLocal()) {
415 valueNumbering[C] = nextValueNumber;
416 return nextValueNumber++;
417 }
418
419 if (local_dep.isDef()) {
420 // For masked load/store intrinsics, the local_dep may actually be
421 // a normal load or store instruction.
422 CallInst *local_cdep = dyn_cast<CallInst>(local_dep.getInst());
423
424 if (!local_cdep || local_cdep->arg_size() != C->arg_size()) {
425 valueNumbering[C] = nextValueNumber;
426 return nextValueNumber++;
427 }
428
429 for (unsigned i = 0, e = C->arg_size(); i < e; ++i) {
430 uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
431 uint32_t cd_vn = lookupOrAdd(local_cdep->getArgOperand(i));
432 if (c_vn != cd_vn) {
433 valueNumbering[C] = nextValueNumber;
434 return nextValueNumber++;
435 }
436 }
437
438 uint32_t v = lookupOrAdd(local_cdep);
439 valueNumbering[C] = v;
440 return v;
441 }
442
443 // Non-local case.
444 const MemoryDependenceResults::NonLocalDepInfo &deps =
445 MD->getNonLocalCallDependency(C);
446 // FIXME: Move the checking logic to MemDep!
447 CallInst* cdep = nullptr;
448
449 // Check to see if we have a single dominating call instruction that is
450 // identical to C.
451 for (unsigned i = 0, e = deps.size(); i != e; ++i) {
452 const NonLocalDepEntry *I = &deps[i];
453 if (I->getResult().isNonLocal())
454 continue;
455
456 // We don't handle non-definitions. If we already have a call, reject
457 // instruction dependencies.
458 if (!I->getResult().isDef() || cdep != nullptr) {
459 cdep = nullptr;
460 break;
461 }
462
463 CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
464 // FIXME: All duplicated with non-local case.
465 if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){
466 cdep = NonLocalDepCall;
467 continue;
468 }
469
470 cdep = nullptr;
471 break;
472 }
473
474 if (!cdep) {
475 valueNumbering[C] = nextValueNumber;
476 return nextValueNumber++;
477 }
478
479 if (cdep->arg_size() != C->arg_size()) {
480 valueNumbering[C] = nextValueNumber;
481 return nextValueNumber++;
482 }
483 for (unsigned i = 0, e = C->arg_size(); i < e; ++i) {
484 uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
485 uint32_t cd_vn = lookupOrAdd(cdep->getArgOperand(i));
486 if (c_vn != cd_vn) {
487 valueNumbering[C] = nextValueNumber;
488 return nextValueNumber++;
489 }
490 }
491
492 uint32_t v = lookupOrAdd(cdep);
493 valueNumbering[C] = v;
494 return v;
495 } else {
496 valueNumbering[C] = nextValueNumber;
497 return nextValueNumber++;
498 }
499 }
500
501 /// Returns true if a value number exists for the specified value.
502 bool GVN::ValueTable::exists(Value *V) const { return valueNumbering.count(V) != 0; }
503
504 /// lookupOrAdd - Returns the value number for the specified value, assigning
505 /// it a new number if it did not have one before.
506 uint32_t GVN::ValueTable::lookupOrAdd(Value *V) {
507 DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
508 if (VI != valueNumbering.end())
509 return VI->second;
510
511 if (!isa<Instruction>(V)) {
512 valueNumbering[V] = nextValueNumber;
513 return nextValueNumber++;
514 }
515
516 Instruction* I = cast<Instruction>(V);
517 Expression exp;
518 switch (I->getOpcode()) {
519 case Instruction::Call:
520 return lookupOrAddCall(cast<CallInst>(I));
521 case Instruction::FNeg:
522 case Instruction::Add:
523 case Instruction::FAdd:
524 case Instruction::Sub:
525 case Instruction::FSub:
526 case Instruction::Mul:
527 case Instruction::FMul:
528 case Instruction::UDiv:
529 case Instruction::SDiv:
530 case Instruction::FDiv:
531 case Instruction::URem:
532 case Instruction::SRem:
533 case Instruction::FRem:
534 case Instruction::Shl:
535 case Instruction::LShr:
536 case Instruction::AShr:
537 case Instruction::And:
538 case Instruction::Or:
539 case Instruction::Xor:
540 case Instruction::ICmp:
541 case Instruction::FCmp:
542 case Instruction::Trunc:
543 case Instruction::ZExt:
544 case Instruction::SExt:
545 case Instruction::FPToUI:
546 case Instruction::FPToSI:
547 case Instruction::UIToFP:
548 case Instruction::SIToFP:
549 case Instruction::FPTrunc:
550 case Instruction::FPExt:
551 case Instruction::PtrToInt:
552 case Instruction::IntToPtr:
553 case Instruction::AddrSpaceCast:
554 case Instruction::BitCast:
555 case Instruction::Select:
556 case Instruction::Freeze:
557 case Instruction::ExtractElement:
558 case Instruction::InsertElement:
559 case Instruction::ShuffleVector:
560 case Instruction::InsertValue:
561 case Instruction::GetElementPtr:
562 exp = createExpr(I);
563 break;
564 case Instruction::ExtractValue:
565 exp = createExtractvalueExpr(cast<ExtractValueInst>(I));
566 break;
567 case Instruction::PHI:
568 valueNumbering[V] = nextValueNumber;
569 NumberingPhi[nextValueNumber] = cast<PHINode>(V);
570 return nextValueNumber++;
571 default:
572 valueNumbering[V] = nextValueNumber;
573 return nextValueNumber++;
574 }
575
576 uint32_t e = assignExpNewValueNum(exp).first;
577 valueNumbering[V] = e;
578 return e;
579 }
580
581 /// Returns the value number of the specified value. Fails if
582 /// the value has not yet been numbered.
583 uint32_t GVN::ValueTable::lookup(Value *V, bool Verify) const {
584 DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
585 if (Verify) {
586 assert(VI != valueNumbering.end() && "Value not numbered?");
587 return VI->second;
588 }
589 return (VI != valueNumbering.end()) ? VI->second : 0;
590 }
591
592 /// Returns the value number of the given comparison,
593 /// assigning it a new number if it did not have one before. Useful when
594 /// we deduced the result of a comparison, but don't immediately have an
595 /// instruction realizing that comparison to hand.
596 uint32_t GVN::ValueTable::lookupOrAddCmp(unsigned Opcode,
597 CmpInst::Predicate Predicate,
598 Value *LHS, Value *RHS) {
599 Expression exp = createCmpExpr(Opcode, Predicate, LHS, RHS);
600 return assignExpNewValueNum(exp).first;
601 }
602
603 /// Remove all entries from the ValueTable.
604 void GVN::ValueTable::clear() {
605 valueNumbering.clear();
606 expressionNumbering.clear();
607 NumberingPhi.clear();
608 PhiTranslateTable.clear();
609 nextValueNumber = 1;
610 Expressions.clear();
611 ExprIdx.clear();
612 nextExprNumber = 0;
613 }
614
615 /// Remove a value from the value numbering.
616 void GVN::ValueTable::erase(Value *V) {
617 uint32_t Num = valueNumbering.lookup(V);
618 valueNumbering.erase(V);
619 // If V is a PHINode, the V <--> value number mapping is one-to-one.
620 if (isa<PHINode>(V))
621 NumberingPhi.erase(Num);
622 }
623
624 /// verifyRemoved - Verify that the value is removed from all internal data
625 /// structures.
626 void GVN::ValueTable::verifyRemoved(const Value *V) const {
627 for (DenseMap<Value*, uint32_t>::const_iterator
628 I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
629 assert(I->first != V && "Inst still occurs in value numbering map!");
630 }
631 }
632
633 //===----------------------------------------------------------------------===//
634 // GVN Pass
635 //===----------------------------------------------------------------------===//
636
637 bool GVN::isPREEnabled() const {
638 return Options.AllowPRE.getValueOr(GVNEnablePRE);
639 }
640
641 bool GVN::isLoadPREEnabled() const {
642 return Options.AllowLoadPRE.getValueOr(GVNEnableLoadPRE);
643 }
644
645 bool GVN::isLoadInLoopPREEnabled() const {
646 return Options.AllowLoadInLoopPRE.getValueOr(GVNEnableLoadInLoopPRE);
647 }
648
649 bool GVN::isLoadPRESplitBackedgeEnabled() const {
650 return Options.AllowLoadPRESplitBackedge.getValueOr(
651 GVNEnableSplitBackedgeInLoadPRE);
652 }
653
654 bool GVN::isMemDepEnabled() const {
655 return Options.AllowMemDep.getValueOr(GVNEnableMemDep);
656 }
657
658 PreservedAnalyses GVN::run(Function &F, FunctionAnalysisManager &AM) {
659 // FIXME: The order of evaluation of these 'getResult' calls is very
660 // significant! Re-ordering these variables will cause GVN when run alone to
661 // be less effective! We should fix memdep and basic-aa to not exhibit this
662 // behavior, but until then don't change the order here.
663 auto &AC = AM.getResult<AssumptionAnalysis>(F);
664 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
665 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
666 auto &AA = AM.getResult<AAManager>(F);
667 auto *MemDep =
668 isMemDepEnabled() ? &AM.getResult<MemoryDependenceAnalysis>(F) : nullptr;
669 auto *LI = AM.getCachedResult<LoopAnalysis>(F);
670 auto *MSSA = AM.getCachedResult<MemorySSAAnalysis>(F);
671 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
672 bool Changed = runImpl(F, AC, DT, TLI, AA, MemDep, LI, &ORE,
673 MSSA ? &MSSA->getMSSA() : nullptr);
674 if (!Changed)
675 return PreservedAnalyses::all();
676 PreservedAnalyses PA;
677 PA.preserve<DominatorTreeAnalysis>();
678 PA.preserve<TargetLibraryAnalysis>();
679 if (MSSA)
680 PA.preserve<MemorySSAAnalysis>();
681 if (LI)
682 PA.preserve<LoopAnalysis>();
683 return PA;
684 }
685
686 void GVN::printPipeline(
687 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
688 static_cast<PassInfoMixin<GVN> *>(this)->printPipeline(OS,
689 MapClassName2PassName);
690
691 OS << "<";
692 if (Options.AllowPRE != None)
693 OS << (Options.AllowPRE.getValue() ? "" : "no-") << "pre;";
694 if (Options.AllowLoadPRE != None)
695 OS << (Options.AllowLoadPRE.getValue() ? "" : "no-") << "load-pre;";
696 if (Options.AllowLoadPRESplitBackedge != None)
697 OS << (Options.AllowLoadPRESplitBackedge.getValue() ? "" : "no-")
698 << "split-backedge-load-pre;";
699 if (Options.AllowMemDep != None)
700 OS << (Options.AllowMemDep.getValue() ? "" : "no-") << "memdep";
701 OS << ">";
702 }
703
704 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
705 LLVM_DUMP_METHOD void GVN::dump(DenseMap<uint32_t, Value*>& d) const {
706 errs() << "{\n";
707 for (auto &I : d) {
708 errs() << I.first << "\n";
709 I.second->dump();
710 }
711 errs() << "}\n";
712 }
713 #endif
714
715 enum class AvailabilityState : char {
716 /// We know the block *is not* fully available. This is a fixpoint.
717 Unavailable = 0,
718 /// We know the block *is* fully available. This is a fixpoint.
719 Available = 1,
720 /// We do not know whether the block is fully available or not,
721 /// but we are currently speculating that it will be.
722 /// If it would have turned out that the block was, in fact, not fully
723 /// available, this would have been cleaned up into an Unavailable.
724 SpeculativelyAvailable = 2,
725 };
726
727 /// Return true if we can prove that the value
728 /// we're analyzing is fully available in the specified block. As we go, keep
729 /// track of which blocks we know are fully alive in FullyAvailableBlocks. This
730 /// map is actually a tri-state map with the following values:
731 /// 0) we know the block *is not* fully available.
732 /// 1) we know the block *is* fully available.
733 /// 2) we do not know whether the block is fully available or not, but we are
734 /// currently speculating that it will be.
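/// For example, a value available at the end of a loop preheader is fully
/// available in the loop header only if it is also available along the
/// backedge; blocks visited while answering that query begin in state 2
/// and are later fixed up to 0 or 1 once the answer is known.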
735 static bool IsValueFullyAvailableInBlock(
736 BasicBlock *BB,
737 DenseMap<BasicBlock *, AvailabilityState> &FullyAvailableBlocks) {
738 SmallVector<BasicBlock *, 32> Worklist;
739 Optional<BasicBlock *> UnavailableBB;
740
741 // The number of times we didn't find an entry for a block in a map and
742 // optimistically inserted an entry marking the block as speculatively available.
743 unsigned NumNewNewSpeculativelyAvailableBBs = 0;
744
745 #ifndef NDEBUG
746 SmallSet<BasicBlock *, 32> NewSpeculativelyAvailableBBs;
747 SmallVector<BasicBlock *, 32> AvailableBBs;
748 #endif
749
750 Worklist.emplace_back(BB);
751 while (!Worklist.empty()) {
752 BasicBlock *CurrBB = Worklist.pop_back_val(); // LIFO - depth-first!
753 // Optimistically assume that the block is Speculatively Available and check
754 // to see if we already know about this block in one lookup.
755 std::pair<DenseMap<BasicBlock *, AvailabilityState>::iterator, bool> IV =
756 FullyAvailableBlocks.try_emplace(
757 CurrBB, AvailabilityState::SpeculativelyAvailable);
758 AvailabilityState &State = IV.first->second;
759
760 // Did the entry already exist for this block?
761 if (!IV.second) {
762 if (State == AvailabilityState::Unavailable) {
763 UnavailableBB = CurrBB;
764 break; // Backpropagate unavailability info.
765 }
766
767 #ifndef NDEBUG
768 AvailableBBs.emplace_back(CurrBB);
769 #endif
770 continue; // Don't recurse further, but continue processing worklist.
771 }
772
773 // No entry found for block.
774 ++NumNewNewSpeculativelyAvailableBBs;
775 bool OutOfBudget = NumNewNewSpeculativelyAvailableBBs > MaxBBSpeculations;
776
777 // If we have exhausted our budget, mark this block as unavailable.
778 // Also, if this block has no predecessors, the value isn't live-in here.
779 if (OutOfBudget || pred_empty(CurrBB)) {
780 MaxBBSpeculationCutoffReachedTimes += (int)OutOfBudget;
781 State = AvailabilityState::Unavailable;
782 UnavailableBB = CurrBB;
783 break; // Backpropagate unavailability info.
784 }
785
786 // Tentatively consider this block as speculatively available.
787 #ifndef NDEBUG
788 NewSpeculativelyAvailableBBs.insert(CurrBB);
789 #endif
790 // And further recurse into block's predecessors, in depth-first order!
791 Worklist.append(pred_begin(CurrBB), pred_end(CurrBB));
792 }
793
794 #if LLVM_ENABLE_STATS
795 IsValueFullyAvailableInBlockNumSpeculationsMax.updateMax(
796 NumNewNewSpeculativelyAvailableBBs);
797 #endif
798
799 // If the block isn't marked as fixpoint yet
800 // (the Unavailable and Available states are fixpoints)
801 auto MarkAsFixpointAndEnqueueSuccessors =
802 [&](BasicBlock *BB, AvailabilityState FixpointState) {
803 auto It = FullyAvailableBlocks.find(BB);
804 if (It == FullyAvailableBlocks.end())
805 return; // Never queried this block, leave as-is.
806 switch (AvailabilityState &State = It->second) {
807 case AvailabilityState::Unavailable:
808 case AvailabilityState::Available:
809 return; // Don't backpropagate further, continue processing worklist.
810 case AvailabilityState::SpeculativelyAvailable: // Fix it!
811 State = FixpointState;
812 #ifndef NDEBUG
813 assert(NewSpeculativelyAvailableBBs.erase(BB) &&
814 "Found a speculatively available successor leftover?");
815 #endif
816 // Queue successors for further processing.
817 Worklist.append(succ_begin(BB), succ_end(BB));
818 return;
819 }
820 };
821
822 if (UnavailableBB) {
823 // Okay, we have encountered an unavailable block.
824 // Mark speculatively available blocks reachable from UnavailableBB as
825 // unavailable as well. Paths are terminated when they reach blocks not in
826 // FullyAvailableBlocks or they are not marked as speculatively available.
827 Worklist.clear();
828 Worklist.append(succ_begin(*UnavailableBB), succ_end(*UnavailableBB));
829 while (!Worklist.empty())
830 MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
831 AvailabilityState::Unavailable);
832 }
833
834 #ifndef NDEBUG
835 Worklist.clear();
836 for (BasicBlock *AvailableBB : AvailableBBs)
837 Worklist.append(succ_begin(AvailableBB), succ_end(AvailableBB));
838 while (!Worklist.empty())
839 MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
840 AvailabilityState::Available);
841
842 assert(NewSpeculativelyAvailableBBs.empty() &&
843 "Must have fixed all the new speculatively available blocks.");
844 #endif
845
846 return !UnavailableBB;
847 }
848
849 /// Given a set of loads specified by ValuesPerBlock,
850 /// construct SSA form, allowing us to eliminate Load. This returns the value
851 /// that should be used at Load's definition site.
852 static Value *
853 ConstructSSAForLoadSet(LoadInst *Load,
854 SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
855 GVN &gvn) {
856 // Check for the fully redundant, dominating load case. In this case, we can
857 // just use the dominating value directly.
858 if (ValuesPerBlock.size() == 1 &&
859 gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
860 Load->getParent())) {
861 assert(!ValuesPerBlock[0].AV.isUndefValue() &&
862 "Dead BB dominate this block");
863 return ValuesPerBlock[0].MaterializeAdjustedValue(Load, gvn);
864 }
865
866 // Otherwise, we have to construct SSA form.
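// For example, with %v1 available in %pred1 and %v2 in %pred2 (names
// illustrative), SSAUpdater materializes
// "phi [ %v1, %pred1 ], [ %v2, %pred2 ]" in the block containing the load.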
867 SmallVector<PHINode*, 8> NewPHIs;
868 SSAUpdater SSAUpdate(&NewPHIs);
869 SSAUpdate.Initialize(Load->getType(), Load->getName());
870
871 for (const AvailableValueInBlock &AV : ValuesPerBlock) {
872 BasicBlock *BB = AV.BB;
873
874 if (AV.AV.isUndefValue())
875 continue;
876
877 if (SSAUpdate.HasValueForBlock(BB))
878 continue;
879
880 // If the value is the load that we will be eliminating, and the block it's
881 // available in is the block that the load is in, then don't add it as
882 // SSAUpdater will resolve the value to the relevant phi which may let it
883 // avoid phi construction entirely if there's actually only one value.
884 if (BB == Load->getParent() &&
885 ((AV.AV.isSimpleValue() && AV.AV.getSimpleValue() == Load) ||
886 (AV.AV.isCoercedLoadValue() && AV.AV.getCoercedLoadValue() == Load)))
887 continue;
888
889 SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(Load, gvn));
890 }
891
892 // Perform PHI construction.
893 return SSAUpdate.GetValueInMiddleOfBlock(Load->getParent());
894 }
895
896 Value *AvailableValue::MaterializeAdjustedValue(LoadInst *Load,
897 Instruction *InsertPt,
898 GVN &gvn) const {
899 Value *Res;
900 Type *LoadTy = Load->getType();
901 const DataLayout &DL = Load->getModule()->getDataLayout();
902 if (isSimpleValue()) {
903 Res = getSimpleValue();
904 if (Res->getType() != LoadTy) {
905 Res = getStoreValueForLoad(Res, Offset, LoadTy, InsertPt, DL);
906
907 LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset
908 << " " << *getSimpleValue() << '\n'
909 << *Res << '\n'
910 << "\n\n\n");
911 }
912 } else if (isCoercedLoadValue()) {
913 LoadInst *CoercedLoad = getCoercedLoadValue();
914 if (CoercedLoad->getType() == LoadTy && Offset == 0) {
915 Res = CoercedLoad;
916 } else {
917 Res = getLoadValueForLoad(CoercedLoad, Offset, LoadTy, InsertPt, DL);
918 // We would like to use gvn.markInstructionForDeletion here, but we can't
919 // because the load is already memoized into the leader map table that GVN
920 // tracks. It is potentially possible to remove the load from the table,
921 // but then all of the operations based on it would need to be
922 // rehashed. Just leave the dead load around.
923 gvn.getMemDep().removeInstruction(CoercedLoad);
924 LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset
925 << " " << *getCoercedLoadValue() << '\n'
926 << *Res << '\n'
927 << "\n\n\n");
928 }
929 } else if (isMemIntrinValue()) {
930 Res = getMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy,
931 InsertPt, DL);
932 LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
933 << " " << *getMemIntrinValue() << '\n'
934 << *Res << '\n'
935 << "\n\n\n");
936 } else {
937 llvm_unreachable("Should not materialize value from dead block");
938 }
939 assert(Res && "failed to materialize?");
940 return Res;
941 }
942
943 static bool isLifetimeStart(const Instruction *Inst) {
944 if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
945 return II->getIntrinsicID() == Intrinsic::lifetime_start;
946 return false;
947 }
948
949 /// Assuming To can be reached from both From and Between, does Between lie on
950 /// every path from From to To?
951 static bool liesBetween(const Instruction *From, Instruction *Between,
952 const Instruction *To, DominatorTree *DT) {
953 if (From->getParent() == Between->getParent())
954 return DT->dominates(From, Between);
955 SmallSet<BasicBlock *, 1> Exclusion;
956 Exclusion.insert(Between->getParent());
957 return !isPotentiallyReachable(From, To, &Exclusion, DT);
958 }
959
960 /// Try to locate the three instructions involved in a missed
961 /// load-elimination case that is due to an intervening store.
962 static void reportMayClobberedLoad(LoadInst *Load, MemDepResult DepInfo,
963 DominatorTree *DT,
964 OptimizationRemarkEmitter *ORE) {
965 using namespace ore;
966
967 User *OtherAccess = nullptr;
968
969 OptimizationRemarkMissed R(DEBUG_TYPE, "LoadClobbered", Load);
970 R << "load of type " << NV("Type", Load->getType()) << " not eliminated"
971 << setExtraArgs();
972
973 for (auto *U : Load->getPointerOperand()->users()) {
974 if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U)) &&
975 cast<Instruction>(U)->getFunction() == Load->getFunction() &&
976 DT->dominates(cast<Instruction>(U), Load)) {
977 // Use the most immediately dominating value
978 if (OtherAccess) {
979 if (DT->dominates(cast<Instruction>(OtherAccess), cast<Instruction>(U)))
980 OtherAccess = U;
981 else
982 assert(DT->dominates(cast<Instruction>(U),
983 cast<Instruction>(OtherAccess)));
984 } else
985 OtherAccess = U;
986 }
987 }
988
989 if (!OtherAccess) {
990 // There is no dominating use, check if we can find a closest non-dominating
991 // use that lies between any other potentially available use and Load.
992 for (auto *U : Load->getPointerOperand()->users()) {
993 if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U)) &&
994 cast<Instruction>(U)->getFunction() == Load->getFunction() &&
995 isPotentiallyReachable(cast<Instruction>(U), Load, nullptr, DT)) {
996 if (OtherAccess) {
997 if (liesBetween(cast<Instruction>(OtherAccess), cast<Instruction>(U),
998 Load, DT)) {
999 OtherAccess = U;
1000 } else if (!liesBetween(cast<Instruction>(U),
1001 cast<Instruction>(OtherAccess), Load, DT)) {
1002 // These uses are both partially available at Load were it not for
1003 // the clobber, but neither lies strictly after the other.
1004 OtherAccess = nullptr;
1005 break;
1006 } // else: keep current OtherAccess since it lies between U and Load
1007 } else {
1008 OtherAccess = U;
1009 }
1010 }
1011 }
1012 }
1013
1014 if (OtherAccess)
1015 R << " in favor of " << NV("OtherAccess", OtherAccess);
1016
1017 R << " because it is clobbered by " << NV("ClobberedBy", DepInfo.getInst());
1018
1019 ORE->emit(R);
1020 }
1021
1022 bool GVN::AnalyzeLoadAvailability(LoadInst *Load, MemDepResult DepInfo,
1023 Value *Address, AvailableValue &Res) {
1024 assert((DepInfo.isDef() || DepInfo.isClobber()) &&
1025 "expected a local dependence");
1026 assert(Load->isUnordered() && "rules below are incorrect for ordered access");
1027
1028 const DataLayout &DL = Load->getModule()->getDataLayout();
1029
1030 Instruction *DepInst = DepInfo.getInst();
1031 if (DepInfo.isClobber()) {
1032 // If the dependence is to a store that writes to a superset of the bits
1033 // read by the load, we can extract the bits we need for the load from the
1034 // stored value.
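// For example, a "store i32 %x" followed by a narrower "load i16" within
// the stored bytes can be satisfied by shifting and truncating %x rather
// than reloading from memory.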
1035 if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
1036 // Can't forward from non-atomic to atomic without violating memory model.
1037 if (Address && Load->isAtomic() <= DepSI->isAtomic()) {
1038 int Offset =
1039 analyzeLoadFromClobberingStore(Load->getType(), Address, DepSI, DL);
1040 if (Offset != -1) {
1041 Res = AvailableValue::get(DepSI->getValueOperand(), Offset);
1042 return true;
1043 }
1044 }
1045 }
1046
1047 // Check to see if we have something like this:
1048 // load i32* P
1049 // load i8* (P+1)
1050 // if we have this, replace the later with an extraction from the former.
1051 if (LoadInst *DepLoad = dyn_cast<LoadInst>(DepInst)) {
1052 // If this is a clobber and the load is the first instruction in its
1053 // block, then we have the first instruction in the entry block.
1054 // Can't forward from non-atomic to atomic without violating memory model.
1055 if (DepLoad != Load && Address &&
1056 Load->isAtomic() <= DepLoad->isAtomic()) {
1057 Type *LoadType = Load->getType();
1058 int Offset = -1;
1059
1060 // If MD reported clobber, check it was nested.
1061 if (DepInfo.isClobber() &&
1062 canCoerceMustAliasedValueToLoad(DepLoad, LoadType, DL)) {
1063 const auto ClobberOff = MD->getClobberOffset(DepLoad);
1064 // GVN cannot handle a negative offset.
1065 Offset = (ClobberOff == None || ClobberOff.getValue() < 0)
1066 ? -1
1067 : ClobberOff.getValue();
1068 }
1069 if (Offset == -1)
1070 Offset =
1071 analyzeLoadFromClobberingLoad(LoadType, Address, DepLoad, DL);
1072 if (Offset != -1) {
1073 Res = AvailableValue::getLoad(DepLoad, Offset);
1074 return true;
1075 }
1076 }
1077 }
1078
1079 // If the clobbering value is a memset/memcpy/memmove, see if we can
1080 // forward a value on from it.
1081 if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
1082 if (Address && !Load->isAtomic()) {
1083 int Offset = analyzeLoadFromClobberingMemInst(Load->getType(), Address,
1084 DepMI, DL);
1085 if (Offset != -1) {
1086 Res = AvailableValue::getMI(DepMI, Offset);
1087 return true;
1088 }
1089 }
1090 }
1091 // Nothing known about this clobber, have to be conservative
1092 LLVM_DEBUG(
1093 // fast print dep, using operator<< on instruction is too slow.
1094 dbgs() << "GVN: load "; Load->printAsOperand(dbgs());
1095 dbgs() << " is clobbered by " << *DepInst << '\n';);
1096 if (ORE->allowExtraAnalysis(DEBUG_TYPE))
1097 reportMayClobberedLoad(Load, DepInfo, DT, ORE);
1098
1099 return false;
1100 }
1101 assert(DepInfo.isDef() && "follows from above");
1102
1103 // Loading the allocation -> undef.
1104 if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI) ||
1105 isAlignedAllocLikeFn(DepInst, TLI) ||
1106 // Loading immediately after lifetime begin -> undef.
1107 isLifetimeStart(DepInst)) {
1108 Res = AvailableValue::get(UndefValue::get(Load->getType()));
1109 return true;
1110 }
1111
1112 // Loading from calloc (which zero initializes memory) -> zero
1113 if (isCallocLikeFn(DepInst, TLI)) {
1114 Res = AvailableValue::get(Constant::getNullValue(Load->getType()));
1115 return true;
1116 }
1117
1118 if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
1119 // Reject loads and stores that are to the same address but are of
1120 // different types if we have to. If the stored value is convertible to
1121 // the loaded value, we can reuse it.
1122 if (!canCoerceMustAliasedValueToLoad(S->getValueOperand(), Load->getType(),
1123 DL))
1124 return false;
1125
1126 // Can't forward from non-atomic to atomic without violating memory model.
1127 if (S->isAtomic() < Load->isAtomic())
1128 return false;
1129
1130 Res = AvailableValue::get(S->getValueOperand());
1131 return true;
1132 }
1133
1134 if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
1135 // If the types mismatch and we can't handle it, reject reuse of the load.
1136 // If the stored value is larger or equal to the loaded value, we can reuse
1137 // it.
1138 if (!canCoerceMustAliasedValueToLoad(LD, Load->getType(), DL))
1139 return false;
1140
1141 // Can't forward from non-atomic to atomic without violating memory model.
1142 if (LD->isAtomic() < Load->isAtomic())
1143 return false;
1144
1145 Res = AvailableValue::getLoad(LD);
1146 return true;
1147 }
1148
1149 // Unknown def - must be conservative
1150 LLVM_DEBUG(
1151 // fast print dep, using operator<< on instruction is too slow.
1152 dbgs() << "GVN: load "; Load->printAsOperand(dbgs());
1153 dbgs() << " has unknown def " << *DepInst << '\n';);
1154 return false;
1155 }
1156
1157 void GVN::AnalyzeLoadAvailability(LoadInst *Load, LoadDepVect &Deps,
1158 AvailValInBlkVect &ValuesPerBlock,
1159 UnavailBlkVect &UnavailableBlocks) {
1160 // Filter out useless results (non-locals, etc.). Keep track of the blocks
1161 // where we have a value available in repl, also keep track of whether we see
1162 // dependencies that produce an unknown value for the load (such as a call
1163 // that could potentially clobber the load).
1164 unsigned NumDeps = Deps.size();
1165 for (unsigned i = 0, e = NumDeps; i != e; ++i) {
1166 BasicBlock *DepBB = Deps[i].getBB();
1167 MemDepResult DepInfo = Deps[i].getResult();
1168
1169 if (DeadBlocks.count(DepBB)) {
1170 // A dead dependent mem-op is treated as a load evaluating the same value
1171 // as the load in question.
1172 ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB));
1173 continue;
1174 }
1175
1176 if (!DepInfo.isDef() && !DepInfo.isClobber()) {
1177 UnavailableBlocks.push_back(DepBB);
1178 continue;
1179 }
1180
1181 // The address being loaded in this non-local block may not be the same as
1182 // the pointer operand of the load if PHI translation occurs. Make sure
1183 // to consider the right address.
1184 Value *Address = Deps[i].getAddress();
1185
1186 AvailableValue AV;
1187 if (AnalyzeLoadAvailability(Load, DepInfo, Address, AV)) {
1188 // subtlety: because we know this was a non-local dependency, we know
1189 // it's safe to materialize anywhere between the instruction within
1190 // DepInfo and the end of its block.
1191 ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
1192 std::move(AV)));
1193 } else {
1194 UnavailableBlocks.push_back(DepBB);
1195 }
1196 }
1197
1198 assert(NumDeps == ValuesPerBlock.size() + UnavailableBlocks.size() &&
1199 "post condition violation");
1200 }
1201
1202 void GVN::eliminatePartiallyRedundantLoad(
1203 LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
1204 MapVector<BasicBlock *, Value *> &AvailableLoads) {
1205 for (const auto &AvailableLoad : AvailableLoads) {
1206 BasicBlock *UnavailableBlock = AvailableLoad.first;
1207 Value *LoadPtr = AvailableLoad.second;
1208
1209 auto *NewLoad =
1210 new LoadInst(Load->getType(), LoadPtr, Load->getName() + ".pre",
1211 Load->isVolatile(), Load->getAlign(), Load->getOrdering(),
1212 Load->getSyncScopeID(), UnavailableBlock->getTerminator());
1213 NewLoad->setDebugLoc(Load->getDebugLoc());
1214 if (MSSAU) {
1215 auto *MSSA = MSSAU->getMemorySSA();
1216 // Get the defining access of the original load or use the load if it is a
1217 // MemoryDef (e.g. because it is volatile). The inserted loads are
1218 // guaranteed to load from the same definition.
1219 auto *LoadAcc = MSSA->getMemoryAccess(Load);
1220 auto *DefiningAcc =
1221 isa<MemoryDef>(LoadAcc) ? LoadAcc : LoadAcc->getDefiningAccess();
1222 auto *NewAccess = MSSAU->createMemoryAccessInBB(
1223 NewLoad, DefiningAcc, NewLoad->getParent(),
1224 MemorySSA::BeforeTerminator);
1225 if (auto *NewDef = dyn_cast<MemoryDef>(NewAccess))
1226 MSSAU->insertDef(NewDef, /*RenameUses=*/true);
1227 else
1228 MSSAU->insertUse(cast<MemoryUse>(NewAccess), /*RenameUses=*/true);
1229 }
1230
1231 // Transfer the old load's AA tags to the new load.
1232 AAMDNodes Tags = Load->getAAMetadata();
1233 if (Tags)
1234 NewLoad->setAAMetadata(Tags);
1235
1236 if (auto *MD = Load->getMetadata(LLVMContext::MD_invariant_load))
1237 NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD);
1238 if (auto *InvGroupMD = Load->getMetadata(LLVMContext::MD_invariant_group))
1239 NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD);
1240 if (auto *RangeMD = Load->getMetadata(LLVMContext::MD_range))
1241 NewLoad->setMetadata(LLVMContext::MD_range, RangeMD);
1242 if (auto *AccessMD = Load->getMetadata(LLVMContext::MD_access_group))
1243 if (LI &&
1244 LI->getLoopFor(Load->getParent()) == LI->getLoopFor(UnavailableBlock))
1245 NewLoad->setMetadata(LLVMContext::MD_access_group, AccessMD);
1246
1247 // We do not propagate the old load's debug location, because the new
1248 // load now lives in a different BB, and we want to avoid a jumpy line
1249 // table.
1250 // FIXME: How do we retain source locations without causing poor debugging
1251 // behavior?
1252
1253 // Add the newly created load.
1254 ValuesPerBlock.push_back(
1255 AvailableValueInBlock::get(UnavailableBlock, NewLoad));
1256 MD->invalidateCachedPointerInfo(LoadPtr);
1257 LLVM_DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
1258 }
1259
1260 // Perform PHI construction.
1261 Value *V = ConstructSSAForLoadSet(Load, ValuesPerBlock, *this);
1262 Load->replaceAllUsesWith(V);
1263 if (isa<PHINode>(V))
1264 V->takeName(Load);
1265 if (Instruction *I = dyn_cast<Instruction>(V))
1266 I->setDebugLoc(Load->getDebugLoc());
1267 if (V->getType()->isPtrOrPtrVectorTy())
1268 MD->invalidateCachedPointerInfo(V);
1269 markInstructionForDeletion(Load);
1270 ORE->emit([&]() {
1271 return OptimizationRemark(DEBUG_TYPE, "LoadPRE", Load)
1272 << "load eliminated by PRE";
1273 });
1274 }
1275
1276 bool GVN::PerformLoadPRE(LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
1277 UnavailBlkVect &UnavailableBlocks) {
1278 // Okay, we have *some* definitions of the value. This means that the value
1279 // is available in some of our (transitive) predecessors. Let's think about
1280 // doing PRE of this load. This will involve inserting a new load into the
1281 // predecessor when it's not available. We could do this in general, but
1282 // prefer to not increase code size. As such, we only do this when we know
1283 // that we only have to insert *one* load (which means we're basically moving
1284 // the load, not inserting a new one).
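// For example, if the value is available on one of two paths into LoadBB,
// inserting a single load on the other incoming path makes it fully
// redundant at the merge, and PHI construction then removes the original
// load.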
1285
1286 SmallPtrSet<BasicBlock *, 4> Blockers(UnavailableBlocks.begin(),
1287 UnavailableBlocks.end());
1288
1289 // Let's find the first basic block with more than one predecessor. Walk
1290 // backwards through predecessors if needed.
1291 BasicBlock *LoadBB = Load->getParent();
1292 BasicBlock *TmpBB = LoadBB;
1293
1294 // Check that there are no implicit control flow instructions above our load in
1295 // its block. If there is an instruction that doesn't always pass the
1296 // execution to the following instruction, then moving through it may become
1297 // invalid. For example:
1298 //
1299 // int arr[LEN];
1300 // int index = ???;
1301 // ...
1302 // guard(0 <= index && index < LEN);
1303 // use(arr[index]);
1304 //
1305 // It is illegal to move the array access to any point above the guard,
1306 // because if the index is out of bounds we should deoptimize rather than
1307 // access the array.
1308 // Check that there is no guard in this block above our instruction.
1309 bool MustEnsureSafetyOfSpeculativeExecution =
1310 ICF->isDominatedByICFIFromSameBlock(Load);
1311
1312 while (TmpBB->getSinglePredecessor()) {
1313 TmpBB = TmpBB->getSinglePredecessor();
1314 if (TmpBB == LoadBB) // Infinite (unreachable) loop.
1315 return false;
1316 if (Blockers.count(TmpBB))
1317 return false;
1318
1319 // If any of these blocks has more than one successor (i.e. if the edge we
1320 // just traversed was critical), then there are other paths through this
1321 // block along which the load may not be anticipated. Hoisting the load
1322 // above this block would be adding the load to execution paths along
1323 // which it was not previously executed.
1324 if (TmpBB->getTerminator()->getNumSuccessors() != 1)
1325 return false;
1326
1327 // Check that there is no implicit control flow in a block above.
1328 MustEnsureSafetyOfSpeculativeExecution =
1329 MustEnsureSafetyOfSpeculativeExecution || ICF->hasICF(TmpBB);
1330 }
1331
1332 assert(TmpBB);
1333 LoadBB = TmpBB;
1334
1335 // Check to see how many predecessors have the loaded value fully
1336 // available.
1337 MapVector<BasicBlock *, Value *> PredLoads;
1338 DenseMap<BasicBlock *, AvailabilityState> FullyAvailableBlocks;
1339 for (const AvailableValueInBlock &AV : ValuesPerBlock)
1340 FullyAvailableBlocks[AV.BB] = AvailabilityState::Available;
1341 for (BasicBlock *UnavailableBB : UnavailableBlocks)
1342 FullyAvailableBlocks[UnavailableBB] = AvailabilityState::Unavailable;
1343
1344 SmallVector<BasicBlock *, 4> CriticalEdgePred;
1345 for (BasicBlock *Pred : predecessors(LoadBB)) {
1346 // If any predecessor block is an EH pad that does not allow non-PHI
1347 // instructions before the terminator, we can't PRE the load.
1348 if (Pred->getTerminator()->isEHPad()) {
1349 LLVM_DEBUG(
1350 dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
1351 << Pred->getName() << "': " << *Load << '\n');
1352 return false;
1353 }
1354
1355 if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) {
1356 continue;
1357 }
1358
1359 if (Pred->getTerminator()->getNumSuccessors() != 1) {
1360 if (isa<IndirectBrInst>(Pred->getTerminator())) {
1361 LLVM_DEBUG(
1362 dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
1363 << Pred->getName() << "': " << *Load << '\n');
1364 return false;
1365 }
1366
1367 // FIXME: Can we support the fallthrough edge?
1368 if (isa<CallBrInst>(Pred->getTerminator())) {
1369 LLVM_DEBUG(
1370 dbgs() << "COULD NOT PRE LOAD BECAUSE OF CALLBR CRITICAL EDGE '"
1371 << Pred->getName() << "': " << *Load << '\n');
1372 return false;
1373 }
1374
1375 if (LoadBB->isEHPad()) {
1376 LLVM_DEBUG(
1377 dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
1378 << Pred->getName() << "': " << *Load << '\n');
1379 return false;
1380 }
1381
1382 // Do not split backedge as it will break the canonical loop form.
1383 if (!isLoadPRESplitBackedgeEnabled())
1384 if (DT->dominates(LoadBB, Pred)) {
1385 LLVM_DEBUG(
1386 dbgs()
1387 << "COULD NOT PRE LOAD BECAUSE OF A BACKEDGE CRITICAL EDGE '"
1388 << Pred->getName() << "': " << *Load << '\n');
1389 return false;
1390 }
1391
1392 CriticalEdgePred.push_back(Pred);
1393 } else {
1394 // Only add the predecessors that will not be split for now.
1395 PredLoads[Pred] = nullptr;
1396 }
1397 }
1398
1399 // Decide whether PRE is profitable for this load.
1400 unsigned NumUnavailablePreds = PredLoads.size() + CriticalEdgePred.size();
1401 assert(NumUnavailablePreds != 0 &&
1402 "Fully available value should already be eliminated!");
1403
1404 // If this load is unavailable in multiple predecessors, reject it.
1405 // FIXME: If we could restructure the CFG, we could make a common pred with
1406 // all the preds that don't have an available Load and insert a new load into
1407 // that one block.
1408 if (NumUnavailablePreds != 1)
1409 return false;
1410
1411 // Now we know where we will insert the load. We must ensure that it is
1412 // safe to speculatively execute the load at that point.
1413 if (MustEnsureSafetyOfSpeculativeExecution) {
1414 if (CriticalEdgePred.size())
1415 if (!isSafeToSpeculativelyExecute(Load, LoadBB->getFirstNonPHI(), DT))
1416 return false;
1417 for (auto &PL : PredLoads)
1418 if (!isSafeToSpeculativelyExecute(Load, PL.first->getTerminator(), DT))
1419 return false;
1420 }
1421
1422 // Split critical edges, and update the unavailable predecessors accordingly.
1423 for (BasicBlock *OrigPred : CriticalEdgePred) {
1424 BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
1425 assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!");
1426 PredLoads[NewPred] = nullptr;
1427 LLVM_DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
1428 << LoadBB->getName() << '\n');
1429 }
1430
1431 // Check if the load can safely be moved to all the unavailable predecessors.
1432 bool CanDoPRE = true;
1433 const DataLayout &DL = Load->getModule()->getDataLayout();
1434 SmallVector<Instruction*, 8> NewInsts;
1435 for (auto &PredLoad : PredLoads) {
1436 BasicBlock *UnavailablePred = PredLoad.first;
1437
1438 // Do PHI translation to get its value in the predecessor if necessary. The
1439 // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
1440 // We do the translation for each edge we skipped by going from Load's block
1441 // to LoadBB, otherwise we might miss pieces needing translation.
1442
1443 // If all preds have a single successor, then we know it is safe to insert
1444 // the load on the pred (?!?), so we can insert code to materialize the
1445 // pointer if it is not available.
1446 Value *LoadPtr = Load->getPointerOperand();
1447 BasicBlock *Cur = Load->getParent();
1448 while (Cur != LoadBB) {
1449 PHITransAddr Address(LoadPtr, DL, AC);
1450 LoadPtr = Address.PHITranslateWithInsertion(
1451 Cur, Cur->getSinglePredecessor(), *DT, NewInsts);
1452 if (!LoadPtr) {
1453 CanDoPRE = false;
1454 break;
1455 }
1456 Cur = Cur->getSinglePredecessor();
1457 }
1458
1459 if (LoadPtr) {
1460 PHITransAddr Address(LoadPtr, DL, AC);
1461 LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred, *DT,
1462 NewInsts);
1463 }
1464 // If we couldn't find or insert a computation of this phi translated value,
1465 // we fail PRE.
1466 if (!LoadPtr) {
1467 LLVM_DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
1468 << *Load->getPointerOperand() << "\n");
1469 CanDoPRE = false;
1470 break;
1471 }
1472
1473 PredLoad.second = LoadPtr;
1474 }
1475
1476 if (!CanDoPRE) {
1477 while (!NewInsts.empty()) {
1478 // Erase instructions generated by the failed PHI translation before
1479 // trying to number them. PHI translation might insert instructions
1480 // in basic blocks other than the current one, and we delete them
1481 // directly, as markInstructionForDeletion only allows removing from the
1482 // current basic block.
1483 NewInsts.pop_back_val()->eraseFromParent();
1484 }
1485 // HINT: Don't revert the edge-splitting, as the following transformation may
1486 // also need to split these critical edges.
1487 return !CriticalEdgePred.empty();
1488 }
1489
1490 // Okay, we can eliminate this load by inserting a reload in the predecessor
1491 // and using PHI construction to get the value in the other predecessors, do
1492 // it.
1493 LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *Load << '\n');
1494 LLVM_DEBUG(if (!NewInsts.empty()) dbgs() << "INSERTED " << NewInsts.size()
1495 << " INSTS: " << *NewInsts.back()
1496 << '\n');
1497
1498 // Assign value numbers to the new instructions.
1499 for (Instruction *I : NewInsts) {
1500 // Instructions that have been inserted in predecessor(s) to materialize
1501 // the load address do not retain their original debug locations. Doing
1502 // so could lead to confusing (but correct) source attributions.
1503 I->updateLocationAfterHoist();
1504
1505 // FIXME: We really _ought_ to insert these value numbers into their
1506 // parent's availability map. However, in doing so, we risk getting into
1507 // ordering issues. If a block hasn't been processed yet, we would be
1508 // marking a value as AVAIL-IN, which isn't what we intend.
1509 VN.lookupOrAdd(I);
1510 }
1511
1512 eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, PredLoads);
1513 ++NumPRELoad;
1514 return true;
1515 }
1516
1517 bool GVN::performLoopLoadPRE(LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
1518 UnavailBlkVect &UnavailableBlocks) {
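// A rough sketch of the transform (hypothetical IR, not from the source):
// given a header load whose pointer is loop-invariant and whose only in-loop
// clobber sits in a cold block that does not dominate the latch,
//
//   header: %v = load i32, i32* %p       ; partially redundant
//   cold:   call void @clobber(i32* %p)  ; rarely executed
//
// we record the pointer as available in both the preheader and the cold
// block, so eliminatePartiallyRedundantLoad inserts a reload in each and
// replaces %v via SSA construction.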
1519 if (!LI)
1520 return false;
1521
1522 const Loop *L = LI->getLoopFor(Load->getParent());
1523 // TODO: Generalize to other loop blocks that dominate the latch.
1524 if (!L || L->getHeader() != Load->getParent())
1525 return false;
1526
1527 BasicBlock *Preheader = L->getLoopPreheader();
1528 BasicBlock *Latch = L->getLoopLatch();
1529 if (!Preheader || !Latch)
1530 return false;
1531
1532 Value *LoadPtr = Load->getPointerOperand();
1533 // Must be available in preheader.
1534 if (!L->isLoopInvariant(LoadPtr))
1535 return false;
1536
1537 // We plan to hoist the load to the preheader without introducing a new fault.
1538 // In order to do that, we need to prove that we cannot side-exit the loop
1539 // between first entering the loop header and executing the load.
1540 if (ICF->isDominatedByICFIFromSameBlock(Load))
1541 return false;
1542
1543 BasicBlock *LoopBlock = nullptr;
1544 for (auto *Blocker : UnavailableBlocks) {
1545 // Blockers from outside the loop are handled in preheader.
1546 if (!L->contains(Blocker))
1547 continue;
1548
1549 // Only allow one loop block. The loop header is executed at least as often
1550 // as any other loop block, and likely much more often. But in the case of
1551 // multiple loop blocks, we would need extra information (such as block
1552 // frequency info) to understand whether it is profitable to PRE into
1553 // multiple loop blocks.
1554 if (LoopBlock)
1555 return false;
1556
1557 // Do not sink into inner loops. This may be unprofitable.
1558 if (L != LI->getLoopFor(Blocker))
1559 return false;
1560
1561 // Blocks that dominate the latch execute on every single iteration, maybe
1562 // except the last one. So PREing into these blocks doesn't make much sense
1563 // in most cases. But the blocks that do not necessarily execute on each
1564 // iteration are sometimes much colder than the header, and this is when
1565 // PRE is potentially profitable.
1566 if (DT->dominates(Blocker, Latch))
1567 return false;
1568
1569 // Make sure that the terminator itself doesn't clobber.
1570 if (Blocker->getTerminator()->mayWriteToMemory())
1571 return false;
1572
1573 LoopBlock = Blocker;
1574 }
1575
1576 if (!LoopBlock)
1577 return false;
1578
1579 // Make sure the memory at this pointer cannot be freed, so that we can
1580 // safely reload from it after a clobber.
1581 if (LoadPtr->canBeFreed())
1582 return false;
1583
1584 // TODO: Support critical edge splitting if blocker has more than 1 successor.
1585 MapVector<BasicBlock *, Value *> AvailableLoads;
1586 AvailableLoads[LoopBlock] = LoadPtr;
1587 AvailableLoads[Preheader] = LoadPtr;
1588
1589 LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOOP LOAD: " << *Load << '\n');
1590 eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, AvailableLoads);
1591 ++NumPRELoopLoad;
1592 return true;
1593 }
1594
1595 static void reportLoadElim(LoadInst *Load, Value *AvailableValue,
1596 OptimizationRemarkEmitter *ORE) {
1597 using namespace ore;
1598
1599 ORE->emit([&]() {
1600 return OptimizationRemark(DEBUG_TYPE, "LoadElim", Load)
1601 << "load of type " << NV("Type", Load->getType()) << " eliminated"
1602 << setExtraArgs() << " in favor of "
1603 << NV("InfavorOfValue", AvailableValue);
1604 });
1605 }
1606
1607 /// Attempt to eliminate a load whose dependencies are
1608 /// non-local by performing PHI construction.
1609 bool GVN::processNonLocalLoad(LoadInst *Load) {
1610 // Non-local speculations are not allowed under ASan or HWASan.
1611 if (Load->getParent()->getParent()->hasFnAttribute(
1612 Attribute::SanitizeAddress) ||
1613 Load->getParent()->getParent()->hasFnAttribute(
1614 Attribute::SanitizeHWAddress))
1615 return false;
1616
1617 // Step 1: Find the non-local dependencies of the load.
1618 LoadDepVect Deps;
1619 MD->getNonLocalPointerDependency(Load, Deps);
1620
1621 // If we had to process more than one hundred blocks to find the
1622 // dependencies, this load isn't worth worrying about. Optimizing
1623 // it will be too expensive.
1624 unsigned NumDeps = Deps.size();
1625 if (NumDeps > MaxNumDeps)
1626 return false;
1627
1628 // If we had a phi translation failure, we'll have a single entry which is a
1629 // clobber in the current block. Reject this early.
1630 if (NumDeps == 1 &&
1631 !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
1632 LLVM_DEBUG(dbgs() << "GVN: non-local load "; Load->printAsOperand(dbgs());
1633 dbgs() << " has unknown dependencies\n";);
1634 return false;
1635 }
1636
1637 bool Changed = false;
1638 // If this load follows a GEP, see if we can PRE the indices before analyzing.
1639 if (GetElementPtrInst *GEP =
1640 dyn_cast<GetElementPtrInst>(Load->getOperand(0))) {
1641 for (GetElementPtrInst::op_iterator OI = GEP->idx_begin(),
1642 OE = GEP->idx_end();
1643 OI != OE; ++OI)
1644 if (Instruction *I = dyn_cast<Instruction>(OI->get()))
1645 Changed |= performScalarPRE(I);
1646 }
1647
1648 // Step 2: Analyze the availability of the load
1649 AvailValInBlkVect ValuesPerBlock;
1650 UnavailBlkVect UnavailableBlocks;
1651 AnalyzeLoadAvailability(Load, Deps, ValuesPerBlock, UnavailableBlocks);
1652
1653 // If we have no predecessors that produce a known value for this load, exit
1654 // early.
1655 if (ValuesPerBlock.empty())
1656 return Changed;
1657
1658 // Step 3: Eliminate full redundancy.
1659 //
1660 // If all of the instructions we depend on produce a known value for this
1661 // load, then it is fully redundant and we can use PHI insertion to compute
1662 // its value. Insert PHIs and remove the fully redundant value now.
1663 if (UnavailableBlocks.empty()) {
1664 LLVM_DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *Load << '\n');
1665
1666 // Perform PHI construction.
1667 Value *V = ConstructSSAForLoadSet(Load, ValuesPerBlock, *this);
1668 Load->replaceAllUsesWith(V);
1669
1670 if (isa<PHINode>(V))
1671 V->takeName(Load);
1672 if (Instruction *I = dyn_cast<Instruction>(V))
1673 // If instruction I has debug info, then we should not update it.
1674 // Also, if I has a null DebugLoc, then it is still potentially incorrect
1675 // to propagate Load's DebugLoc because Load may not post-dominate I.
1676 if (Load->getDebugLoc() && Load->getParent() == I->getParent())
1677 I->setDebugLoc(Load->getDebugLoc());
1678 if (V->getType()->isPtrOrPtrVectorTy())
1679 MD->invalidateCachedPointerInfo(V);
1680 markInstructionForDeletion(Load);
1681 ++NumGVNLoad;
1682 reportLoadElim(Load, V, ORE);
1683 return true;
1684 }
1685
1686 // Step 4: Eliminate partial redundancy.
1687 if (!isPREEnabled() || !isLoadPREEnabled())
1688 return Changed;
1689 if (!isLoadInLoopPREEnabled() && LI && LI->getLoopFor(Load->getParent()))
1690 return Changed;
1691
1692 if (performLoopLoadPRE(Load, ValuesPerBlock, UnavailableBlocks) ||
1693 PerformLoadPRE(Load, ValuesPerBlock, UnavailableBlocks))
1694 return true;
1695
1696 return Changed;
1697 }
1698
1699 static bool impliesEquivalanceIfTrue(CmpInst* Cmp) {
1700 if (Cmp->getPredicate() == CmpInst::Predicate::ICMP_EQ)
1701 return true;
1702
1703 // Floating point comparisons can be equal, but not equivalent. Cases:
1704 // NaNs for unordered operators
1705 // +0.0 vs -0.0 for all operators
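// For example (hypothetical values): with no fast-math knowledge,
// "fcmp oeq double 0.0, -0.0" is true, yet substituting one operand for the
// other would change "fdiv double 1.0, %x" from +inf to -inf, so OEQ
// equality alone does not license replacement.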
1706 if (Cmp->getPredicate() == CmpInst::Predicate::FCMP_OEQ ||
1707 (Cmp->getPredicate() == CmpInst::Predicate::FCMP_UEQ &&
1708 Cmp->getFastMathFlags().noNaNs())) {
1709 Value *LHS = Cmp->getOperand(0);
1710 Value *RHS = Cmp->getOperand(1);
1711 // If we can prove either side non-zero, then equality must imply
1712 // equivalence.
1713 // FIXME: We should do this optimization if 'no signed zeros' is
1714 // applicable via an instruction-level fast-math-flag or some other
1715 // indicator that relaxed FP semantics are being used.
1716 if (isa<ConstantFP>(LHS) && !cast<ConstantFP>(LHS)->isZero())
1717 return true;
1718 if (isa<ConstantFP>(RHS) && !cast<ConstantFP>(RHS)->isZero())
1719 return true;
1720 // TODO: Handle vector floating point constants
1721 }
1722 return false;
1723 }
1724
1725 static bool impliesEquivalanceIfFalse(CmpInst* Cmp) {
1726 if (Cmp->getPredicate() == CmpInst::Predicate::ICMP_NE)
1727 return true;
1728
1729 // Floating point comparisons can be equal, but not equivalent. Cases:
1730 // NaNs for unordered operators
1731 // +0.0 vs -0.0 for all operators
1732 if ((Cmp->getPredicate() == CmpInst::Predicate::FCMP_ONE &&
1733 Cmp->getFastMathFlags().noNaNs()) ||
1734 Cmp->getPredicate() == CmpInst::Predicate::FCMP_UNE) {
1735 Value *LHS = Cmp->getOperand(0);
1736 Value *RHS = Cmp->getOperand(1);
1737 // If we can prove either side non-zero, then equality must imply
1738 // equivalence.
1739 // FIXME: We should do this optimization if 'no signed zeros' is
1740 // applicable via an instruction-level fast-math-flag or some other
1741 // indicator that relaxed FP semantics are being used.
1742 if (isa<ConstantFP>(LHS) && !cast<ConstantFP>(LHS)->isZero())
1743 return true;
1744 if (isa<ConstantFP>(RHS) && !cast<ConstantFP>(RHS)->isZero())
1745 return true;
1746 // TODO: Handle vector floating point constants
1747 }
1748 return false;
1749 }
1750
1751
1752 static bool hasUsersIn(Value *V, BasicBlock *BB) {
1753 for (User *U : V->users())
1754 if (isa<Instruction>(U) &&
1755 cast<Instruction>(U)->getParent() == BB)
1756 return true;
1757 return false;
1758 }
1759
1760 bool GVN::processAssumeIntrinsic(AssumeInst *IntrinsicI) {
1761 Value *V = IntrinsicI->getArgOperand(0);
1762
1763 if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
1764 if (Cond->isZero()) {
1765 Type *Int8Ty = Type::getInt8Ty(V->getContext());
1766 // Insert a new store-to-null instruction before the assume intrinsic to
1767 // indicate that this code is not reachable. FIXME: We could insert an
1768 // unreachable instruction directly because we can modify the CFG.
1769 auto *NewS = new StoreInst(UndefValue::get(Int8Ty),
1770 Constant::getNullValue(Int8Ty->getPointerTo()),
1771 IntrinsicI);
1772 if (MSSAU) {
1773 const MemoryUseOrDef *FirstNonDom = nullptr;
1774 const auto *AL =
1775 MSSAU->getMemorySSA()->getBlockAccesses(IntrinsicI->getParent());
1776
1777 // If there are accesses in the current basic block, find the first one
1778 // that does not come before NewS. The new memory access is inserted
1779 // after the found access or before the terminator if no such access is
1780 // found.
1781 if (AL) {
1782 for (auto &Acc : *AL) {
1783 if (auto *Current = dyn_cast<MemoryUseOrDef>(&Acc))
1784 if (!Current->getMemoryInst()->comesBefore(NewS)) {
1785 FirstNonDom = Current;
1786 break;
1787 }
1788 }
1789 }
1790
1791 // This added store is to null, so it will never be executed; we can
1792 // just use the LiveOnEntry def as the defining access.
1793 auto *NewDef =
1794 FirstNonDom ? MSSAU->createMemoryAccessBefore(
1795 NewS, MSSAU->getMemorySSA()->getLiveOnEntryDef(),
1796 const_cast<MemoryUseOrDef *>(FirstNonDom))
1797 : MSSAU->createMemoryAccessInBB(
1798 NewS, MSSAU->getMemorySSA()->getLiveOnEntryDef(),
1799 NewS->getParent(), MemorySSA::BeforeTerminator);
1800
1801 MSSAU->insertDef(cast<MemoryDef>(NewDef), /*RenameUses=*/false);
1802 }
1803 }
1804 if (isAssumeWithEmptyBundle(*IntrinsicI))
1805 markInstructionForDeletion(IntrinsicI);
1806 return false;
1807 } else if (isa<Constant>(V)) {
1808 // If it's a constant but not false, it must evaluate to true. This means
1809 // our assume is assume(true) and thus pointless, so we don't want to do
1810 // anything more here.
1811 return false;
1812 }
1813
1814 Constant *True = ConstantInt::getTrue(V->getContext());
1815 bool Changed = false;
1816
1817 for (BasicBlock *Successor : successors(IntrinsicI->getParent())) {
1818 BasicBlockEdge Edge(IntrinsicI->getParent(), Successor);
1819
1820 // This property is only true in dominated successors; propagateEquality
1821 // will check dominance for us.
1822 Changed |= propagateEquality(V, True, Edge, false);
1823 }
1824
1825 // We can replace assume value with true, which covers cases like this:
1826 // call void @llvm.assume(i1 %cmp)
1827 // br i1 %cmp, label %bb1, label %bb2 ; will change %cmp to true
1828 ReplaceOperandsWithMap[V] = True;
1829
1830 // Similarly, after assume(!NotV) we know that NotV == false.
1831 Value *NotV;
1832 if (match(V, m_Not(m_Value(NotV))))
1833 ReplaceOperandsWithMap[NotV] = ConstantInt::getFalse(V->getContext());
1834
1835 // If we find an equality fact, canonicalize all dominated uses in this block
1836 // to one of the two values. We heuristically choose the "oldest" of the
1837 // two, where age is determined by value number. (Note that propagateEquality
1838 // above handles the cross block case.)
1839 //
1840 // Key cases to cover are:
1841 // 1)
1842 // %cmp = fcmp oeq float 3.000000e+00, %0 ; const on lhs could happen
1843 // call void @llvm.assume(i1 %cmp)
1844 // ret float %0 ; will change it to ret float 3.000000e+00
1845 // 2)
1846 // %load = load float, float* %addr
1847 // %cmp = fcmp oeq float %load, %0
1848 // call void @llvm.assume(i1 %cmp)
1849 // ret float %load ; will change it to ret float %0
1850 if (auto *CmpI = dyn_cast<CmpInst>(V)) {
1851 if (impliesEquivalanceIfTrue(CmpI)) {
1852 Value *CmpLHS = CmpI->getOperand(0);
1853 Value *CmpRHS = CmpI->getOperand(1);
1854 // Heuristically pick the better replacement -- the choice of heuristic
1855 // isn't terribly important here, but the fact we canonicalize on some
1856 // replacement is for exposing other simplifications.
1857 // TODO: pull this out as a helper function and reuse w/existing
1858 // (slightly different) logic.
1859 if (isa<Constant>(CmpLHS) && !isa<Constant>(CmpRHS))
1860 std::swap(CmpLHS, CmpRHS);
1861 if (!isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))
1862 std::swap(CmpLHS, CmpRHS);
1863 if ((isa<Argument>(CmpLHS) && isa<Argument>(CmpRHS)) ||
1864 (isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))) {
1865 // Move the 'oldest' value to the right-hand side, using the value
1866 // number as a proxy for age.
1867 uint32_t LVN = VN.lookupOrAdd(CmpLHS);
1868 uint32_t RVN = VN.lookupOrAdd(CmpRHS);
1869 if (LVN < RVN)
1870 std::swap(CmpLHS, CmpRHS);
1871 }
1872
1873 // Handle the degenerate case where we either haven't pruned a dead path or
1874 // removed a trivial assume yet.
1875 if (isa<Constant>(CmpLHS) && isa<Constant>(CmpRHS))
1876 return Changed;
1877
1878 LLVM_DEBUG(dbgs() << "Replacing dominated uses of "
1879 << *CmpLHS << " with "
1880 << *CmpRHS << " in block "
1881 << IntrinsicI->getParent()->getName() << "\n");
1882
1883
1884 // Setup the replacement map - this handles uses within the same block
1885 if (hasUsersIn(CmpLHS, IntrinsicI->getParent()))
1886 ReplaceOperandsWithMap[CmpLHS] = CmpRHS;
1887
1888 // NOTE: The non-block local cases are handled by the call to
1889 // propagateEquality above; this block is just about handling the block
1890 // local cases. TODO: There's a bunch of logic in propagateEquality which
1891 // isn't duplicated for the block local case, can we share it somehow?
1892 }
1893 }
1894 return Changed;
1895 }
1896
1897 static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) {
1898 patchReplacementInstruction(I, Repl);
1899 I->replaceAllUsesWith(Repl);
1900 }
1901
1902 /// Attempt to eliminate a load, first by eliminating it
1903 /// locally, and then attempting non-local elimination if that fails.
1904 bool GVN::processLoad(LoadInst *L) {
1905 if (!MD)
1906 return false;
1907
1908 // This code hasn't been audited for ordered or volatile memory access
1909 if (!L->isUnordered())
1910 return false;
1911
1912 if (L->use_empty()) {
1913 markInstructionForDeletion(L);
1914 return true;
1915 }
1916
1917 // ... to a pointer that has been loaded from before...
1918 MemDepResult Dep = MD->getDependency(L);
1919
1920 // If it is defined in another block, try harder.
1921 if (Dep.isNonLocal())
1922 return processNonLocalLoad(L);
1923
1924 // Only handle the local case below
1925 if (!Dep.isDef() && !Dep.isClobber()) {
1926 // This might be a NonFuncLocal or an Unknown
1927 LLVM_DEBUG(
1928 // fast print dep, using operator<< on instruction is too slow.
1929 dbgs() << "GVN: load "; L->printAsOperand(dbgs());
1930 dbgs() << " has unknown dependence\n";);
1931 return false;
1932 }
1933
1934 AvailableValue AV;
1935 if (AnalyzeLoadAvailability(L, Dep, L->getPointerOperand(), AV)) {
1936 Value *AvailableValue = AV.MaterializeAdjustedValue(L, L, *this);
1937
1938 // Replace the load!
1939 patchAndReplaceAllUsesWith(L, AvailableValue);
1940 markInstructionForDeletion(L);
1941 if (MSSAU)
1942 MSSAU->removeMemoryAccess(L);
1943 ++NumGVNLoad;
1944 reportLoadElim(L, AvailableValue, ORE);
1945 // Tell MDA to reexamine the reused pointer since we might have more
1946 // information after forwarding it.
1947 if (MD && AvailableValue->getType()->isPtrOrPtrVectorTy())
1948 MD->invalidateCachedPointerInfo(AvailableValue);
1949 return true;
1950 }
1951
1952 return false;
1953 }
1954
1955 /// Return a pair whose first field is the value number of \p Exp and whose
1956 /// second field indicates whether that value number is newly created.
1957 std::pair<uint32_t, bool>
1958 GVN::ValueTable::assignExpNewValueNum(Expression &Exp) {
1959 uint32_t &e = expressionNumbering[Exp];
1960 bool CreateNewValNum = !e;
1961 if (CreateNewValNum) {
1962 Expressions.push_back(Exp);
1963 if (ExprIdx.size() < nextValueNumber + 1)
1964 ExprIdx.resize(nextValueNumber * 2);
1965 e = nextValueNumber;
1966 ExprIdx[nextValueNumber++] = nextExprNumber++;
1967 }
1968 return {e, CreateNewValNum};
1969 }
1970
1971 /// Return whether all the values related with the same \p num are
1972 /// defined in \p BB.
1973 bool GVN::ValueTable::areAllValsInBB(uint32_t Num, const BasicBlock *BB,
1974 GVN &Gvn) {
1975 LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
1976 while (Vals && Vals->BB == BB)
1977 Vals = Vals->Next;
1978 return !Vals;
1979 }
1980
1981 /// Wrap phiTranslateImpl to provide caching functionality.
1982 uint32_t GVN::ValueTable::phiTranslate(const BasicBlock *Pred,
1983 const BasicBlock *PhiBlock, uint32_t Num,
1984 GVN &Gvn) {
1985 auto FindRes = PhiTranslateTable.find({Num, Pred});
1986 if (FindRes != PhiTranslateTable.end())
1987 return FindRes->second;
1988 uint32_t NewNum = phiTranslateImpl(Pred, PhiBlock, Num, Gvn);
1989 PhiTranslateTable.insert({{Num, Pred}, NewNum});
1990 return NewNum;
1991 }
1992
1993 // Return true if the value numbers \p Num and \p NewNum refer to equal
1994 // values. Return false if the result is unknown.
1995 bool GVN::ValueTable::areCallValsEqual(uint32_t Num, uint32_t NewNum,
1996 const BasicBlock *Pred,
1997 const BasicBlock *PhiBlock, GVN &Gvn) {
1998 CallInst *Call = nullptr;
1999 LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
2000 while (Vals) {
2001 Call = dyn_cast<CallInst>(Vals->Val);
2002 if (Call && Call->getParent() == PhiBlock)
2003 break;
2004 Vals = Vals->Next;
2005 }
2006
2007 if (AA->doesNotAccessMemory(Call))
2008 return true;
2009
2010 if (!MD || !AA->onlyReadsMemory(Call))
2011 return false;
2012
2013 MemDepResult local_dep = MD->getDependency(Call);
2014 if (!local_dep.isNonLocal())
2015 return false;
2016
2017 const MemoryDependenceResults::NonLocalDepInfo &deps =
2018 MD->getNonLocalCallDependency(Call);
2019
2020 // Check to see if the Call has no function local clobber.
2021 for (const NonLocalDepEntry &D : deps) {
2022 if (D.getResult().isNonFuncLocal())
2023 return true;
2024 }
2025 return false;
2026 }
2027
2028 /// Translate value number \p Num through the phis in \p PhiBlock, so that it
2029 /// refers to the value incoming from \p Pred.
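///
/// For example (a hypothetical sketch): given
///   %p = phi i32 [ %a, %Pred ], ...   ; in PhiBlock
///   %x = add i32 %p, 1                ; numbered Num
/// translating Num through the %Pred -> PhiBlock edge yields the value
/// number of "add i32 %a, 1", i.e. the value %x takes when entered from
/// %Pred.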
2030 uint32_t GVN::ValueTable::phiTranslateImpl(const BasicBlock *Pred,
2031 const BasicBlock *PhiBlock,
2032 uint32_t Num, GVN &Gvn) {
2033 if (PHINode *PN = NumberingPhi[Num]) {
2034 for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
2035 if (PN->getParent() == PhiBlock && PN->getIncomingBlock(i) == Pred)
2036 if (uint32_t TransVal = lookup(PN->getIncomingValue(i), false))
2037 return TransVal;
2038 }
2039 return Num;
2040 }
2041
2042 // If any value related to Num is defined in a BB other than PhiBlock, it
2043 // cannot depend on a phi in PhiBlock without going through a backedge. We
2044 // can do an early exit in that case to save compile time.
2045 if (!areAllValsInBB(Num, PhiBlock, Gvn))
2046 return Num;
2047
2048 if (Num >= ExprIdx.size() || ExprIdx[Num] == 0)
2049 return Num;
2050 Expression Exp = Expressions[ExprIdx[Num]];
2051
2052 for (unsigned i = 0; i < Exp.varargs.size(); i++) {
2053 // For InsertValue and ExtractValue, some varargs are index numbers
2054 // instead of value numbers. Those index numbers should not be
2055 // translated.
2056 if ((i > 1 && Exp.opcode == Instruction::InsertValue) ||
2057 (i > 0 && Exp.opcode == Instruction::ExtractValue) ||
2058 (i > 1 && Exp.opcode == Instruction::ShuffleVector))
2059 continue;
2060 Exp.varargs[i] = phiTranslate(Pred, PhiBlock, Exp.varargs[i], Gvn);
2061 }
2062
2063 if (Exp.commutative) {
2064 assert(Exp.varargs.size() >= 2 && "Unsupported commutative instruction!");
2065 if (Exp.varargs[0] > Exp.varargs[1]) {
2066 std::swap(Exp.varargs[0], Exp.varargs[1]);
2067 uint32_t Opcode = Exp.opcode >> 8;
2068 if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)
2069 Exp.opcode = (Opcode << 8) |
2070 CmpInst::getSwappedPredicate(
2071 static_cast<CmpInst::Predicate>(Exp.opcode & 255));
2072 }
2073 }
2074
2075 if (uint32_t NewNum = expressionNumbering[Exp]) {
2076 if (Exp.opcode == Instruction::Call && NewNum != Num)
2077 return areCallValsEqual(Num, NewNum, Pred, PhiBlock, Gvn) ? NewNum : Num;
2078 return NewNum;
2079 }
2080 return Num;
2081 }
2082
2083 /// Erase stale entry from phiTranslate cache so phiTranslate can be computed
2084 /// again.
2085 void GVN::ValueTable::eraseTranslateCacheEntry(uint32_t Num,
2086 const BasicBlock &CurrBlock) {
2087 for (const BasicBlock *Pred : predecessors(&CurrBlock))
2088 PhiTranslateTable.erase({Num, Pred});
2089 }
2090
2091 // In order to find a leader for a given value number at a
2092 // specific basic block, we first obtain the list of all Values for that number,
2093 // and then scan the list to find one whose block dominates the block in
2094 // question. This is fast because dominator tree queries consist of only
2095 // a few comparisons of DFS numbers.
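// For example (hypothetical): if value number 7 has leader entries in blocks
// A and B, a query for block C returns the entry from A when A dominates C,
// preferring a Constant entry over an Instruction one if both dominate.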
2096 Value *GVN::findLeader(const BasicBlock *BB, uint32_t num) {
2097 LeaderTableEntry Vals = LeaderTable[num];
2098 if (!Vals.Val) return nullptr;
2099
2100 Value *Val = nullptr;
2101 if (DT->dominates(Vals.BB, BB)) {
2102 Val = Vals.Val;
2103 if (isa<Constant>(Val)) return Val;
2104 }
2105
2106 LeaderTableEntry* Next = Vals.Next;
2107 while (Next) {
2108 if (DT->dominates(Next->BB, BB)) {
2109 if (isa<Constant>(Next->Val)) return Next->Val;
2110 if (!Val) Val = Next->Val;
2111 }
2112
2113 Next = Next->Next;
2114 }
2115
2116 return Val;
2117 }
2118
2119 /// There is an edge from 'Src' to 'Dst'. Return
2120 /// true if every path from the entry block to 'Dst' passes via this edge. In
2121 /// particular 'Dst' must not be reachable via another edge from 'Src'.
2122 static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E,
2123 DominatorTree *DT) {
2124 // While in theory it is interesting to consider the case in which Dst has
2125 // more than one predecessor, because Dst might be part of a loop which is
2126 // only reachable from Src, in practice it is pointless since at the time
2127 // GVN runs all such loops have preheaders, which means that Dst will have
2128 // been changed to have only one predecessor, namely Src.
2129 const BasicBlock *Pred = E.getEnd()->getSinglePredecessor();
2130 assert((!Pred || Pred == E.getStart()) &&
2131 "No edge between these basic blocks!");
2132 return Pred != nullptr;
2133 }
2134
2135 void GVN::assignBlockRPONumber(Function &F) {
2136 BlockRPONumber.clear();
2137 uint32_t NextBlockNumber = 1;
2138 ReversePostOrderTraversal<Function *> RPOT(&F);
2139 for (BasicBlock *BB : RPOT)
2140 BlockRPONumber[BB] = NextBlockNumber++;
2141 InvalidBlockRPONumbers = false;
2142 }
2143
2144 bool GVN::replaceOperandsForInBlockEquality(Instruction *Instr) const {
2145 bool Changed = false;
2146 for (unsigned OpNum = 0; OpNum < Instr->getNumOperands(); ++OpNum) {
2147 Value *Operand = Instr->getOperand(OpNum);
2148 auto it = ReplaceOperandsWithMap.find(Operand);
2149 if (it != ReplaceOperandsWithMap.end()) {
2150 LLVM_DEBUG(dbgs() << "GVN replacing: " << *Operand << " with "
2151 << *it->second << " in instruction " << *Instr << '\n');
2152 Instr->setOperand(OpNum, it->second);
2153 Changed = true;
2154 }
2155 }
2156 return Changed;
2157 }
2158
2159 /// The given values are known to be equal in every block
2160 /// dominated by 'Root'. Exploit this, for example by replacing 'LHS' with
2161 /// 'RHS' everywhere in the scope. Returns whether a change was made.
2162 /// If DominatesByEdge is false, then it means that we will propagate the RHS
2163 /// value starting from the end of Root.Start.
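///
/// For example (hypothetical IR): after
///   %cmp = icmp eq i32 %x, %y
///   br i1 %cmp, label %then, label %else
/// propagating "%cmp == true" along the %then edge lets us rewrite uses of
/// the younger of %x and %y (by value number) that are dominated by the
/// edge, and also record that the inverse comparison "icmp ne i32 %x, %y"
/// is false there.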
2164 bool GVN::propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root,
2165 bool DominatesByEdge) {
2166 SmallVector<std::pair<Value*, Value*>, 4> Worklist;
2167 Worklist.push_back(std::make_pair(LHS, RHS));
2168 bool Changed = false;
2169 // For speed, compute a conservative fast approximation to
2170 // DT->dominates(Root, Root.getEnd());
2171 const bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT);
2172
2173 while (!Worklist.empty()) {
2174 std::pair<Value*, Value*> Item = Worklist.pop_back_val();
2175 LHS = Item.first; RHS = Item.second;
2176
2177 if (LHS == RHS)
2178 continue;
2179 assert(LHS->getType() == RHS->getType() && "Equality but unequal types!");
2180
2181 // Don't try to propagate equalities between constants.
2182 if (isa<Constant>(LHS) && isa<Constant>(RHS))
2183 continue;
2184
2185 // Prefer a constant on the right-hand side, or an Argument if no constants.
2186 if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS)))
2187 std::swap(LHS, RHS);
2188 assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) && "Unexpected value!");
2189
2190 // If there is no obvious reason to prefer the left-hand side over the
2191 // right-hand side, ensure the longest lived term is on the right-hand side,
2192 // so the shortest lived term will be replaced by the longest lived.
2193 // This tends to expose more simplifications.
2194 uint32_t LVN = VN.lookupOrAdd(LHS);
2195 if ((isa<Argument>(LHS) && isa<Argument>(RHS)) ||
2196 (isa<Instruction>(LHS) && isa<Instruction>(RHS))) {
2197 // Move the 'oldest' value to the right-hand side, using the value number
2198 // as a proxy for age.
2199 uint32_t RVN = VN.lookupOrAdd(RHS);
2200 if (LVN < RVN) {
2201 std::swap(LHS, RHS);
2202 LVN = RVN;
2203 }
2204 }
2205
2206 // If value numbering later sees that an instruction in the scope is equal
2207 // to 'LHS' then ensure it will be turned into 'RHS'. In order to preserve
2208 // the invariant that instructions only occur in the leader table for their
2209 // own value number (this is used by removeFromLeaderTable), do not do this
2210 // if RHS is an instruction (if an instruction in the scope is morphed into
2211 // LHS then it will be turned into RHS by the next GVN iteration anyway, so
2212 // using the leader table is about compiling faster, not optimizing better).
2213 // The leader table only tracks basic blocks, not edges. Only add to it if we
2214 // have the simple case where the edge dominates the end.
2215 if (RootDominatesEnd && !isa<Instruction>(RHS))
2216 addToLeaderTable(LVN, RHS, Root.getEnd());
2217
2218 // Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope. As
2219 // LHS always has at least one use that is not dominated by Root, this will
2220 // never do anything if LHS has only one use.
2221 if (!LHS->hasOneUse()) {
2222 unsigned NumReplacements =
2223 DominatesByEdge
2224 ? replaceDominatedUsesWith(LHS, RHS, *DT, Root)
2225 : replaceDominatedUsesWith(LHS, RHS, *DT, Root.getStart());
2226
2227 Changed |= NumReplacements > 0;
2228 NumGVNEqProp += NumReplacements;
2229 // Cached information for anything that uses LHS will be invalid.
2230 if (MD)
2231 MD->invalidateCachedPointerInfo(LHS);
2232 }
2233
2234 // Now try to deduce additional equalities from this one. For example, if
2235 // the known equality was "(A != B)" == "false" then it follows that A and B
2236 // are equal in the scope. Only boolean equalities with an explicit true or
2237 // false RHS are currently supported.
2238 if (!RHS->getType()->isIntegerTy(1))
2239 // Not a boolean equality - bail out.
2240 continue;
2241 ConstantInt *CI = dyn_cast<ConstantInt>(RHS);
2242 if (!CI)
2243 // RHS neither 'true' nor 'false' - bail out.
2244 continue;
2245 // Whether RHS equals 'true'. Otherwise it equals 'false'.
2246 bool isKnownTrue = CI->isMinusOne();
2247 bool isKnownFalse = !isKnownTrue;
2248
2249 // If "A && B" is known true then both A and B are known true. If "A || B"
2250 // is known false then both A and B are known false.
2251 Value *A, *B;
2252 if ((isKnownTrue && match(LHS, m_LogicalAnd(m_Value(A), m_Value(B)))) ||
2253 (isKnownFalse && match(LHS, m_LogicalOr(m_Value(A), m_Value(B))))) {
2254 Worklist.push_back(std::make_pair(A, RHS));
2255 Worklist.push_back(std::make_pair(B, RHS));
2256 continue;
2257 }
2258
2259 // If we are propagating an equality like "(A == B)" == "true" then also
2260 // propagate the equality A == B. When propagating a comparison such as
2261 // "(A >= B)" == "true", replace all instances of "A < B" with "false".
2262 if (CmpInst *Cmp = dyn_cast<CmpInst>(LHS)) {
2263 Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);
2264
2265 // If "A == B" is known true, or "A != B" is known false, then replace
2266 // A with B everywhere in the scope. For floating point operations, we
2267 // have to be careful since equality does not always imply equivalence.
2268 if ((isKnownTrue && impliesEquivalanceIfTrue(Cmp)) ||
2269 (isKnownFalse && impliesEquivalanceIfFalse(Cmp)))
2270 Worklist.push_back(std::make_pair(Op0, Op1));
2271
2272 // If "A >= B" is known true, replace "A < B" with false everywhere.
2273 CmpInst::Predicate NotPred = Cmp->getInversePredicate();
2274 Constant *NotVal = ConstantInt::get(Cmp->getType(), isKnownFalse);
2275 // Since we don't have the instruction "A < B" immediately to hand, work
2276 // out the value number that it would have and use that to find an
2277 // appropriate instruction (if any).
2278 uint32_t NextNum = VN.getNextUnusedValueNumber();
2279 uint32_t Num = VN.lookupOrAddCmp(Cmp->getOpcode(), NotPred, Op0, Op1);
2280 // If the number we were assigned was brand new then there is no point in
2281 // looking for an instruction realizing it: there cannot be one!
2282 if (Num < NextNum) {
2283 Value *NotCmp = findLeader(Root.getEnd(), Num);
2284 if (NotCmp && isa<Instruction>(NotCmp)) {
2285 unsigned NumReplacements =
2286 DominatesByEdge
2287 ? replaceDominatedUsesWith(NotCmp, NotVal, *DT, Root)
2288 : replaceDominatedUsesWith(NotCmp, NotVal, *DT,
2289 Root.getStart());
2290 Changed |= NumReplacements > 0;
2291 NumGVNEqProp += NumReplacements;
2292 // Cached information for anything that uses NotCmp will be invalid.
2293 if (MD)
2294 MD->invalidateCachedPointerInfo(NotCmp);
2295 }
2296 }
2297 // Ensure that any instruction in scope that gets the "A < B" value number
2298 // is replaced with false.
2299 // The leader table only tracks basic blocks, not edges. Only add to it if we
2300 // have the simple case where the edge dominates the end.
2301 if (RootDominatesEnd)
2302 addToLeaderTable(Num, NotVal, Root.getEnd());
2303
2304 continue;
2305 }
2306 }
2307
2308 return Changed;
2309 }
2310
2311 /// When calculating availability, handle an instruction
2312 /// by inserting it into the appropriate sets
2313 bool GVN::processInstruction(Instruction *I) {
2314 // Ignore dbg info intrinsics.
2315 if (isa<DbgInfoIntrinsic>(I))
2316 return false;
2317
2318 // If the instruction can be easily simplified then do so now in preference
2319 // to value numbering it. Value numbering often exposes redundancies, for
2320 // example if it determines that %y is equal to %x then the instruction
2321 // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
2322 const DataLayout &DL = I->getModule()->getDataLayout();
2323 if (Value *V = SimplifyInstruction(I, {DL, TLI, DT, AC})) {
2324 bool Changed = false;
2325 if (!I->use_empty()) {
2326 // Simplification can cause a special instruction to become not special.
2327 // For example, devirtualization to a willreturn function.
2328 ICF->removeUsersOf(I);
2329 I->replaceAllUsesWith(V);
2330 Changed = true;
2331 }
2332 if (isInstructionTriviallyDead(I, TLI)) {
2333 markInstructionForDeletion(I);
2334 Changed = true;
2335 }
2336 if (Changed) {
2337 if (MD && V->getType()->isPtrOrPtrVectorTy())
2338 MD->invalidateCachedPointerInfo(V);
2339 ++NumGVNSimpl;
2340 return true;
2341 }
2342 }
2343
2344 if (auto *Assume = dyn_cast<AssumeInst>(I))
2345 return processAssumeIntrinsic(Assume);
2346
2347 if (LoadInst *Load = dyn_cast<LoadInst>(I)) {
2348 if (processLoad(Load))
2349 return true;
2350
2351 unsigned Num = VN.lookupOrAdd(Load);
2352 addToLeaderTable(Num, Load, Load->getParent());
2353 return false;
2354 }
2355
2356 // For conditional branches, we can perform simple conditional propagation on
2357 // the condition value itself.
2358 if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
2359 if (!BI->isConditional())
2360 return false;
2361
2362 if (isa<Constant>(BI->getCondition()))
2363 return processFoldableCondBr(BI);
2364
2365 Value *BranchCond = BI->getCondition();
2366 BasicBlock *TrueSucc = BI->getSuccessor(0);
2367 BasicBlock *FalseSucc = BI->getSuccessor(1);
2368 // Avoid multiple edges early.
2369 if (TrueSucc == FalseSucc)
2370 return false;
2371
2372 BasicBlock *Parent = BI->getParent();
2373 bool Changed = false;
2374
2375 Value *TrueVal = ConstantInt::getTrue(TrueSucc->getContext());
2376 BasicBlockEdge TrueE(Parent, TrueSucc);
2377 Changed |= propagateEquality(BranchCond, TrueVal, TrueE, true);
2378
2379 Value *FalseVal = ConstantInt::getFalse(FalseSucc->getContext());
2380 BasicBlockEdge FalseE(Parent, FalseSucc);
2381 Changed |= propagateEquality(BranchCond, FalseVal, FalseE, true);
2382
2383 return Changed;
2384 }
2385
2386 // For switches, propagate the case values into the case destinations.
2387 if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) {
2388 Value *SwitchCond = SI->getCondition();
2389 BasicBlock *Parent = SI->getParent();
2390 bool Changed = false;
2391
2392 // Remember how many outgoing edges there are to every successor.
2393 SmallDenseMap<BasicBlock *, unsigned, 16> SwitchEdges;
2394 for (unsigned i = 0, n = SI->getNumSuccessors(); i != n; ++i)
2395 ++SwitchEdges[SI->getSuccessor(i)];
2396
2397 for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
2398 i != e; ++i) {
2399 BasicBlock *Dst = i->getCaseSuccessor();
2400 // If there is only a single edge, propagate the case value into it.
2401 if (SwitchEdges.lookup(Dst) == 1) {
2402 BasicBlockEdge E(Parent, Dst);
2403 Changed |= propagateEquality(SwitchCond, i->getCaseValue(), E, true);
2404 }
2405 }
2406 return Changed;
2407 }
2408
2409 // Instructions with void type don't return a value, so there's
2410 // no point in trying to find redundancies in them.
2411 if (I->getType()->isVoidTy())
2412 return false;
2413
2414 uint32_t NextNum = VN.getNextUnusedValueNumber();
2415 unsigned Num = VN.lookupOrAdd(I);
2416
2417 // Allocations are always uniquely numbered, so we can save time and memory
2418 // by fast failing them.
2419 if (isa<AllocaInst>(I) || I->isTerminator() || isa<PHINode>(I)) {
2420 addToLeaderTable(Num, I, I->getParent());
2421 return false;
2422 }
2423
2424 // If the number we were assigned was a brand new VN, then we don't
2425 // need to do a lookup to see if the number already exists
2426 // somewhere in the domtree: it can't!
2427 if (Num >= NextNum) {
2428 addToLeaderTable(Num, I, I->getParent());
2429 return false;
2430 }
2431
2432 // Perform fast-path value-number based elimination of values inherited from
2433 // dominators.
2434 Value *Repl = findLeader(I->getParent(), Num);
2435 if (!Repl) {
2436 // Failure, just remember this instance for future use.
2437 addToLeaderTable(Num, I, I->getParent());
2438 return false;
2439 } else if (Repl == I) {
2440 // If I was the result of a shortcut PRE, it might already be in the table
2441 // and the best replacement for itself. Nothing to do.
2442 return false;
2443 }
2444
2445 // Remove it!
2446 patchAndReplaceAllUsesWith(I, Repl);
2447 if (MD && Repl->getType()->isPtrOrPtrVectorTy())
2448 MD->invalidateCachedPointerInfo(Repl);
2449 markInstructionForDeletion(I);
2450 return true;
2451 }
2452
2453 /// runOnFunction - This is the main transformation entry point for a function.
2454 bool GVN::runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT,
2455 const TargetLibraryInfo &RunTLI, AAResults &RunAA,
2456 MemoryDependenceResults *RunMD, LoopInfo *LI,
2457 OptimizationRemarkEmitter *RunORE, MemorySSA *MSSA) {
2458 AC = &RunAC;
2459 DT = &RunDT;
2460 VN.setDomTree(DT);
2461 TLI = &RunTLI;
2462 VN.setAliasAnalysis(&RunAA);
2463 MD = RunMD;
2464 ImplicitControlFlowTracking ImplicitCFT;
2465 ICF = &ImplicitCFT;
2466 this->LI = LI;
2467 VN.setMemDep(MD);
2468 ORE = RunORE;
2469 InvalidBlockRPONumbers = true;
2470 MemorySSAUpdater Updater(MSSA);
2471 MSSAU = MSSA ? &Updater : nullptr;
2472
2473 bool Changed = false;
2474 bool ShouldContinue = true;
2475
2476 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
2477 // Merge unconditional branches, allowing PRE to catch more
2478 // optimization opportunities.
2479 for (BasicBlock &BB : llvm::make_early_inc_range(F)) {
2480 bool removedBlock = MergeBlockIntoPredecessor(&BB, &DTU, LI, MSSAU, MD);
2481 if (removedBlock)
2482 ++NumGVNBlocks;
2483
2484 Changed |= removedBlock;
2485 }
2486
2487 unsigned Iteration = 0;
2488 while (ShouldContinue) {
2489 LLVM_DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
2490 ShouldContinue = iterateOnFunction(F);
2491 Changed |= ShouldContinue;
2492 ++Iteration;
2493 }
2494
2495 if (isPREEnabled()) {
2496 // Fabricate val-num for dead-code in order to suppress assertion in
2497 // performPRE().
2498 assignValNumForDeadCode();
2499 bool PREChanged = true;
2500 while (PREChanged) {
2501 PREChanged = performPRE(F);
2502 Changed |= PREChanged;
2503 }
2504 }
2505
2506 // FIXME: Should perform GVN again after PRE does something. PRE can move
2507 // computations into blocks where they become fully redundant. Note that
2508 // we can't do this until PRE's critical edge splitting updates memdep.
2509 // Actually, when this happens, we should just fully integrate PRE into GVN.
2510
2511 cleanupGlobalSets();
2512 // Do not cleanup DeadBlocks in cleanupGlobalSets() as it's called for each
2513 // iteration.
2514 DeadBlocks.clear();
2515
2516 if (MSSA && VerifyMemorySSA)
2517 MSSA->verifyMemorySSA();
2518
2519 return Changed;
2520 }
2521
2522 bool GVN::processBlock(BasicBlock *BB) {
2523 // FIXME: Kill off InstrsToErase by doing erasing eagerly in a helper function
2524 // (and incrementing BI before processing an instruction).
2525 assert(InstrsToErase.empty() &&
2526 "We expect InstrsToErase to be empty across iterations");
2527 if (DeadBlocks.count(BB))
2528 return false;
2529
2530 // Clear the map before every BB because it is valid only within a single BB.
2531 ReplaceOperandsWithMap.clear();
2532 bool ChangedFunction = false;
2533
2534 // Since we may not have visited the input blocks of the phis, we can't
2535 // use our normal hash approach for phis. Instead, simply look for
2536 // obvious duplicates. The first pass of GVN will tend to create
2537 // identical phis, and the second or later passes can eliminate them.
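// E.g. (hypothetical) two phis "%p1 = phi [%a, %l], [%b, %r]" and
// "%p2 = phi [%a, %l], [%b, %r]" in BB are obvious duplicates, and the
// second is folded into the first.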
2538 ChangedFunction |= EliminateDuplicatePHINodes(BB);
2539
2540 for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
2541 BI != BE;) {
2542 if (!ReplaceOperandsWithMap.empty())
2543 ChangedFunction |= replaceOperandsForInBlockEquality(&*BI);
2544 ChangedFunction |= processInstruction(&*BI);
2545
2546 if (InstrsToErase.empty()) {
2547 ++BI;
2548 continue;
2549 }
2550
2551 // If we need some instructions deleted, do it now.
2552 NumGVNInstr += InstrsToErase.size();
2553
2554 // Avoid iterator invalidation.
2555 bool AtStart = BI == BB->begin();
2556 if (!AtStart)
2557 --BI;
2558
2559 for (auto *I : InstrsToErase) {
2560 assert(I->getParent() == BB && "Removing instruction from wrong block?");
2561 LLVM_DEBUG(dbgs() << "GVN removed: " << *I << '\n');
2562 salvageKnowledge(I, AC);
2563 salvageDebugInfo(*I);
2564 if (MD) MD->removeInstruction(I);
2565 if (MSSAU)
2566 MSSAU->removeMemoryAccess(I);
2567 LLVM_DEBUG(verifyRemoved(I));
2568 ICF->removeInstruction(I);
2569 I->eraseFromParent();
2570 }
2571 InstrsToErase.clear();
2572
2573 if (AtStart)
2574 BI = BB->begin();
2575 else
2576 ++BI;
2577 }
2578
2579 return ChangedFunction;
2580 }
2581
2582 // Instantiate an expression in a predecessor that lacked it.
2583 bool GVN::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
2584 BasicBlock *Curr, unsigned int ValNo) {
2585 // Because we are going top-down through the block, all value numbers
2586 // will be available in the predecessor by the time we need them. Any
2587 // that weren't originally present will have been instantiated earlier
2588 // in this loop.
2589 bool success = true;
2590 for (unsigned i = 0, e = Instr->getNumOperands(); i != e; ++i) {
2591 Value *Op = Instr->getOperand(i);
2592 if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
2593 continue;
2594 // This could be a newly inserted instruction, in which case, we won't
2595 // find a value number, and should give up before we hurt ourselves.
2596 // FIXME: Rewrite the infrastructure to make it easier to value number
2597 // and process newly inserted instructions.
2598 if (!VN.exists(Op)) {
2599 success = false;
2600 break;
2601 }
2602 uint32_t TValNo =
2603 VN.phiTranslate(Pred, Curr, VN.lookup(Op), *this);
2604 if (Value *V = findLeader(Pred, TValNo)) {
2605 Instr->setOperand(i, V);
2606 } else {
2607 success = false;
2608 break;
2609 }
2610 }
2611
2612 // Fail out if we encounter an operand that is not available in
2613 // the PRE predecessor. This is typically because of loads which
2614 // are not value numbered precisely.
2615 if (!success)
2616 return false;
2617
2618 Instr->insertBefore(Pred->getTerminator());
2619 Instr->setName(Instr->getName() + ".pre");
2620 Instr->setDebugLoc(Instr->getDebugLoc());
2621
2622 ICF->insertInstructionTo(Instr, Pred);
2623
2624 unsigned Num = VN.lookupOrAdd(Instr);
2625 VN.add(Instr, Num);
2626
2627 // Update the availability map to include the new instruction.
2628 addToLeaderTable(Num, Instr, Pred);
2629 return true;
2630 }
2631
2632 bool GVN::performScalarPRE(Instruction *CurInst) {
2633 if (isa<AllocaInst>(CurInst) || CurInst->isTerminator() ||
2634 isa<PHINode>(CurInst) || CurInst->getType()->isVoidTy() ||
2635 CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
2636 isa<DbgInfoIntrinsic>(CurInst))
2637 return false;
2638
2639 // Don't do PRE on compares. The PHI would prevent CodeGenPrepare from
2640 // sinking the compare again, and it would force the code generator to
2641 // move the i1 from processor flags or predicate registers into a general
2642 // purpose register.
2643 if (isa<CmpInst>(CurInst))
2644 return false;
2645
2646 // Don't do PRE on GEPs. The inserted PHI would prevent CodeGenPrepare from
2647 // sinking the addressing mode computation back to its uses. Extending the
2648 // GEP's live range increases the register pressure, and therefore it can
2649 // introduce unnecessary spills.
2650 //
2651 // This doesn't prevent Load PRE. PHI translation will make the GEP available
2652 // to the load by moving it to the predecessor block if necessary.
2653 if (isa<GetElementPtrInst>(CurInst))
2654 return false;
2655
2656 if (auto *CallB = dyn_cast<CallBase>(CurInst)) {
2657 // We don't currently value number ANY inline asm calls.
2658 if (CallB->isInlineAsm())
2659 return false;
2660 // Don't do PRE on convergent calls.
2661 if (CallB->isConvergent())
2662 return false;
2663 }
2664
2665 uint32_t ValNo = VN.lookup(CurInst);
2666
2667 // Look for the predecessors for PRE opportunities. We're
2668 // only trying to solve the basic diamond case, where
2669 // a value is computed in the successor and one predecessor,
2670 // but not the other. We also explicitly disallow cases
2671 // where the successor is its own predecessor, because they're
2672 // more complicated to get right.
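// A sketch of the diamond case (hypothetical IR): with
//   left:  %v1 = add i32 %a, %b    ; available in this predecessor
//   right:                         ; not available here
//   join:  %v2 = add i32 %a, %b
// we clone the add into %right as "%v2.pre" and replace %v2 with
//   %v2.pre-phi = phi i32 [ %v1, %left ], [ %v2.pre, %right ]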
2673 unsigned NumWith = 0;
2674 unsigned NumWithout = 0;
2675 BasicBlock *PREPred = nullptr;
2676 BasicBlock *CurrentBlock = CurInst->getParent();
2677
2678 // Update the RPO numbers for this function.
2679 if (InvalidBlockRPONumbers)
2680 assignBlockRPONumber(*CurrentBlock->getParent());
2681
2682 SmallVector<std::pair<Value *, BasicBlock *>, 8> predMap;
2683 for (BasicBlock *P : predecessors(CurrentBlock)) {
2684 // We're not interested in PRE when the predecessor block is not
2685 // reachable.
2686 if (!DT->isReachableFromEntry(P)) {
2687 NumWithout = 2;
2688 break;
2689 }
2690 // It is not safe to do PRE when P->CurrentBlock is a loop backedge and
2691 // CurInst has an operand defined in CurrentBlock (so it may be defined
2692 // by a phi in the loop header).
2693 assert(BlockRPONumber.count(P) && BlockRPONumber.count(CurrentBlock) &&
2694 "Invalid BlockRPONumber map.");
2695 if (BlockRPONumber[P] >= BlockRPONumber[CurrentBlock] &&
2696 llvm::any_of(CurInst->operands(), [&](const Use &U) {
2697 if (auto *Inst = dyn_cast<Instruction>(U.get()))
2698 return Inst->getParent() == CurrentBlock;
2699 return false;
2700 })) {
2701 NumWithout = 2;
2702 break;
2703 }
2704
2705 uint32_t TValNo = VN.phiTranslate(P, CurrentBlock, ValNo, *this);
2706 Value *predV = findLeader(P, TValNo);
2707 if (!predV) {
2708 predMap.push_back(std::make_pair(static_cast<Value *>(nullptr), P));
2709 PREPred = P;
2710 ++NumWithout;
2711 } else if (predV == CurInst) {
2712 /* CurInst dominates this predecessor. */
2713 NumWithout = 2;
2714 break;
2715 } else {
2716 predMap.push_back(std::make_pair(predV, P));
2717 ++NumWith;
2718 }
2719 }
2720
2721 // Don't do PRE when it might increase code size, i.e. when
2722 // we would need to insert instructions in more than one pred.
2723 if (NumWithout > 1 || NumWith == 0)
2724 return false;
2725
2726 // We may have a case where all predecessors have the instruction,
2727 // and we just need to insert a phi node. Otherwise, perform
2728 // insertion.
2729 Instruction *PREInstr = nullptr;
2730
2731 if (NumWithout != 0) {
2732 if (!isSafeToSpeculativelyExecute(CurInst)) {
2733 // It is only valid to insert a new instruction if the current instruction
2734 // is always executed. An instruction with implicit control flow could
2735 // prevent us from doing it. If we cannot speculate the execution, then
2736 // PRE should be prohibited.
2737 if (ICF->isDominatedByICFIFromSameBlock(CurInst))
2738 return false;
2739 }
2740
2741 // Don't do PRE across indirect branch.
2742 if (isa<IndirectBrInst>(PREPred->getTerminator()))
2743 return false;
2744
2745 // Don't do PRE across callbr.
2746 // FIXME: Can we do this across the fallthrough edge?
2747 if (isa<CallBrInst>(PREPred->getTerminator()))
2748 return false;
2749
2750 // We can't do PRE safely on a critical edge, so instead we schedule
2751 // the edge to be split and perform the PRE the next time we iterate
2752 // on the function.
2753 unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
2754 if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
2755 toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
2756 return false;
2757 }
2758 // We need to insert somewhere, so let's give it a shot
2759 PREInstr = CurInst->clone();
2760 if (!performScalarPREInsertion(PREInstr, PREPred, CurrentBlock, ValNo)) {
2761 // If we failed insertion, make sure we remove the instruction.
2762 LLVM_DEBUG(verifyRemoved(PREInstr));
2763 PREInstr->deleteValue();
2764 return false;
2765 }
2766 }
2767
2768 // Either we should have filled in the PRE instruction, or we should
2769 // not have needed insertions.
2770 assert(PREInstr != nullptr || NumWithout == 0);
2771
2772 ++NumGVNPRE;
2773
2774 // Create a PHI to make the value available in this block.
2775 PHINode *Phi =
2776 PHINode::Create(CurInst->getType(), predMap.size(),
2777 CurInst->getName() + ".pre-phi", &CurrentBlock->front());
2778 for (unsigned i = 0, e = predMap.size(); i != e; ++i) {
2779 if (Value *V = predMap[i].first) {
2780 // If we use an existing value in this phi, we have to patch the original
2781 // value because the phi will be used to replace a later value.
2782 patchReplacementInstruction(CurInst, V);
2783 Phi->addIncoming(V, predMap[i].second);
2784 } else
2785 Phi->addIncoming(PREInstr, PREPred);
2786 }
2787
2788 VN.add(Phi, ValNo);
2789 // After creating a new PHI for ValNo, the phi translate result for ValNo will
2790 // be changed, so erase the related stale entries in phi translate cache.
2791 VN.eraseTranslateCacheEntry(ValNo, *CurrentBlock);
2792 addToLeaderTable(ValNo, Phi, CurrentBlock);
2793 Phi->setDebugLoc(CurInst->getDebugLoc());
2794 CurInst->replaceAllUsesWith(Phi);
2795 if (MD && Phi->getType()->isPtrOrPtrVectorTy())
2796 MD->invalidateCachedPointerInfo(Phi);
2797 VN.erase(CurInst);
2798 removeFromLeaderTable(ValNo, CurInst, CurrentBlock);
2799
2800 LLVM_DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
2801 if (MD)
2802 MD->removeInstruction(CurInst);
2803 if (MSSAU)
2804 MSSAU->removeMemoryAccess(CurInst);
2805 LLVM_DEBUG(verifyRemoved(CurInst));
2806 // FIXME: Intended to be markInstructionForDeletion(CurInst), but it causes
2807 // some assertion failures.
2808 ICF->removeInstruction(CurInst);
2809 CurInst->eraseFromParent();
2810 ++NumGVNInstr;
2811
2812 return true;
2813 }
2814
2815 /// Perform a purely local form of PRE that looks for diamond
2816 /// control flow patterns and attempts to perform simple PRE at the join point.
2817 bool GVN::performPRE(Function &F) {
2818 bool Changed = false;
2819 for (BasicBlock *CurrentBlock : depth_first(&F.getEntryBlock())) {
2820 // Nothing to PRE in the entry block.
2821 if (CurrentBlock == &F.getEntryBlock())
2822 continue;
2823
2824 // Don't perform PRE on an EH pad.
2825 if (CurrentBlock->isEHPad())
2826 continue;
2827
2828 for (BasicBlock::iterator BI = CurrentBlock->begin(),
2829 BE = CurrentBlock->end();
2830 BI != BE;) {
2831 Instruction *CurInst = &*BI++;
2832 Changed |= performScalarPRE(CurInst);
2833 }
2834 }
2835
2836 if (splitCriticalEdges())
2837 Changed = true;
2838
2839 return Changed;
2840 }
2841
2842 /// Split the critical edge connecting the given two blocks, and return
2843 /// the block inserted to the critical edge.
2844 BasicBlock *GVN::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) {
2845 // GVN does not require loop-simplify; do not try to preserve it if it is not
2846 // possible.
2847 BasicBlock *BB = SplitCriticalEdge(
2848 Pred, Succ,
2849 CriticalEdgeSplittingOptions(DT, LI, MSSAU).unsetPreserveLoopSimplify());
2850 if (BB) {
2851 if (MD)
2852 MD->invalidateCachedPredecessors();
2853 InvalidBlockRPONumbers = true;
2854 }
2855 return BB;
2856 }
2857
2858 /// Split critical edges found during the previous
2859 /// iteration that may enable further optimization.
2860 bool GVN::splitCriticalEdges() {
2861 if (toSplit.empty())
2862 return false;
2863
2864 bool Changed = false;
2865 do {
2866 std::pair<Instruction *, unsigned> Edge = toSplit.pop_back_val();
2867 Changed |= SplitCriticalEdge(Edge.first, Edge.second,
2868 CriticalEdgeSplittingOptions(DT, LI, MSSAU)) !=
2869 nullptr;
2870 } while (!toSplit.empty());
2871 if (Changed) {
2872 if (MD)
2873 MD->invalidateCachedPredecessors();
2874 InvalidBlockRPONumbers = true;
2875 }
2876 return Changed;
2877 }
2878
2879 /// Executes one iteration of GVN
2880 bool GVN::iterateOnFunction(Function &F) {
2881 cleanupGlobalSets();
2882
2883 // Top-down walk of the dominator tree
2884 bool Changed = false;
2885 // Needed for value numbering with phi construction to work.
2886 // RPOT walks the graph in its constructor and will not be invalidated during
2887 // processBlock.
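  // Reverse post-order visits every block before its successors (back edges
  // aside), so value-number leaders recorded in a block's dominators are
  // already available when the block itself is processed.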
  ReversePostOrderTraversal<Function *> RPOT(&F);

  for (BasicBlock *BB : RPOT)
    Changed |= processBlock(BB);

  return Changed;
}

void GVN::cleanupGlobalSets() {
  VN.clear();
  LeaderTable.clear();
  BlockRPONumber.clear();
  TableAllocator.Reset();
  ICF->clear();
  InvalidBlockRPONumbers = true;
}

/// Verify that the specified instruction does not occur in our
/// internal data structures.
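///
/// Note that the leader table maps each value number to a linked chain of
/// (value, block) entries, so every chain is walked in full below.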
void GVN::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);

  // Walk through the value number scope to make sure the instruction isn't
  // ferreted away in it.
  for (const auto &I : LeaderTable) {
    const LeaderTableEntry *Node = &I.second;
    assert(Node->Val != Inst && "Inst still in value numbering scope!");

    while (Node->Next) {
      Node = Node->Next;
      assert(Node->Val != Inst && "Inst still in value numbering scope!");
    }
  }
}

/// BB is declared dead, which implies that other blocks become dead as well.
/// This function adds all such blocks to "DeadBlocks". For the dead blocks'
/// live successors, update their phi nodes by replacing the operands
/// corresponding to dead blocks with UndefVal.
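///
/// For example (illustrative): with edges D->S and L->S where L stays live,
/// S remains live but its phi operand for the D edge becomes undef; if L were
/// dead as well, S itself would join the dead set.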
void GVN::addDeadBlock(BasicBlock *BB) {
  SmallVector<BasicBlock *, 4> NewDead;
  SmallSetVector<BasicBlock *, 4> DF;

  NewDead.push_back(BB);
  while (!NewDead.empty()) {
    BasicBlock *D = NewDead.pop_back_val();
    if (DeadBlocks.count(D))
      continue;

    // All blocks dominated by D are dead.
    SmallVector<BasicBlock *, 8> Dom;
    DT->getDescendants(D, Dom);
    DeadBlocks.insert(Dom.begin(), Dom.end());

    // Figure out the dominance frontier of D.
    for (BasicBlock *B : Dom) {
      for (BasicBlock *S : successors(B)) {
        if (DeadBlocks.count(S))
          continue;

        bool AllPredDead = true;
        for (BasicBlock *P : predecessors(S))
          if (!DeadBlocks.count(P)) {
            AllPredDead = false;
            break;
          }

        if (!AllPredDead) {
          // S could still be proven dead later on. That is why we don't update
          // its phi operands at this moment.
          DF.insert(S);
        } else {
          // Although S is not dominated by D, it is dead by now. This can
          // happen if S already had a dead predecessor before D was declared
          // dead.
          NewDead.push_back(S);
        }
      }
    }
  }

  // For the dead blocks' live successors, update their phi nodes by replacing
  // the operands corresponding to dead blocks with UndefVal.
  for (BasicBlock *B : DF) {
    if (DeadBlocks.count(B))
      continue;

    // First, split the critical edges. This might also create additional
    // blocks to preserve LoopSimplify form and adjust edges accordingly.
    SmallVector<BasicBlock *, 4> Preds(predecessors(B));
    for (BasicBlock *P : Preds) {
      if (!DeadBlocks.count(P))
        continue;

      if (llvm::is_contained(successors(P), B) &&
          isCriticalEdge(P->getTerminator(), B)) {
        if (BasicBlock *S = splitCriticalEdges(P, B))
          DeadBlocks.insert(P = S);
      }
    }

    // Now undef the incoming values from the dead predecessors.
    for (BasicBlock *P : predecessors(B)) {
      if (!DeadBlocks.count(P))
        continue;
      for (PHINode &Phi : B->phis()) {
        Phi.setIncomingValueForBlock(P, UndefValue::get(Phi.getType()));
        if (MD)
          MD->invalidateCachedPointerInfo(&Phi);
      }
    }
  }
}

// If the given branch is recognized as a foldable branch (i.e. a conditional
// branch with a constant condition), perform the following analyses and
// transformation:
//  1) If the dead outgoing edge is a critical edge, split it. Let R be the
//     target of the dead outgoing edge.
//  2) Identify the set of dead blocks implied by the branch's dead outgoing
//     edge. The result of this step will be {X | X is dominated by R}.
//  3) Identify those blocks that have at least one dead predecessor. The
//     result of this step will be dominance-frontier(R).
//  4) Update the PHIs in DF(R) by replacing the operands corresponding to dead
//     blocks with "UndefVal", in the hope that these PHIs will be optimized
//     away.
//
// Return true iff *NEW* dead code is found.
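//
// For example (illustrative IR, not taken from this file), given
//   br i1 false, label %a, label %b
// the edge to %a is never taken, so %a becomes the dead root R and the blocks
// it dominates are marked dead.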
bool GVN::processFoldableCondBr(BranchInst *BI) {
  if (!BI || BI->isUnconditional())
    return false;

  // If a branch has two identical successors, we cannot declare either dead.
  if (BI->getSuccessor(0) == BI->getSuccessor(1))
    return false;

  ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
  if (!Cond)
    return false;

  BasicBlock *DeadRoot =
      Cond->getZExtValue() ? BI->getSuccessor(1) : BI->getSuccessor(0);
  if (DeadBlocks.count(DeadRoot))
    return false;

  if (!DeadRoot->getSinglePredecessor())
    DeadRoot = splitCriticalEdges(BI->getParent(), DeadRoot);

  addDeadBlock(DeadRoot);
  return true;
}

// performPRE() will trigger an assert if it comes across an instruction
// without an associated value number. As there are normally far more live
// instructions than dead ones, it makes more sense to "fabricate" a value
// number for the dead code than to check whether each instruction is dead.
void GVN::assignValNumForDeadCode() {
  for (BasicBlock *BB : DeadBlocks) {
    for (Instruction &Inst : *BB) {
      unsigned ValNum = VN.lookupOrAdd(&Inst);
      addToLeaderTable(ValNum, &Inst, BB);
    }
  }
}

class llvm::gvn::GVNLegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  explicit GVNLegacyPass(bool NoMemDepAnalysis = !GVNEnableMemDep)
      : FunctionPass(ID), Impl(GVNOptions().setMemDep(!NoMemDepAnalysis)) {
    initializeGVNLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();

    auto *MSSAWP = getAnalysisIfAvailable<MemorySSAWrapperPass>();
    return Impl.runImpl(
        F, getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
        getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
        getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
        getAnalysis<AAResultsWrapperPass>().getAAResults(),
        Impl.isMemDepEnabled()
            ? &getAnalysis<MemoryDependenceWrapperPass>().getMemDep()
            : nullptr,
        LIWP ? &LIWP->getLoopInfo() : nullptr,
        &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(),
        MSSAWP ? &MSSAWP->getMSSA() : nullptr);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    if (Impl.isMemDepEnabled())
      AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addPreserved<MemorySSAWrapperPass>();
  }

private:
  GVN Impl;
};

char GVNLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(GVNLegacyPass, "gvn", "Global Value Numbering", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(GVNLegacyPass, "gvn", "Global Value Numbering", false,
                    false)

// The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoMemDepAnalysis) {
  return new GVNLegacyPass(NoMemDepAnalysis);
}
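
// Example usage (illustrative sketch, not part of this file). With the legacy
// pass manager the pass is created through the factory above:
//   legacy::PassManager PM;
//   PM.add(createGVNPass());
//   PM.run(M);
// With the new pass manager, the GVN pass class is added directly:
//   FunctionPassManager FPM;
//   FPM.addPass(GVN());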