//===- LazyValueInfo.cpp - Value constraint analysis ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interface for lazy computation of value constraint
// information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueLattice.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include <optional>
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "lazy-value-info"

// This is the number of worklist items we will process to try to discover an
// answer for a given value.
static const unsigned MaxProcessedPerValue = 500;

char LazyValueInfoWrapperPass::ID = 0;
LazyValueInfoWrapperPass::LazyValueInfoWrapperPass() : FunctionPass(ID) {
  initializeLazyValueInfoWrapperPassPass(*PassRegistry::getPassRegistry());
}
INITIALIZE_PASS_BEGIN(LazyValueInfoWrapperPass, "lazy-value-info",
                      "Lazy Value Information Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(LazyValueInfoWrapperPass, "lazy-value-info",
                    "Lazy Value Information Analysis", false, true)

namespace llvm {
  FunctionPass *createLazyValueInfoPass() { return new LazyValueInfoWrapperPass(); }
}

AnalysisKey LazyValueAnalysis::Key;

/// Returns true if this lattice value represents at most one possible value.
/// This is as precise as any lattice value can get while still representing
/// reachable code.
static bool hasSingleValue(const ValueLatticeElement &Val) {
  if (Val.isConstantRange() &&
      Val.getConstantRange().isSingleElement())
    // Integer constants are single element ranges
    return true;
  if (Val.isConstant())
    // Non integer constants
    return true;
  return false;
}

/// Combine two sets of facts about the same value into a single set of
/// facts. Note that this method is not suitable for merging facts along
/// different paths in a CFG; that's what the mergeIn function is for. This
/// is for merging facts gathered about the same value at the same location
/// through two independent means.
/// Notes:
/// * This method does not promise to return the most precise possible lattice
///   value implied by A and B. It is allowed to return any lattice element
///   which is at least as strong as *either* A or B (unless our facts
///   conflict, see below).
/// * Due to unreachable code, the intersection of two lattice values could be
///   contradictory. If this happens, we return some valid lattice value so as
///   not to confuse the rest of LVI. Ideally, we'd always return Undefined, but
///   we do not make this guarantee. TODO: This would be a useful enhancement.
static ValueLatticeElement intersect(const ValueLatticeElement &A,
                                     const ValueLatticeElement &B) {
  // Undefined is the strongest state. It means the value is known to be along
  // an unreachable path.
  if (A.isUnknown())
    return A;
  if (B.isUnknown())
    return B;

  // If we gave up for one, but got a useable fact from the other, use it.
  if (A.isOverdefined())
    return B;
  if (B.isOverdefined())
    return A;

  // Can't get any more precise than constants.
  if (hasSingleValue(A))
    return A;
  if (hasSingleValue(B))
    return B;

  // Could be either constant range or not constant here.
  if (!A.isConstantRange() || !B.isConstantRange()) {
    // TODO: Arbitrary choice, could be improved
    return A;
  }

  // Intersect two constant ranges
  ConstantRange Range =
      A.getConstantRange().intersectWith(B.getConstantRange());
  // Note: An empty range is implicitly converted to unknown or undef depending
  // on MayIncludeUndef internally.
  return ValueLatticeElement::getRange(
      std::move(Range), /*MayIncludeUndef=*/A.isConstantRangeIncludingUndef() ||
                            B.isConstantRangeIncludingUndef());
}
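// For illustration (hypothetical values): intersecting the facts "X is in
// [0, 10)" and "X is in [5, 20)" yields "X is in [5, 10)", which is at least
// as strong as either input fact.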

//===----------------------------------------------------------------------===//
//                          LazyValueInfoCache Decl
//===----------------------------------------------------------------------===//

namespace {
/// A callback value handle updates the cache when values are erased.
class LazyValueInfoCache;
struct LVIValueHandle final : public CallbackVH {
  LazyValueInfoCache *Parent;

  LVIValueHandle(Value *V, LazyValueInfoCache *P = nullptr)
      : CallbackVH(V), Parent(P) { }

  void deleted() override;
  void allUsesReplacedWith(Value *V) override {
    deleted();
  }
};
} // end anonymous namespace

namespace {
using NonNullPointerSet = SmallDenseSet<AssertingVH<Value>, 2>;

/// This is the cache kept by LazyValueInfo which
/// maintains information about queries across the clients' queries.
class LazyValueInfoCache {
  /// This is all of the cached information for one basic block. It contains
  /// the per-value lattice elements, as well as a separate set for
  /// overdefined values to reduce memory usage. Additionally pointers
  /// dereferenced in the block are cached for nullability queries.
  struct BlockCacheEntry {
    SmallDenseMap<AssertingVH<Value>, ValueLatticeElement, 4> LatticeElements;
    SmallDenseSet<AssertingVH<Value>, 4> OverDefined;
    // None indicates that the nonnull pointers for this basic block
    // have not been computed yet.
    std::optional<NonNullPointerSet> NonNullPointers;
  };

  /// Cached information per basic block.
  DenseMap<PoisoningVH<BasicBlock>, std::unique_ptr<BlockCacheEntry>>
      BlockCache;
  /// Set of value handles used to erase values from the cache on deletion.
  DenseSet<LVIValueHandle, DenseMapInfo<Value *>> ValueHandles;

  const BlockCacheEntry *getBlockEntry(BasicBlock *BB) const {
    auto It = BlockCache.find_as(BB);
    if (It == BlockCache.end())
      return nullptr;
    return It->second.get();
  }

  BlockCacheEntry *getOrCreateBlockEntry(BasicBlock *BB) {
    auto It = BlockCache.find_as(BB);
    if (It == BlockCache.end())
      It = BlockCache.insert({ BB, std::make_unique<BlockCacheEntry>() })
               .first;

    return It->second.get();
  }

  void addValueHandle(Value *Val) {
    auto HandleIt = ValueHandles.find_as(Val);
    if (HandleIt == ValueHandles.end())
      ValueHandles.insert({ Val, this });
  }

public:
  void insertResult(Value *Val, BasicBlock *BB,
                    const ValueLatticeElement &Result) {
    BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);

    // Insert over-defined values into their own cache to reduce memory
    // overhead.
    if (Result.isOverdefined())
      Entry->OverDefined.insert(Val);
    else
      Entry->LatticeElements.insert({ Val, Result });

    addValueHandle(Val);
  }

  std::optional<ValueLatticeElement>
  getCachedValueInfo(Value *V, BasicBlock *BB) const {
    const BlockCacheEntry *Entry = getBlockEntry(BB);
    if (!Entry)
      return std::nullopt;

    if (Entry->OverDefined.count(V))
      return ValueLatticeElement::getOverdefined();

    auto LatticeIt = Entry->LatticeElements.find_as(V);
    if (LatticeIt == Entry->LatticeElements.end())
      return std::nullopt;

    return LatticeIt->second;
  }

  bool isNonNullAtEndOfBlock(
      Value *V, BasicBlock *BB,
      function_ref<NonNullPointerSet(BasicBlock *)> InitFn) {
    BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
    if (!Entry->NonNullPointers) {
      Entry->NonNullPointers = InitFn(BB);
      for (Value *V : *Entry->NonNullPointers)
        addValueHandle(V);
    }

    return Entry->NonNullPointers->count(V);
  }

  /// clear - Empty the cache.
  void clear() {
    BlockCache.clear();
    ValueHandles.clear();
  }

  /// Inform the cache that a given value has been deleted.
  void eraseValue(Value *V);

  /// This is part of the update interface to inform the cache
  /// that a block has been deleted.
  void eraseBlock(BasicBlock *BB);

  /// Updates the cache to remove any influence an overdefined value in
  /// OldSucc might have (unless also overdefined in NewSucc). This just
  /// flushes elements from the cache and does not add any.
  void threadEdgeImpl(BasicBlock *OldSucc, BasicBlock *NewSucc);
};
}

void LazyValueInfoCache::eraseValue(Value *V) {
  for (auto &Pair : BlockCache) {
    Pair.second->LatticeElements.erase(V);
    Pair.second->OverDefined.erase(V);
    if (Pair.second->NonNullPointers)
      Pair.second->NonNullPointers->erase(V);
  }

  auto HandleIt = ValueHandles.find_as(V);
  if (HandleIt != ValueHandles.end())
    ValueHandles.erase(HandleIt);
}

void LVIValueHandle::deleted() {
  // This erasure deallocates *this, so it MUST happen after we're done
  // using any and all members of *this.
  Parent->eraseValue(*this);
}

void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
  BlockCache.erase(BB);
}

void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
                                        BasicBlock *NewSucc) {
  // When an edge in the graph has been threaded, values that we could not
  // determine a value for before (i.e. were marked overdefined) may be
  // possible to solve now. We do NOT try to proactively update these values.
  // Instead, we clear their entries from the cache, and allow lazy updating to
  // recompute them when needed.

  // The updating process is fairly simple: we need to drop cached info
  // for all values that were marked overdefined in OldSucc, and for those same
  // values in any successor of OldSucc (except NewSucc) in which they were
  // also marked overdefined.
  std::vector<BasicBlock*> worklist;
  worklist.push_back(OldSucc);

  const BlockCacheEntry *Entry = getBlockEntry(OldSucc);
  if (!Entry || Entry->OverDefined.empty())
    return; // Nothing to process here.
  SmallVector<Value *, 4> ValsToClear(Entry->OverDefined.begin(),
                                      Entry->OverDefined.end());

  // Use a worklist to perform a depth-first search of OldSucc's successors.
  // NOTE: We do not need a visited list since any blocks we have already
  // visited will have had their overdefined markers cleared already, and we
  // thus won't loop to their successors.
  while (!worklist.empty()) {
    BasicBlock *ToUpdate = worklist.back();
    worklist.pop_back();

    // Skip blocks only accessible through NewSucc.
    if (ToUpdate == NewSucc) continue;

    // If a value was marked overdefined in OldSucc, and is here too...
    auto OI = BlockCache.find_as(ToUpdate);
    if (OI == BlockCache.end() || OI->second->OverDefined.empty())
      continue;
    auto &ValueSet = OI->second->OverDefined;

    bool changed = false;
    for (Value *V : ValsToClear) {
      if (!ValueSet.erase(V))
        continue;

      // If we removed anything, then we potentially need to update
      // blocks successors too.
      changed = true;
    }

    if (!changed) continue;

    llvm::append_range(worklist, successors(ToUpdate));
  }
}


namespace {
/// An assembly annotator class to print LazyValueCache information in
/// comments.
class LazyValueInfoImpl;
class LazyValueInfoAnnotatedWriter : public AssemblyAnnotationWriter {
  LazyValueInfoImpl *LVIImpl;
  // While analyzing which blocks we can solve values for, we need the dominator
  // information.
  DominatorTree &DT;

public:
  LazyValueInfoAnnotatedWriter(LazyValueInfoImpl *L, DominatorTree &DTree)
      : LVIImpl(L), DT(DTree) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override;

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override;
};
}
namespace {
// The actual implementation of the lazy analysis and update. Note that the
// inheritance from LazyValueInfoCache is intended to be temporary while
// splitting the code and then transitioning to a has-a relationship.
class LazyValueInfoImpl {

  /// Cached results from previous queries
  LazyValueInfoCache TheCache;

  /// This stack holds the state of the value solver during a query.
  /// It basically emulates the callstack of the naive
  /// recursive value lookup process.
  SmallVector<std::pair<BasicBlock*, Value*>, 8> BlockValueStack;

  /// Keeps track of which block-value pairs are in BlockValueStack.
  DenseSet<std::pair<BasicBlock*, Value*> > BlockValueSet;

  /// Push BV onto BlockValueStack unless it's already in there.
  /// Returns true on success.
  bool pushBlockValue(const std::pair<BasicBlock *, Value *> &BV) {
    if (!BlockValueSet.insert(BV).second)
      return false; // It's already in the stack.

    LLVM_DEBUG(dbgs() << "PUSH: " << *BV.second << " in "
                      << BV.first->getName() << "\n");
    BlockValueStack.push_back(BV);
    return true;
  }

  AssumptionCache *AC;  ///< A pointer to the cache of @llvm.assume calls.
  const DataLayout &DL; ///< A mandatory DataLayout

  /// Declaration of the llvm.experimental.guard() intrinsic,
  /// if it exists in the module.
  Function *GuardDecl;

  std::optional<ValueLatticeElement> getBlockValue(Value *Val, BasicBlock *BB,
                                                   Instruction *CxtI);
  std::optional<ValueLatticeElement> getEdgeValue(Value *V, BasicBlock *F,
                                                  BasicBlock *T,
                                                  Instruction *CxtI = nullptr);

  // These methods process one work item and may add more. A false value
  // returned means that the work item was not completely processed and must
  // be revisited after going through the new items.
  bool solveBlockValue(Value *Val, BasicBlock *BB);
  std::optional<ValueLatticeElement> solveBlockValueImpl(Value *Val,
                                                         BasicBlock *BB);
  std::optional<ValueLatticeElement> solveBlockValueNonLocal(Value *Val,
                                                             BasicBlock *BB);
  std::optional<ValueLatticeElement> solveBlockValuePHINode(PHINode *PN,
                                                            BasicBlock *BB);
  std::optional<ValueLatticeElement> solveBlockValueSelect(SelectInst *S,
                                                           BasicBlock *BB);
  std::optional<ConstantRange> getRangeFor(Value *V, Instruction *CxtI,
                                           BasicBlock *BB);
  std::optional<ValueLatticeElement> solveBlockValueBinaryOpImpl(
      Instruction *I, BasicBlock *BB,
      std::function<ConstantRange(const ConstantRange &, const ConstantRange &)>
          OpFn);
  std::optional<ValueLatticeElement>
  solveBlockValueBinaryOp(BinaryOperator *BBI, BasicBlock *BB);
  std::optional<ValueLatticeElement> solveBlockValueCast(CastInst *CI,
                                                         BasicBlock *BB);
  std::optional<ValueLatticeElement>
  solveBlockValueOverflowIntrinsic(WithOverflowInst *WO, BasicBlock *BB);
  std::optional<ValueLatticeElement> solveBlockValueIntrinsic(IntrinsicInst *II,
                                                              BasicBlock *BB);
  std::optional<ValueLatticeElement>
  solveBlockValueExtractValue(ExtractValueInst *EVI, BasicBlock *BB);
  bool isNonNullAtEndOfBlock(Value *Val, BasicBlock *BB);
  void intersectAssumeOrGuardBlockValueConstantRange(Value *Val,
                                                     ValueLatticeElement &BBLV,
                                                     Instruction *BBI);

  void solve();

public:
  /// This is the query interface to determine the lattice value for the
  /// specified Value* at the context instruction (if specified) or at the
  /// start of the block.
  ValueLatticeElement getValueInBlock(Value *V, BasicBlock *BB,
                                      Instruction *CxtI = nullptr);

  /// This is the query interface to determine the lattice value for the
  /// specified Value* at the specified instruction using only information
  /// from assumes/guards and range metadata. Unlike getValueInBlock(), no
  /// recursive query is performed.
  ValueLatticeElement getValueAt(Value *V, Instruction *CxtI);

  /// This is the query interface to determine the lattice
  /// value for the specified Value* that is true on the specified edge.
  ValueLatticeElement getValueOnEdge(Value *V, BasicBlock *FromBB,
                                     BasicBlock *ToBB,
                                     Instruction *CxtI = nullptr);

  /// Completely flush all previously computed values.
  void clear() {
    TheCache.clear();
  }

  /// Printing the LazyValueInfo Analysis.
  void printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
    LazyValueInfoAnnotatedWriter Writer(this, DTree);
    F.print(OS, &Writer);
  }

  /// This is part of the update interface to inform the cache
  /// that a block has been deleted.
  void eraseBlock(BasicBlock *BB) {
    TheCache.eraseBlock(BB);
  }

  /// This is the update interface to inform the cache that an edge from
  /// PredBB to OldSucc has been threaded to be from PredBB to NewSucc.
  void threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc, BasicBlock *NewSucc);

  LazyValueInfoImpl(AssumptionCache *AC, const DataLayout &DL,
                    Function *GuardDecl)
      : AC(AC), DL(DL), GuardDecl(GuardDecl) {}
};
} // end anonymous namespace


void LazyValueInfoImpl::solve() {
  SmallVector<std::pair<BasicBlock *, Value *>, 8> StartingStack(
      BlockValueStack.begin(), BlockValueStack.end());

  unsigned processedCount = 0;
  while (!BlockValueStack.empty()) {
    processedCount++;
    // Abort if we have to process too many values to get a result for this one.
    // Because of the design of the overdefined cache currently being per-block
    // to avoid naming-related issues (IE it wants to try to give different
    // results for the same name in different blocks), overdefined results don't
    // get cached globally, which in turn means we will often try to rediscover
    // the same overdefined result again and again. Once something like
    // PredicateInfo is used in LVI or CVP, we should be able to make the
    // overdefined cache global, and remove this throttle.
    if (processedCount > MaxProcessedPerValue) {
      LLVM_DEBUG(
          dbgs() << "Giving up on stack because we are getting too deep\n");
      // Fill in the original values
      while (!StartingStack.empty()) {
        std::pair<BasicBlock *, Value *> &e = StartingStack.back();
        TheCache.insertResult(e.second, e.first,
                              ValueLatticeElement::getOverdefined());
        StartingStack.pop_back();
      }
      BlockValueSet.clear();
      BlockValueStack.clear();
      return;
    }
    std::pair<BasicBlock *, Value *> e = BlockValueStack.back();
    assert(BlockValueSet.count(e) && "Stack value should be in BlockValueSet!");

    if (solveBlockValue(e.second, e.first)) {
      // The work item was completely processed.
      assert(BlockValueStack.back() == e && "Nothing should have been pushed!");
#ifndef NDEBUG
      std::optional<ValueLatticeElement> BBLV =
          TheCache.getCachedValueInfo(e.second, e.first);
      assert(BBLV && "Result should be in cache!");
      LLVM_DEBUG(
          dbgs() << "POP " << *e.second << " in " << e.first->getName() << " = "
                 << *BBLV << "\n");
#endif

      BlockValueStack.pop_back();
      BlockValueSet.erase(e);
    } else {
      // More work needs to be done before revisiting.
      assert(BlockValueStack.back() != e && "Stack should have been pushed!");
    }
  }
}

std::optional<ValueLatticeElement>
LazyValueInfoImpl::getBlockValue(Value *Val, BasicBlock *BB,
                                 Instruction *CxtI) {
  // If already a constant, there is nothing to compute.
  if (Constant *VC = dyn_cast<Constant>(Val))
    return ValueLatticeElement::get(VC);

  if (std::optional<ValueLatticeElement> OptLatticeVal =
          TheCache.getCachedValueInfo(Val, BB)) {
    intersectAssumeOrGuardBlockValueConstantRange(Val, *OptLatticeVal, CxtI);
    return OptLatticeVal;
  }

  // We have hit a cycle, assume overdefined.
  if (!pushBlockValue({ BB, Val }))
    return ValueLatticeElement::getOverdefined();

  // Yet to be resolved.
  return std::nullopt;
}

static ValueLatticeElement getFromRangeMetadata(Instruction *BBI) {
  switch (BBI->getOpcode()) {
  default: break;
  case Instruction::Load:
  case Instruction::Call:
  case Instruction::Invoke:
    if (MDNode *Ranges = BBI->getMetadata(LLVMContext::MD_range))
      if (isa<IntegerType>(BBI->getType())) {
        return ValueLatticeElement::getRange(
            getConstantRangeFromMetadata(*Ranges));
      }
    break;
  };
  // Nothing known - will be intersected with other facts
  return ValueLatticeElement::getOverdefined();
}
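// For example, a load annotated with "!range !{i32 0, i32 256}" yields the
// lattice value "range [0, 256)"; instructions without integer range metadata
// fall through to the overdefined result above.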

bool LazyValueInfoImpl::solveBlockValue(Value *Val, BasicBlock *BB) {
  assert(!isa<Constant>(Val) && "Value should not be constant");
  assert(!TheCache.getCachedValueInfo(Val, BB) &&
         "Value should not be in cache");

  // Hold off inserting this value into the Cache in case we have to return
  // false and come back later.
  std::optional<ValueLatticeElement> Res = solveBlockValueImpl(Val, BB);
  if (!Res)
    // Work pushed, will revisit
    return false;

  TheCache.insertResult(Val, BB, *Res);
  return true;
}

std::optional<ValueLatticeElement>
LazyValueInfoImpl::solveBlockValueImpl(Value *Val, BasicBlock *BB) {
  Instruction *BBI = dyn_cast<Instruction>(Val);
  if (!BBI || BBI->getParent() != BB)
    return solveBlockValueNonLocal(Val, BB);

  if (PHINode *PN = dyn_cast<PHINode>(BBI))
    return solveBlockValuePHINode(PN, BB);

  if (auto *SI = dyn_cast<SelectInst>(BBI))
    return solveBlockValueSelect(SI, BB);

  // If this value is a nonnull pointer, record its range and bail out. Note
  // that for all other pointer typed values, we terminate the search at the
  // definition. We could easily extend this to look through geps, bitcasts,
  // and the like to prove non-nullness, but it's not clear that's worth it
  // compile time wise. The context-insensitive value walk done inside
  // isKnownNonZero gets most of the profitable cases at much less expense.
  // This does mean that we have a sensitivity to where the defining
  // instruction is placed, even if it could legally be hoisted much higher.
  // That is unfortunate.
  PointerType *PT = dyn_cast<PointerType>(BBI->getType());
  if (PT && isKnownNonZero(BBI, DL))
    return ValueLatticeElement::getNot(ConstantPointerNull::get(PT));

  if (BBI->getType()->isIntegerTy()) {
    if (auto *CI = dyn_cast<CastInst>(BBI))
      return solveBlockValueCast(CI, BB);

    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(BBI))
      return solveBlockValueBinaryOp(BO, BB);

    if (auto *EVI = dyn_cast<ExtractValueInst>(BBI))
      return solveBlockValueExtractValue(EVI, BB);

    if (auto *II = dyn_cast<IntrinsicInst>(BBI))
      return solveBlockValueIntrinsic(II, BB);
  }

  LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
                    << "' - unknown inst def found.\n");
  return getFromRangeMetadata(BBI);
}

static void AddNonNullPointer(Value *Ptr, NonNullPointerSet &PtrSet) {
  // TODO: Use NullPointerIsDefined instead.
  if (Ptr->getType()->getPointerAddressSpace() == 0)
    PtrSet.insert(getUnderlyingObject(Ptr));
}

static void AddNonNullPointersByInstruction(
    Instruction *I, NonNullPointerSet &PtrSet) {
  if (LoadInst *L = dyn_cast<LoadInst>(I)) {
    AddNonNullPointer(L->getPointerOperand(), PtrSet);
  } else if (StoreInst *S = dyn_cast<StoreInst>(I)) {
    AddNonNullPointer(S->getPointerOperand(), PtrSet);
  } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
    if (MI->isVolatile()) return;

    // FIXME: check whether it has a valuerange that excludes zero?
    ConstantInt *Len = dyn_cast<ConstantInt>(MI->getLength());
    if (!Len || Len->isZero()) return;

    AddNonNullPointer(MI->getRawDest(), PtrSet);
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
      AddNonNullPointer(MTI->getRawSource(), PtrSet);
  }
}

bool LazyValueInfoImpl::isNonNullAtEndOfBlock(Value *Val, BasicBlock *BB) {
  if (NullPointerIsDefined(BB->getParent(),
                           Val->getType()->getPointerAddressSpace()))
    return false;

  Val = Val->stripInBoundsOffsets();
  return TheCache.isNonNullAtEndOfBlock(Val, BB, [](BasicBlock *BB) {
    NonNullPointerSet NonNullPointers;
    for (Instruction &I : *BB)
      AddNonNullPointersByInstruction(&I, NonNullPointers);
    return NonNullPointers;
  });
}

std::optional<ValueLatticeElement>
LazyValueInfoImpl::solveBlockValueNonLocal(Value *Val, BasicBlock *BB) {
  ValueLatticeElement Result; // Start Undefined.

  // If this is the entry block, we must be asking about an argument. The
  // value is overdefined.
  if (BB->isEntryBlock()) {
    assert(isa<Argument>(Val) && "Unknown live-in to the entry block");
    return ValueLatticeElement::getOverdefined();
  }

  // Loop over all of our predecessors, merging what we know from them into
  // result. If we encounter an unexplored predecessor, we eagerly explore it
  // in a depth first manner. In practice, this has the effect of discovering
  // paths we can't analyze eagerly without spending compile times analyzing
  // other paths. This heuristic benefits from the fact that predecessors are
  // frequently arranged such that dominating ones come first and we quickly
  // find a path to function entry. TODO: We should consider explicitly
  // canonicalizing to make this true rather than relying on this happy
  // accident.
  for (BasicBlock *Pred : predecessors(BB)) {
    std::optional<ValueLatticeElement> EdgeResult = getEdgeValue(Val, Pred, BB);
    if (!EdgeResult)
      // Explore that input, then return here
      return std::nullopt;

    Result.mergeIn(*EdgeResult);

    // If we hit overdefined, exit early. The BlockVals entry is already set
    // to overdefined.
    if (Result.isOverdefined()) {
      LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
                        << "' - overdefined because of pred '"
                        << Pred->getName() << "' (non local).\n");
      return Result;
    }
  }

  // Return the merged value, which is more precise than 'overdefined'.
  assert(!Result.isOverdefined());
  return Result;
}

std::optional<ValueLatticeElement>
LazyValueInfoImpl::solveBlockValuePHINode(PHINode *PN, BasicBlock *BB) {
  ValueLatticeElement Result; // Start Undefined.

  // Loop over all of our predecessors, merging what we know from them into
  // result. See the comment about the chosen traversal order in
  // solveBlockValueNonLocal; the same reasoning applies here.
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *PhiBB = PN->getIncomingBlock(i);
    Value *PhiVal = PN->getIncomingValue(i);
    // Note that we can provide PN as the context value to getEdgeValue, even
    // though the results will be cached, because PN is the value being used as
    // the cache key in the caller.
    std::optional<ValueLatticeElement> EdgeResult =
        getEdgeValue(PhiVal, PhiBB, BB, PN);
    if (!EdgeResult)
      // Explore that input, then return here
      return std::nullopt;

    Result.mergeIn(*EdgeResult);

    // If we hit overdefined, exit early. The BlockVals entry is already set
    // to overdefined.
    if (Result.isOverdefined()) {
      LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
                        << "' - overdefined because of pred (local).\n");

      return Result;
    }
  }

  // Return the merged value, which is more precise than 'overdefined'.
  assert(!Result.isOverdefined() && "Possible PHI in entry block?");
  return Result;
}

static ValueLatticeElement getValueFromCondition(Value *Val, Value *Cond,
                                                 bool isTrueDest = true);

// If we can determine a constraint on the value given conditions assumed by
// the program, intersect those constraints with BBLV
void LazyValueInfoImpl::intersectAssumeOrGuardBlockValueConstantRange(
    Value *Val, ValueLatticeElement &BBLV, Instruction *BBI) {
  BBI = BBI ? BBI : dyn_cast<Instruction>(Val);
  if (!BBI)
    return;

  BasicBlock *BB = BBI->getParent();
  for (auto &AssumeVH : AC->assumptionsFor(Val)) {
    if (!AssumeVH)
      continue;

    // Only check assumes in the block of the context instruction. Other
    // assumes will have already been taken into account when the value was
    // propagated from predecessor blocks.
    auto *I = cast<CallInst>(AssumeVH);
    if (I->getParent() != BB || !isValidAssumeForContext(I, BBI))
      continue;

    BBLV = intersect(BBLV, getValueFromCondition(Val, I->getArgOperand(0)));
  }

  // If guards are not used in the module, don't spend time looking for them
  if (GuardDecl && !GuardDecl->use_empty() &&
      BBI->getIterator() != BB->begin()) {
    for (Instruction &I : make_range(std::next(BBI->getIterator().getReverse()),
                                     BB->rend())) {
      Value *Cond = nullptr;
      if (match(&I, m_Intrinsic<Intrinsic::experimental_guard>(m_Value(Cond))))
        BBLV = intersect(BBLV, getValueFromCondition(Val, Cond));
    }
  }

  if (BBLV.isOverdefined()) {
    // Check whether we're checking at the terminator, and the pointer has
    // been dereferenced in this block.
    PointerType *PTy = dyn_cast<PointerType>(Val->getType());
    if (PTy && BB->getTerminator() == BBI &&
        isNonNullAtEndOfBlock(Val, BB))
      BBLV = ValueLatticeElement::getNot(ConstantPointerNull::get(PTy));
  }
}

static ConstantRange getConstantRangeOrFull(const ValueLatticeElement &Val,
                                            Type *Ty, const DataLayout &DL) {
  if (Val.isConstantRange())
    return Val.getConstantRange();
  return ConstantRange::getFull(DL.getTypeSizeInBits(Ty));
}

std::optional<ValueLatticeElement>
LazyValueInfoImpl::solveBlockValueSelect(SelectInst *SI, BasicBlock *BB) {
  // Recurse on our inputs if needed
  std::optional<ValueLatticeElement> OptTrueVal =
      getBlockValue(SI->getTrueValue(), BB, SI);
  if (!OptTrueVal)
    return std::nullopt;
  ValueLatticeElement &TrueVal = *OptTrueVal;

  std::optional<ValueLatticeElement> OptFalseVal =
      getBlockValue(SI->getFalseValue(), BB, SI);
  if (!OptFalseVal)
    return std::nullopt;
  ValueLatticeElement &FalseVal = *OptFalseVal;

  if (TrueVal.isConstantRange() || FalseVal.isConstantRange()) {
    const ConstantRange &TrueCR =
        getConstantRangeOrFull(TrueVal, SI->getType(), DL);
    const ConstantRange &FalseCR =
        getConstantRangeOrFull(FalseVal, SI->getType(), DL);
    Value *LHS = nullptr;
    Value *RHS = nullptr;
    SelectPatternResult SPR = matchSelectPattern(SI, LHS, RHS);
    // Is this a min specifically of our two inputs? (Avoid the risk of
    // ValueTracking getting smarter looking back past our immediate inputs.)
    if (SelectPatternResult::isMinOrMax(SPR.Flavor) &&
        ((LHS == SI->getTrueValue() && RHS == SI->getFalseValue()) ||
         (RHS == SI->getTrueValue() && LHS == SI->getFalseValue()))) {
      ConstantRange ResultCR = [&]() {
        switch (SPR.Flavor) {
        default:
          llvm_unreachable("unexpected minmax type!");
        case SPF_SMIN: /// Signed minimum
          return TrueCR.smin(FalseCR);
        case SPF_UMIN: /// Unsigned minimum
          return TrueCR.umin(FalseCR);
        case SPF_SMAX: /// Signed maximum
          return TrueCR.smax(FalseCR);
        case SPF_UMAX: /// Unsigned maximum
          return TrueCR.umax(FalseCR);
        };
      }();
      return ValueLatticeElement::getRange(
          ResultCR, TrueVal.isConstantRangeIncludingUndef() ||
                        FalseVal.isConstantRangeIncludingUndef());
    }

    if (SPR.Flavor == SPF_ABS) {
      if (LHS == SI->getTrueValue())
        return ValueLatticeElement::getRange(
            TrueCR.abs(), TrueVal.isConstantRangeIncludingUndef());
      if (LHS == SI->getFalseValue())
        return ValueLatticeElement::getRange(
            FalseCR.abs(), FalseVal.isConstantRangeIncludingUndef());
    }

    if (SPR.Flavor == SPF_NABS) {
      ConstantRange Zero(APInt::getZero(TrueCR.getBitWidth()));
      if (LHS == SI->getTrueValue())
        return ValueLatticeElement::getRange(
            Zero.sub(TrueCR.abs()), FalseVal.isConstantRangeIncludingUndef());
      if (LHS == SI->getFalseValue())
        return ValueLatticeElement::getRange(
            Zero.sub(FalseCR.abs()), FalseVal.isConstantRangeIncludingUndef());
    }
  }

  // Can we constrain the facts about the true and false values by using the
  // condition itself? This shows up with idioms like e.g. select(a > 5, a, 5).
  // TODO: We could potentially refine an overdefined true value above.
  Value *Cond = SI->getCondition();
  TrueVal = intersect(TrueVal,
                      getValueFromCondition(SI->getTrueValue(), Cond, true));
  FalseVal = intersect(FalseVal,
                       getValueFromCondition(SI->getFalseValue(), Cond, false));

  ValueLatticeElement Result = TrueVal;
  Result.mergeIn(FalseVal);
  return Result;
}
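// For illustration (hypothetical values): given "%s = select i1 %c, i32 %a,
// i32 %b" with %a known to be in [0, 10) and %b in [5, 20), a plain select
// merges the two facts into [0, 20); if the select instead matches e.g. an
// smin of %a and %b, the tighter TrueCR.smin(FalseCR) range is used.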

std::optional<ConstantRange>
LazyValueInfoImpl::getRangeFor(Value *V, Instruction *CxtI, BasicBlock *BB) {
  std::optional<ValueLatticeElement> OptVal = getBlockValue(V, BB, CxtI);
  if (!OptVal)
    return std::nullopt;
  return getConstantRangeOrFull(*OptVal, V->getType(), DL);
}

std::optional<ValueLatticeElement>
LazyValueInfoImpl::solveBlockValueCast(CastInst *CI, BasicBlock *BB) {
  // Without knowing how wide the input is, we can't analyze it in any useful
  // way.
  if (!CI->getOperand(0)->getType()->isSized())
    return ValueLatticeElement::getOverdefined();

  // Filter out casts we don't know how to reason about before attempting to
  // recurse on our operand. This can cut a long search short if we know we're
  // not going to be able to get any useful information anyway.
  switch (CI->getOpcode()) {
  case Instruction::Trunc:
  case Instruction::SExt:
  case Instruction::ZExt:
  case Instruction::BitCast:
    break;
  default:
    // Unhandled instructions are overdefined.
    LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
                      << "' - overdefined (unknown cast).\n");
    return ValueLatticeElement::getOverdefined();
  }

  // Figure out the range of the LHS. If that fails, we still apply the
  // transfer rule on the full set since we may be able to locally infer
  // interesting facts.
  std::optional<ConstantRange> LHSRes = getRangeFor(CI->getOperand(0), CI, BB);
  if (!LHSRes)
    // More work to do before applying this transfer rule.
    return std::nullopt;
  const ConstantRange &LHSRange = *LHSRes;

  const unsigned ResultBitWidth = CI->getType()->getIntegerBitWidth();

  // NOTE: We're currently limited by the set of operations that ConstantRange
  // can evaluate symbolically. Enhancing that set will allow us to analyze
  // more definitions.
  return ValueLatticeElement::getRange(LHSRange.castOp(CI->getOpcode(),
                                                       ResultBitWidth));
}
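// For example, if the operand of "%z = zext i8 %x to i32" is known to be in
// the 8-bit range [0, 100), castOp(ZExt, 32) produces the 32-bit range
// [0, 100) for %z.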

std::optional<ValueLatticeElement>
LazyValueInfoImpl::solveBlockValueBinaryOpImpl(
    Instruction *I, BasicBlock *BB,
    std::function<ConstantRange(const ConstantRange &, const ConstantRange &)>
        OpFn) {
  // Figure out the ranges of the operands. If that fails, use a
  // conservative range, but apply the transfer rule anyway. This
  // lets us pick up facts from expressions like "and i32 (call i32
  // @foo()), 32"
  std::optional<ConstantRange> LHSRes = getRangeFor(I->getOperand(0), I, BB);
  std::optional<ConstantRange> RHSRes = getRangeFor(I->getOperand(1), I, BB);
  if (!LHSRes || !RHSRes)
    // More work to do before applying this transfer rule.
    return std::nullopt;

  const ConstantRange &LHSRange = *LHSRes;
  const ConstantRange &RHSRange = *RHSRes;
  return ValueLatticeElement::getRange(OpFn(LHSRange, RHSRange));
}

std::optional<ValueLatticeElement>
LazyValueInfoImpl::solveBlockValueBinaryOp(BinaryOperator *BO, BasicBlock *BB) {
  assert(BO->getOperand(0)->getType()->isSized() &&
         "all operands to binary operators are sized");
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(BO)) {
    unsigned NoWrapKind = 0;
    if (OBO->hasNoUnsignedWrap())
      NoWrapKind |= OverflowingBinaryOperator::NoUnsignedWrap;
    if (OBO->hasNoSignedWrap())
      NoWrapKind |= OverflowingBinaryOperator::NoSignedWrap;

    return solveBlockValueBinaryOpImpl(
        BO, BB,
        [BO, NoWrapKind](const ConstantRange &CR1, const ConstantRange &CR2) {
          return CR1.overflowingBinaryOp(BO->getOpcode(), CR2, NoWrapKind);
        });
  }

  return solveBlockValueBinaryOpImpl(
      BO, BB, [BO](const ConstantRange &CR1, const ConstantRange &CR2) {
        return CR1.binaryOp(BO->getOpcode(), CR2);
      });
}

std::optional<ValueLatticeElement>
LazyValueInfoImpl::solveBlockValueOverflowIntrinsic(WithOverflowInst *WO,
                                                    BasicBlock *BB) {
  return solveBlockValueBinaryOpImpl(
      WO, BB, [WO](const ConstantRange &CR1, const ConstantRange &CR2) {
        return CR1.binaryOp(WO->getBinaryOp(), CR2);
      });
}

std::optional<ValueLatticeElement>
LazyValueInfoImpl::solveBlockValueIntrinsic(IntrinsicInst *II, BasicBlock *BB) {
  if (!ConstantRange::isIntrinsicSupported(II->getIntrinsicID())) {
    LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
                      << "' - unknown intrinsic.\n");
    return getFromRangeMetadata(II);
  }

  SmallVector<ConstantRange, 2> OpRanges;
  for (Value *Op : II->args()) {
    std::optional<ConstantRange> Range = getRangeFor(Op, II, BB);
    if (!Range)
      return std::nullopt;
    OpRanges.push_back(*Range);
  }

  return ValueLatticeElement::getRange(
      ConstantRange::intrinsic(II->getIntrinsicID(), OpRanges));
}

std::optional<ValueLatticeElement>
LazyValueInfoImpl::solveBlockValueExtractValue(ExtractValueInst *EVI,
                                               BasicBlock *BB) {
  if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()))
    if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 0)
      return solveBlockValueOverflowIntrinsic(WO, BB);

  // Handle extractvalue of insertvalue to allow further simplification
  // based on the replaced with.overflow intrinsics.
  if (Value *V = simplifyExtractValueInst(
          EVI->getAggregateOperand(), EVI->getIndices(),
          EVI->getModule()->getDataLayout()))
    return getBlockValue(V, BB, EVI);

  LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
                    << "' - overdefined (unknown extractvalue).\n");
  return ValueLatticeElement::getOverdefined();
}

static bool matchICmpOperand(APInt &Offset, Value *LHS, Value *Val,
                             ICmpInst::Predicate Pred) {
  if (LHS == Val)
    return true;

  // Handle range checking idiom produced by InstCombine. We will subtract the
  // offset from the allowed range for RHS in this case.
  const APInt *C;
  if (match(LHS, m_Add(m_Specific(Val), m_APInt(C)))) {
    Offset = *C;
    return true;
  }

  // Handle the symmetric case. This appears in saturation patterns like
  // (x == 16) ? 16 : (x + 1).
  if (match(Val, m_Add(m_Specific(LHS), m_APInt(C)))) {
    Offset = -*C;
    return true;
  }

  // If (x | y) < C, then (x < C) && (y < C).
  if (match(LHS, m_c_Or(m_Specific(Val), m_Value())) &&
      (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE))
    return true;

  // If (x & y) > C, then (x > C) && (y > C).
  if (match(LHS, m_c_And(m_Specific(Val), m_Value())) &&
      (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE))
    return true;

  return false;
}

/// Get value range for a "(Val + Offset) Pred RHS" condition.
static ValueLatticeElement getValueFromSimpleICmpCondition(
    CmpInst::Predicate Pred, Value *RHS, const APInt &Offset) {
  ConstantRange RHSRange(RHS->getType()->getIntegerBitWidth(),
                         /*isFullSet=*/true);
  if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS))
    RHSRange = ConstantRange(CI->getValue());
  else if (Instruction *I = dyn_cast<Instruction>(RHS))
    if (auto *Ranges = I->getMetadata(LLVMContext::MD_range))
      RHSRange = getConstantRangeFromMetadata(*Ranges);

  ConstantRange TrueValues =
      ConstantRange::makeAllowedICmpRegion(Pred, RHSRange);
  return ValueLatticeElement::getRange(TrueValues.subtract(Offset));
}
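// For example, for the condition "(%x + 5) u< 10" matchICmpOperand() extracts
// Offset = 5, and the allowed region for the add is [0, 10); subtracting the
// offset gives the (wrapped) range [-5, 5) for %x itself.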

static ValueLatticeElement getValueFromICmpCondition(Value *Val, ICmpInst *ICI,
                                                     bool isTrueDest) {
  Value *LHS = ICI->getOperand(0);
  Value *RHS = ICI->getOperand(1);

  // Get the predicate that must hold along the considered edge.
  CmpInst::Predicate EdgePred =
      isTrueDest ? ICI->getPredicate() : ICI->getInversePredicate();

  if (isa<Constant>(RHS)) {
    if (ICI->isEquality() && LHS == Val) {
      if (EdgePred == ICmpInst::ICMP_EQ)
        return ValueLatticeElement::get(cast<Constant>(RHS));
      else if (!isa<UndefValue>(RHS))
        return ValueLatticeElement::getNot(cast<Constant>(RHS));
    }
  }

  Type *Ty = Val->getType();
  if (!Ty->isIntegerTy())
    return ValueLatticeElement::getOverdefined();

  unsigned BitWidth = Ty->getScalarSizeInBits();
  APInt Offset(BitWidth, 0);
  if (matchICmpOperand(Offset, LHS, Val, EdgePred))
    return getValueFromSimpleICmpCondition(EdgePred, RHS, Offset);

  CmpInst::Predicate SwappedPred = CmpInst::getSwappedPredicate(EdgePred);
  if (matchICmpOperand(Offset, RHS, Val, SwappedPred))
    return getValueFromSimpleICmpCondition(SwappedPred, LHS, Offset);

  const APInt *Mask, *C;
  if (match(LHS, m_And(m_Specific(Val), m_APInt(Mask))) &&
      match(RHS, m_APInt(C))) {
    // If (Val & Mask) == C then all the masked bits are known and we can
    // compute a value range based on that.
    if (EdgePred == ICmpInst::ICMP_EQ) {
      KnownBits Known;
      Known.Zero = ~*C & *Mask;
      Known.One = *C & *Mask;
      return ValueLatticeElement::getRange(
          ConstantRange::fromKnownBits(Known, /*IsSigned*/ false));
    }
    // If (Val & Mask) != 0 then the value must be larger than the lowest set
    // bit of Mask.
    if (EdgePred == ICmpInst::ICMP_NE && !Mask->isZero() && C->isZero()) {
      return ValueLatticeElement::getRange(ConstantRange::getNonEmpty(
          APInt::getOneBitSet(BitWidth, Mask->countTrailingZeros()),
          APInt::getZero(BitWidth)));
    }
  }

  // If (X urem Modulus) >= C, then X >= C.
  // If trunc X >= C, then X >= C.
  // TODO: An upper bound could be computed as well.
  if (match(LHS, m_CombineOr(m_URem(m_Specific(Val), m_Value()),
                             m_Trunc(m_Specific(Val)))) &&
      match(RHS, m_APInt(C))) {
    // Use the icmp region so we don't have to deal with different predicates.
    ConstantRange CR = ConstantRange::makeExactICmpRegion(EdgePred, *C);
    if (!CR.isEmptySet())
      return ValueLatticeElement::getRange(ConstantRange::getNonEmpty(
          CR.getUnsignedMin().zext(BitWidth), APInt(BitWidth, 0)));
  }

  return ValueLatticeElement::getOverdefined();
}
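// For example, on the true edge of "icmp ne i32 (and i32 %x, 8), 0" the code
// above concludes %x u>= 8, i.e. the nonempty wrapped range [8, 0).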

// Handle conditions of the form
// extractvalue(op.with.overflow(%x, C), 1).
static ValueLatticeElement getValueFromOverflowCondition(
    Value *Val, WithOverflowInst *WO, bool IsTrueDest) {
  // TODO: This only works with a constant RHS for now. We could also compute
  // the range of the RHS, but this doesn't fit into the current structure of
  // the edge value calculation.
  const APInt *C;
  if (WO->getLHS() != Val || !match(WO->getRHS(), m_APInt(C)))
    return ValueLatticeElement::getOverdefined();

  // Calculate the possible values of %x for which no overflow occurs.
  ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
      WO->getBinaryOp(), *C, WO->getNoWrapKind());

  // If overflow is false, %x is constrained to NWR. If overflow is true, %x is
  // constrained to its inverse (all values that might cause overflow).
  if (IsTrueDest)
    NWR = NWR.inverse();
  return ValueLatticeElement::getRange(NWR);
}
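// For example, for "uadd.with.overflow.i8(%x, 100)" the no-wrap region for %x
// is [0, 156); on the edge where the overflow bit is known true, %x is
// therefore constrained to the inverse range [156, 0).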

// Tracks a Value * condition and whether we're interested in it or its inverse
typedef PointerIntPair<Value *, 1, bool> CondValue;

static std::optional<ValueLatticeElement> getValueFromConditionImpl(
    Value *Val, CondValue CondVal, bool isRevisit,
    SmallDenseMap<CondValue, ValueLatticeElement> &Visited,
    SmallVectorImpl<CondValue> &Worklist) {

  Value *Cond = CondVal.getPointer();
  bool isTrueDest = CondVal.getInt();
  if (!isRevisit) {
    if (ICmpInst *ICI = dyn_cast<ICmpInst>(Cond))
      return getValueFromICmpCondition(Val, ICI, isTrueDest);

    if (auto *EVI = dyn_cast<ExtractValueInst>(Cond))
      if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()))
        if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 1)
          return getValueFromOverflowCondition(Val, WO, isTrueDest);
  }

  Value *N;
  if (match(Cond, m_Not(m_Value(N)))) {
    CondValue NKey(N, !isTrueDest);
    auto NV = Visited.find(NKey);
    if (NV == Visited.end()) {
      Worklist.push_back(NKey);
      return std::nullopt;
    }
    return NV->second;
  }

  Value *L, *R;
  bool IsAnd;
  if (match(Cond, m_LogicalAnd(m_Value(L), m_Value(R))))
    IsAnd = true;
  else if (match(Cond, m_LogicalOr(m_Value(L), m_Value(R))))
    IsAnd = false;
  else
    return ValueLatticeElement::getOverdefined();

  auto LV = Visited.find(CondValue(L, isTrueDest));
  auto RV = Visited.find(CondValue(R, isTrueDest));

  // if (L && R) -> intersect L and R
  // if (!(L || R)) -> intersect !L and !R
  // if (L || R) -> union L and R
  // if (!(L && R)) -> union !L and !R
  if ((isTrueDest ^ IsAnd) && (LV != Visited.end())) {
    ValueLatticeElement V = LV->second;
    if (V.isOverdefined())
      return V;
    if (RV != Visited.end()) {
      V.mergeIn(RV->second);
      return V;
    }
  }

  if (LV == Visited.end() || RV == Visited.end()) {
    assert(!isRevisit);
    if (LV == Visited.end())
      Worklist.push_back(CondValue(L, isTrueDest));
    if (RV == Visited.end())
      Worklist.push_back(CondValue(R, isTrueDest));
    return std::nullopt;
  }

  return intersect(LV->second, RV->second);
}

ValueLatticeElement getValueFromCondition(Value *Val, Value *Cond,
                                          bool isTrueDest) {
  assert(Cond && "precondition");
  SmallDenseMap<CondValue, ValueLatticeElement> Visited;
  SmallVector<CondValue> Worklist;

  CondValue CondKey(Cond, isTrueDest);
  Worklist.push_back(CondKey);
  do {
    CondValue CurrentCond = Worklist.back();
    // Insert an Overdefined placeholder into the set to prevent
    // infinite recursion if the IR contains a use that is not
    // dominated by its def, as in this example:
    //   "%tmp3 = or i1 undef, %tmp4"
    //   "%tmp4 = or i1 undef, %tmp3"
    auto Iter =
        Visited.try_emplace(CurrentCond, ValueLatticeElement::getOverdefined());
    bool isRevisit = !Iter.second;
    std::optional<ValueLatticeElement> Result = getValueFromConditionImpl(
        Val, CurrentCond, isRevisit, Visited, Worklist);
    if (Result) {
      Visited[CurrentCond] = *Result;
      Worklist.pop_back();
    }
  } while (!Worklist.empty());

  auto Result = Visited.find(CondKey);
  assert(Result != Visited.end());
  return Result->second;
}

// Return true if Usr has Op as an operand, otherwise false.
static bool usesOperand(User *Usr, Value *Op) {
  return is_contained(Usr->operands(), Op);
}

// Return true if the instruction type of Val is supported by
// constantFoldUser(). Currently CastInst, BinaryOperator and FreezeInst only.
// Call this before calling constantFoldUser() to find out if it's even worth
// attempting to call it.
static bool isOperationFoldable(User *Usr) {
  return isa<CastInst>(Usr) || isa<BinaryOperator>(Usr) || isa<FreezeInst>(Usr);
}

// Check if Usr can be simplified to an integer constant when the value of one
// of its operands Op is an integer constant OpConstVal. If so, return it as a
// lattice value range with a single element or otherwise return an overdefined
// lattice value.
static ValueLatticeElement constantFoldUser(User *Usr, Value *Op,
                                            const APInt &OpConstVal,
                                            const DataLayout &DL) {
  assert(isOperationFoldable(Usr) && "Precondition");
  Constant* OpConst = Constant::getIntegerValue(Op->getType(), OpConstVal);
  // Check if Usr can be simplified to a constant.
  if (auto *CI = dyn_cast<CastInst>(Usr)) {
    assert(CI->getOperand(0) == Op && "Operand 0 isn't Op");
    if (auto *C = dyn_cast_or_null<ConstantInt>(
            simplifyCastInst(CI->getOpcode(), OpConst,
                             CI->getDestTy(), DL))) {
      return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
    }
  } else if (auto *BO = dyn_cast<BinaryOperator>(Usr)) {
    bool Op0Match = BO->getOperand(0) == Op;
    bool Op1Match = BO->getOperand(1) == Op;
    assert((Op0Match || Op1Match) &&
           "Neither Operand 0 nor Operand 1 is a match");
    Value *LHS = Op0Match ? OpConst : BO->getOperand(0);
    Value *RHS = Op1Match ? OpConst : BO->getOperand(1);
    if (auto *C = dyn_cast_or_null<ConstantInt>(
            simplifyBinOp(BO->getOpcode(), LHS, RHS, DL))) {
      return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
    }
  } else if (isa<FreezeInst>(Usr)) {
    assert(cast<FreezeInst>(Usr)->getOperand(0) == Op && "Operand 0 isn't Op");
    return ValueLatticeElement::getRange(ConstantRange(OpConstVal));
  }
  return ValueLatticeElement::getOverdefined();
}
1317 /// Compute the value of Val on the edge BBFrom -> BBTo. Returns false if
1318 /// Val is not constrained on the edge. Result is unspecified if return value
1319 /// is false.
getEdgeValueLocal(Value * Val,BasicBlock * BBFrom,BasicBlock * BBTo)1320 static std::optional<ValueLatticeElement> getEdgeValueLocal(Value *Val,
1321 BasicBlock *BBFrom,
1322 BasicBlock *BBTo) {
1323 // TODO: Handle more complex conditionals. If (v == 0 || v2 < 1) is false, we
1324 // know that v != 0.
1325 if (BranchInst *BI = dyn_cast<BranchInst>(BBFrom->getTerminator())) {
1326 // If this is a conditional branch and only one successor goes to BBTo, then
1327 // we may be able to infer something from the condition.
1328 if (BI->isConditional() &&
1329 BI->getSuccessor(0) != BI->getSuccessor(1)) {
1330 bool isTrueDest = BI->getSuccessor(0) == BBTo;
1331 assert(BI->getSuccessor(!isTrueDest) == BBTo &&
1332 "BBTo isn't a successor of BBFrom");
1333 Value *Condition = BI->getCondition();
1334
1335 // If V is the condition of the branch itself, then we know exactly what
1336 // it is.
1337 if (Condition == Val)
1338 return ValueLatticeElement::get(ConstantInt::get(
1339 Type::getInt1Ty(Val->getContext()), isTrueDest));
1340
1341 // If the condition of the branch is an equality comparison, we may be
1342 // able to infer the value.
1343 ValueLatticeElement Result = getValueFromCondition(Val, Condition,
1344 isTrueDest);
1345 if (!Result.isOverdefined())
1346 return Result;
1347
1348 if (User *Usr = dyn_cast<User>(Val)) {
1349 assert(Result.isOverdefined() && "Result isn't overdefined");
1350 // Check with isOperationFoldable() first to avoid linearly iterating
1351 // over the operands unnecessarily which can be expensive for
1352 // instructions with many operands.
1353 if (isa<IntegerType>(Usr->getType()) && isOperationFoldable(Usr)) {
1354 const DataLayout &DL = BBTo->getModule()->getDataLayout();
1355 if (usesOperand(Usr, Condition)) {
1356 // If Val has Condition as an operand and Val can be folded into a
1357 // constant with either Condition == true or Condition == false,
1358 // propagate the constant.
1359 // eg.
1360 // ; %Val is true on the edge to %then.
1361 // %Val = and i1 %Condition, true.
1362 // br %Condition, label %then, label %else
1363 APInt ConditionVal(1, isTrueDest ? 1 : 0);
1364 Result = constantFoldUser(Usr, Condition, ConditionVal, DL);
1365 } else {
1366 // If one of Val's operand has an inferred value, we may be able to
1367 // infer the value of Val.
            // eg.
            //    ; %Val is 94 on the edge to %then.
            //    %Val = add i8 %Op, 1
            //    %Condition = icmp eq i8 %Op, 93
            //    br i1 %Condition, label %then, label %else
            for (unsigned i = 0; i < Usr->getNumOperands(); ++i) {
              Value *Op = Usr->getOperand(i);
              ValueLatticeElement OpLatticeVal =
                  getValueFromCondition(Op, Condition, isTrueDest);
              if (std::optional<APInt> OpConst =
                      OpLatticeVal.asConstantInteger()) {
                Result = constantFoldUser(Usr, Op, *OpConst, DL);
                break;
              }
            }
          }
        }
      }
      if (!Result.isOverdefined())
        return Result;
    }
  }

  // If the edge was formed by a switch on the value, then we may know exactly
  // what it is.
  if (SwitchInst *SI = dyn_cast<SwitchInst>(BBFrom->getTerminator())) {
    Value *Condition = SI->getCondition();
    if (!isa<IntegerType>(Val->getType()))
      return std::nullopt;
    bool ValUsesConditionAndMayBeFoldable = false;
    if (Condition != Val) {
      // Check if Val has Condition as an operand.
      if (User *Usr = dyn_cast<User>(Val))
        ValUsesConditionAndMayBeFoldable = isOperationFoldable(Usr) &&
            usesOperand(Usr, Condition);
      if (!ValUsesConditionAndMayBeFoldable)
        return std::nullopt;
    }
    assert((Condition == Val || ValUsesConditionAndMayBeFoldable) &&
           "Either Condition == Val or Val uses Condition");

    bool DefaultCase = SI->getDefaultDest() == BBTo;
    unsigned BitWidth = Val->getType()->getIntegerBitWidth();
    ConstantRange EdgesVals(BitWidth, DefaultCase/*isFullSet*/);

    for (auto Case : SI->cases()) {
      APInt CaseValue = Case.getCaseValue()->getValue();
      ConstantRange EdgeVal(CaseValue);
      if (ValUsesConditionAndMayBeFoldable) {
        User *Usr = cast<User>(Val);
        const DataLayout &DL = BBTo->getModule()->getDataLayout();
        ValueLatticeElement EdgeLatticeVal =
            constantFoldUser(Usr, Condition, CaseValue, DL);
        if (EdgeLatticeVal.isOverdefined())
          return std::nullopt;
        EdgeVal = EdgeLatticeVal.getConstantRange();
      }
      if (DefaultCase) {
        // It is possible that the default destination is the destination of
        // some cases. We cannot perform difference for those cases.
        // We know Condition != CaseValue in BBTo. In some cases we can use
        // this to infer Val == f(Condition) is != f(CaseValue). For now, we
        // only do this when f is identity (i.e. Val == Condition), but we
        // should be able to do this for any injective f.
        if (Case.getCaseSuccessor() != BBTo && Condition == Val)
          EdgesVals = EdgesVals.difference(EdgeVal);
      } else if (Case.getCaseSuccessor() == BBTo)
        EdgesVals = EdgesVals.unionWith(EdgeVal);
    }
    return ValueLatticeElement::getRange(std::move(EdgesVals));
  }
  return std::nullopt;
}

/// Compute the value of Val on the edge BBFrom -> BBTo or the value at
/// the basic block if the edge does not constrain Val.
std::optional<ValueLatticeElement>
LazyValueInfoImpl::getEdgeValue(Value *Val, BasicBlock *BBFrom,
                                BasicBlock *BBTo, Instruction *CxtI) {
  // If already a constant, there is nothing to compute.
  if (Constant *VC = dyn_cast<Constant>(Val))
    return ValueLatticeElement::get(VC);

  ValueLatticeElement LocalResult =
      getEdgeValueLocal(Val, BBFrom, BBTo)
          .value_or(ValueLatticeElement::getOverdefined());
  if (hasSingleValue(LocalResult))
    // Can't get any more precise here
    return LocalResult;

  std::optional<ValueLatticeElement> OptInBlock =
      getBlockValue(Val, BBFrom, BBFrom->getTerminator());
  if (!OptInBlock)
    return std::nullopt;
  ValueLatticeElement &InBlock = *OptInBlock;

  // We can use the context instruction (generically the ultimate instruction
  // the calling pass is trying to simplify) here, even though the result of
  // this function is generally cached when called from the solve* functions
  // (and that cached result might be used with queries using a different
  // context instruction), because when this function is called from the solve*
  // functions, the context instruction is not provided. When called from
  // LazyValueInfoImpl::getValueOnEdge, the context instruction is provided,
  // but then the result is not cached.
  intersectAssumeOrGuardBlockValueConstantRange(Val, InBlock, CxtI);

  return intersect(LocalResult, InBlock);
}

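/// Compute the lattice value of \p V at the end of block \p BB, lazily
/// solving any block values that are not yet cached. \p CxtI, if provided,
/// is used to further refine the result with assumptions and guards.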
ValueLatticeElement LazyValueInfoImpl::getValueInBlock(Value *V, BasicBlock *BB,
                                                       Instruction *CxtI) {
  LLVM_DEBUG(dbgs() << "LVI Getting block end value " << *V << " at '"
                    << BB->getName() << "'\n");

  assert(BlockValueStack.empty() && BlockValueSet.empty());
  std::optional<ValueLatticeElement> OptResult = getBlockValue(V, BB, CxtI);
  if (!OptResult) {
    solve();
    OptResult = getBlockValue(V, BB, CxtI);
    assert(OptResult && "Value not available after solving");
  }

  ValueLatticeElement Result = *OptResult;
  LLVM_DEBUG(dbgs() << "  Result = " << Result << "\n");
  return Result;
}

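/// Compute a lattice value for \p V at \p CxtI without performing any
/// block-level dataflow: only constants, range metadata, and assumptions or
/// guards that apply at \p CxtI are consulted.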
ValueLatticeElement LazyValueInfoImpl::getValueAt(Value *V, Instruction *CxtI) {
  LLVM_DEBUG(dbgs() << "LVI Getting value " << *V << " at '" << CxtI->getName()
                    << "'\n");

  if (auto *C = dyn_cast<Constant>(V))
    return ValueLatticeElement::get(C);

  ValueLatticeElement Result = ValueLatticeElement::getOverdefined();
  if (auto *I = dyn_cast<Instruction>(V))
    Result = getFromRangeMetadata(I);
  intersectAssumeOrGuardBlockValueConstantRange(V, Result, CxtI);

  LLVM_DEBUG(dbgs() << "  Result = " << Result << "\n");
  return Result;
}

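/// Compute the lattice value of \p V on the edge \p FromBB -> \p ToBB, lazily
/// solving any block values that are needed but not yet cached.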
ValueLatticeElement LazyValueInfoImpl::
getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB,
               Instruction *CxtI) {
  LLVM_DEBUG(dbgs() << "LVI Getting edge value " << *V << " from '"
                    << FromBB->getName() << "' to '" << ToBB->getName()
                    << "'\n");

  std::optional<ValueLatticeElement> Result =
      getEdgeValue(V, FromBB, ToBB, CxtI);
  if (!Result) {
    solve();
    Result = getEdgeValue(V, FromBB, ToBB, CxtI);
    assert(Result && "More work to do after problem solved?");
  }

  LLVM_DEBUG(dbgs() << "  Result = " << *Result << "\n");
  return *Result;
}

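/// Inform the cache that the edge PredBB -> OldSucc has been threaded to
/// PredBB -> NewSucc, so that cached results affected by the new edge can be
/// updated.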
void LazyValueInfoImpl::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
                                   BasicBlock *NewSucc) {
  TheCache.threadEdgeImpl(OldSucc, NewSucc);
}

//===----------------------------------------------------------------------===//
//                            LazyValueInfo Impl
//===----------------------------------------------------------------------===//

/// This lazily constructs the LazyValueInfoImpl.
static LazyValueInfoImpl &getImpl(void *&PImpl, AssumptionCache *AC,
                                  const Module *M) {
  if (!PImpl) {
    assert(M && "getImpl() called with a null Module");
    const DataLayout &DL = M->getDataLayout();
    Function *GuardDecl = M->getFunction(
        Intrinsic::getName(Intrinsic::experimental_guard));
    PImpl = new LazyValueInfoImpl(AC, DL, GuardDecl);
  }
  return *static_cast<LazyValueInfoImpl*>(PImpl);
}

bool LazyValueInfoWrapperPass::runOnFunction(Function &F) {
  Info.AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  Info.TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);

  if (Info.PImpl)
    getImpl(Info.PImpl, Info.AC, F.getParent()).clear();

  // Fully lazy.
  return false;
}

void LazyValueInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
}

LazyValueInfo &LazyValueInfoWrapperPass::getLVI() { return Info; }

LazyValueInfo::~LazyValueInfo() { releaseMemory(); }

void LazyValueInfo::releaseMemory() {
  // If the cache was allocated, free it.
  if (PImpl) {
    delete &getImpl(PImpl, AC, nullptr);
    PImpl = nullptr;
  }
}

bool LazyValueInfo::invalidate(Function &F, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We need to invalidate if we have either failed to preserve this analysis
  // result directly or if any of its dependencies have been invalidated.
  auto PAC = PA.getChecker<LazyValueAnalysis>();
  if (!(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()))
    return true;

  return false;
}

void LazyValueInfoWrapperPass::releaseMemory() { Info.releaseMemory(); }

LazyValueInfo LazyValueAnalysis::run(Function &F,
                                     FunctionAnalysisManager &FAM) {
  auto &AC = FAM.getResult<AssumptionAnalysis>(F);
  auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);

  return LazyValueInfo(&AC, &F.getParent()->getDataLayout(), &TLI);
}

/// Returns true if we can statically tell that this value will never be a
/// "useful" constant. In practice, this means we've got something like an
/// alloca or a malloc call for which a comparison against a constant can
/// only be guarding dead code. Note that we are potentially giving up some
/// precision in dead code (a constant result) in favour of avoiding an
/// expensive search for an easily answered common query.
static bool isKnownNonConstant(Value *V) {
  V = V->stripPointerCasts();
  // The return val of alloc cannot be a Constant.
  if (isa<AllocaInst>(V))
    return true;
  return false;
}

Constant *LazyValueInfo::getConstant(Value *V, Instruction *CxtI) {
  // Bail out early if V is known not to be a Constant.
  if (isKnownNonConstant(V))
    return nullptr;

  BasicBlock *BB = CxtI->getParent();
  ValueLatticeElement Result =
      getImpl(PImpl, AC, BB->getModule()).getValueInBlock(V, BB, CxtI);

  if (Result.isConstant())
    return Result.getConstant();
  if (Result.isConstantRange()) {
    const ConstantRange &CR = Result.getConstantRange();
    if (const APInt *SingleVal = CR.getSingleElement())
      return ConstantInt::get(V->getContext(), *SingleVal);
  }
  return nullptr;
}

ConstantRange LazyValueInfo::getConstantRange(Value *V, Instruction *CxtI,
                                              bool UndefAllowed) {
  assert(V->getType()->isIntegerTy());
  unsigned Width = V->getType()->getIntegerBitWidth();
  BasicBlock *BB = CxtI->getParent();
  ValueLatticeElement Result =
      getImpl(PImpl, AC, BB->getModule()).getValueInBlock(V, BB, CxtI);
  if (Result.isUnknown())
    return ConstantRange::getEmpty(Width);
  if (Result.isConstantRange(UndefAllowed))
    return Result.getConstantRange(UndefAllowed);
  // We represent ConstantInt constants as constant ranges, but other kinds
  // of integer constants, i.e. ConstantExpr, will be tagged as constants.
  assert(!(Result.isConstant() && isa<ConstantInt>(Result.getConstant())) &&
         "ConstantInt value must be represented as constantrange");
  return ConstantRange::getFull(Width);
}

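/// Return the range of the value used by \p U, additionally refined by
/// conditions that are known to hold at that particular use (select
/// conditions and conditions on the incoming edge of a phi). For example, in
///   %s = select i1 %c, i8 %x, i8 42
/// the use of %x is only reached when %c is true, so any range implied by %c
/// can be intersected into the result.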
ConstantRange LazyValueInfo::getConstantRangeAtUse(const Use &U,
                                                   bool UndefAllowed) {
  Value *V = U.get();
  ConstantRange CR =
      getConstantRange(V, cast<Instruction>(U.getUser()), UndefAllowed);

  // Check whether the only (possibly transitive) use of the value is in a
  // position where V can be constrained by a select or branch condition.
  const Use *CurrU = &U;
  // TODO: Increase limit?
  const unsigned MaxUsesToInspect = 3;
  for (unsigned I = 0; I < MaxUsesToInspect; ++I) {
    std::optional<ValueLatticeElement> CondVal;
    auto *CurrI = cast<Instruction>(CurrU->getUser());
    if (auto *SI = dyn_cast<SelectInst>(CurrI)) {
      if (CurrU->getOperandNo() == 1)
        CondVal = getValueFromCondition(V, SI->getCondition(), true);
      else if (CurrU->getOperandNo() == 2)
        CondVal = getValueFromCondition(V, SI->getCondition(), false);
    } else if (auto *PHI = dyn_cast<PHINode>(CurrI)) {
      // TODO: Use non-local query?
      CondVal =
          getEdgeValueLocal(V, PHI->getIncomingBlock(*CurrU), PHI->getParent());
    }
    if (CondVal && CondVal->isConstantRange())
      CR = CR.intersectWith(CondVal->getConstantRange());

    // Only follow one-use chain, to allow direct intersection of conditions.
    // If there are multiple uses, we would have to intersect with the union of
    // all conditions at different uses.
    // Stop walking if we hit a non-speculatable instruction. Even if the
    // result is only used under a specific condition, executing the
    // instruction itself may cause side effects or UB already.
    // This also disallows looking through phi nodes: If the phi node is part
    // of a cycle, we might end up reasoning about values from different cycle
    // iterations (PR60629).
    if (!CurrI->hasOneUse() || !isSafeToSpeculativelyExecute(CurrI))
      break;
    CurrU = &*CurrI->use_begin();
  }
  return CR;
}

/// Determine whether the specified value is known to be a
/// constant on the specified edge. Return null if not.
Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
                                           BasicBlock *ToBB,
                                           Instruction *CxtI) {
  Module *M = FromBB->getModule();
  ValueLatticeElement Result =
      getImpl(PImpl, AC, M).getValueOnEdge(V, FromBB, ToBB, CxtI);

  if (Result.isConstant())
    return Result.getConstant();
  if (Result.isConstantRange()) {
    const ConstantRange &CR = Result.getConstantRange();
    if (const APInt *SingleVal = CR.getSingleElement())
      return ConstantInt::get(V->getContext(), *SingleVal);
  }
  return nullptr;
}

ConstantRange LazyValueInfo::getConstantRangeOnEdge(Value *V,
                                                    BasicBlock *FromBB,
                                                    BasicBlock *ToBB,
                                                    Instruction *CxtI) {
  unsigned Width = V->getType()->getIntegerBitWidth();
  Module *M = FromBB->getModule();
  ValueLatticeElement Result =
      getImpl(PImpl, AC, M).getValueOnEdge(V, FromBB, ToBB, CxtI);

  if (Result.isUnknown())
    return ConstantRange::getEmpty(Width);
  if (Result.isConstantRange())
    return Result.getConstantRange();
  // We represent ConstantInt constants as constant ranges, but other kinds
  // of integer constants, i.e. ConstantExpr, will be tagged as constants.
  assert(!(Result.isConstant() && isa<ConstantInt>(Result.getConstant())) &&
         "ConstantInt value must be represented as constantrange");
  return ConstantRange::getFull(Width);
}

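/// Evaluate the predicate \p Pred applied to the lattice value \p Val and the
/// constant \p C. Returns True or False when the comparison is decided for
/// every value that \p Val may take, and Unknown otherwise.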
static LazyValueInfo::Tristate
getPredicateResult(unsigned Pred, Constant *C, const ValueLatticeElement &Val,
                   const DataLayout &DL, TargetLibraryInfo *TLI) {
  // If we know the value is a constant, evaluate the conditional.
  Constant *Res = nullptr;
  if (Val.isConstant()) {
    Res = ConstantFoldCompareInstOperands(Pred, Val.getConstant(), C, DL, TLI);
    if (ConstantInt *ResCI = dyn_cast<ConstantInt>(Res))
      return ResCI->isZero() ? LazyValueInfo::False : LazyValueInfo::True;
    return LazyValueInfo::Unknown;
  }

  if (Val.isConstantRange()) {
    ConstantInt *CI = dyn_cast<ConstantInt>(C);
    if (!CI) return LazyValueInfo::Unknown;

    const ConstantRange &CR = Val.getConstantRange();
    if (Pred == ICmpInst::ICMP_EQ) {
      if (!CR.contains(CI->getValue()))
        return LazyValueInfo::False;

      if (CR.isSingleElement())
        return LazyValueInfo::True;
    } else if (Pred == ICmpInst::ICMP_NE) {
      if (!CR.contains(CI->getValue()))
        return LazyValueInfo::True;

      if (CR.isSingleElement())
        return LazyValueInfo::False;
    } else {
      // Handle more complex predicates.
      ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(
          (ICmpInst::Predicate)Pred, CI->getValue());
      if (TrueValues.contains(CR))
        return LazyValueInfo::True;
      if (TrueValues.inverse().contains(CR))
        return LazyValueInfo::False;
    }
    return LazyValueInfo::Unknown;
  }

  if (Val.isNotConstant()) {
    // If this is an equality comparison, we can try to fold it knowing that
    // "V != C1".
    if (Pred == ICmpInst::ICMP_EQ) {
      // !C1 == C -> false iff C1 == C.
      Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
                                            Val.getNotConstant(), C, DL,
                                            TLI);
      if (Res->isNullValue())
        return LazyValueInfo::False;
    } else if (Pred == ICmpInst::ICMP_NE) {
      // !C1 != C -> true iff C1 == C.
      Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
                                            Val.getNotConstant(), C, DL,
                                            TLI);
      if (Res->isNullValue())
        return LazyValueInfo::True;
    }
    return LazyValueInfo::Unknown;
  }

  return LazyValueInfo::Unknown;
}

/// Determine whether the specified value comparison with a constant is known to
/// be true or false on the specified CFG edge. Pred is a CmpInst predicate.
LazyValueInfo::Tristate
LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
                                  BasicBlock *FromBB, BasicBlock *ToBB,
                                  Instruction *CxtI) {
  Module *M = FromBB->getModule();
  ValueLatticeElement Result =
      getImpl(PImpl, AC, M).getValueOnEdge(V, FromBB, ToBB, CxtI);

  return getPredicateResult(Pred, C, Result, M->getDataLayout(), TLI);
}

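/// Determine whether 'V Pred C' is known to be true or false at \p CxtI. If
/// \p UseBlockValue is set, the block-level lattice value is consulted;
/// otherwise only range metadata and assumptions valid at \p CxtI are used.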
LazyValueInfo::Tristate
LazyValueInfo::getPredicateAt(unsigned Pred, Value *V, Constant *C,
                              Instruction *CxtI, bool UseBlockValue) {
  // Comparisons of a pointer against null are a common query. If
  // isKnownNonZero can tell us the result of the predicate, we can
  // return it quickly. But this is only a fastpath, and falling
  // through would still be correct.
  Module *M = CxtI->getModule();
  const DataLayout &DL = M->getDataLayout();
  if (V->getType()->isPointerTy() && C->isNullValue() &&
      isKnownNonZero(V->stripPointerCastsSameRepresentation(), DL)) {
    if (Pred == ICmpInst::ICMP_EQ)
      return LazyValueInfo::False;
    else if (Pred == ICmpInst::ICMP_NE)
      return LazyValueInfo::True;
  }

  ValueLatticeElement Result = UseBlockValue
      ? getImpl(PImpl, AC, M).getValueInBlock(V, CxtI->getParent(), CxtI)
      : getImpl(PImpl, AC, M).getValueAt(V, CxtI);
  Tristate Ret = getPredicateResult(Pred, C, Result, DL, TLI);
  if (Ret != Unknown)
    return Ret;

  // Note: The following bit of code is somewhat distinct from the rest of LVI;
  // LVI as a whole tries to compute a lattice value which is conservatively
  // correct at a given location. In this case, we have a predicate which we
  // weren't able to prove about the merged result, and we're pushing that
  // predicate back along each incoming edge to see if we can prove it
  // separately for each input. As a motivating example, consider:
  // bb1:
  //   %v1 = ... ; constantrange<1, 5>
  //   br label %merge
  // bb2:
  //   %v2 = ... ; constantrange<10, 20>
  //   br label %merge
  // merge:
  //   %phi = phi [%v1, %v2] ; constantrange<1,20>
  //   %pred = icmp eq i32 %phi, 8
  // We can't tell from the lattice value for '%phi' that '%pred' is false
  // along each path, but by checking the predicate over each input separately,
  // we can.
  // We limit the search to one step backwards from the current BB and value.
  // We could consider extending this to search further backwards through the
  // CFG and/or value graph, but there are non-obvious compile time vs quality
  // tradeoffs.
  BasicBlock *BB = CxtI->getParent();

  // Function entry or an unreachable block. Bail to avoid confusing
  // analysis below.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
  if (PI == PE)
    return Unknown;

  // If V is a PHI node in the same block as the context, we need to ask
  // questions about the predicate as applied to the incoming value along
  // each edge. This is useful for eliminating cases where the predicate is
  // known along all incoming edges.
  if (auto *PHI = dyn_cast<PHINode>(V))
    if (PHI->getParent() == BB) {
      Tristate Baseline = Unknown;
      for (unsigned i = 0, e = PHI->getNumIncomingValues(); i < e; i++) {
        Value *Incoming = PHI->getIncomingValue(i);
        BasicBlock *PredBB = PHI->getIncomingBlock(i);
        // Note that PredBB may be BB itself.
        Tristate Result =
            getPredicateOnEdge(Pred, Incoming, C, PredBB, BB, CxtI);

        // Keep going as long as we've seen a consistent known result for
        // all inputs.
        Baseline = (i == 0) ? Result /* First iteration */
                            : (Baseline == Result ? Baseline
                                                  : Unknown); /* All others */
        if (Baseline == Unknown)
          break;
      }
      if (Baseline != Unknown)
        return Baseline;
    }

  // For a comparison where V is defined outside this block, it's possible
  // that we've branched on it before. Look to see if the value is known
  // on all incoming edges.
  if (!isa<Instruction>(V) || cast<Instruction>(V)->getParent() != BB) {
    // For each predecessor edge, determine if the comparison is true or false
    // on that edge. If they're all true or all false, we can conclude
    // the value of the comparison in this block.
    Tristate Baseline = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
    if (Baseline != Unknown) {
      // Check that all remaining incoming values match the first one.
      while (++PI != PE) {
        Tristate Ret = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
        if (Ret != Baseline)
          break;
      }
      // If we terminated early, then one of the values didn't match.
      if (PI == PE) {
        return Baseline;
      }
    }
  }

  return Unknown;
}

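/// Variant of getPredicateAt for two non-constant operands. If either operand
/// is a constant, the query is canonicalized to the constant-RHS form;
/// otherwise the block-level lattice values of both operands are compared.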
LazyValueInfo::Tristate LazyValueInfo::getPredicateAt(unsigned P, Value *LHS,
                                                      Value *RHS,
                                                      Instruction *CxtI,
                                                      bool UseBlockValue) {
  CmpInst::Predicate Pred = (CmpInst::Predicate)P;

  if (auto *C = dyn_cast<Constant>(RHS))
    return getPredicateAt(P, LHS, C, CxtI, UseBlockValue);
  if (auto *C = dyn_cast<Constant>(LHS))
    return getPredicateAt(CmpInst::getSwappedPredicate(Pred), RHS, C, CxtI,
                          UseBlockValue);

  // Got two non-Constant values. Try to determine the comparison results based
  // on the block values of the two operands, e.g. because they have
  // non-overlapping ranges.
  if (UseBlockValue) {
    Module *M = CxtI->getModule();
    ValueLatticeElement L =
        getImpl(PImpl, AC, M).getValueInBlock(LHS, CxtI->getParent(), CxtI);
    if (L.isOverdefined())
      return LazyValueInfo::Unknown;

    ValueLatticeElement R =
        getImpl(PImpl, AC, M).getValueInBlock(RHS, CxtI->getParent(), CxtI);
    Type *Ty = CmpInst::makeCmpResultType(LHS->getType());
    if (Constant *Res = L.getCompare((CmpInst::Predicate)P, Ty, R,
                                     M->getDataLayout())) {
      if (Res->isNullValue())
        return LazyValueInfo::False;
      if (Res->isOneValue())
        return LazyValueInfo::True;
    }
  }
  return LazyValueInfo::Unknown;
}

void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
                               BasicBlock *NewSucc) {
  if (PImpl) {
    getImpl(PImpl, AC, PredBB->getModule())
        .threadEdge(PredBB, OldSucc, NewSucc);
  }
}

void LazyValueInfo::eraseBlock(BasicBlock *BB) {
  if (PImpl) {
    getImpl(PImpl, AC, BB->getModule()).eraseBlock(BB);
  }
}

void LazyValueInfo::clear(const Module *M) {
  if (PImpl) {
    getImpl(PImpl, AC, M).clear();
  }
}

void LazyValueInfo::printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
  if (PImpl) {
    getImpl(PImpl, AC, F.getParent()).printLVI(F, DTree, OS);
  }
}

// Print the LVI for the function arguments at the start of each basic block.
void LazyValueInfoAnnotatedWriter::emitBasicBlockStartAnnot(
    const BasicBlock *BB, formatted_raw_ostream &OS) {
  // Find if there are lattice values defined for arguments of the function.
  auto *F = BB->getParent();
  for (const auto &Arg : F->args()) {
    ValueLatticeElement Result = LVIImpl->getValueInBlock(
        const_cast<Argument *>(&Arg), const_cast<BasicBlock *>(BB));
    if (Result.isUnknown())
      continue;
    OS << "; LatticeVal for: '" << Arg << "' is: " << Result << "\n";
  }
}

// This function prints the LVI analysis for the instruction I at the beginning
// of various basic blocks. It relies on calculated values that are stored in
// the LazyValueInfoCache and, in the absence of cached values, recalculates
// the LazyValueInfo for `I` and prints that info.
void LazyValueInfoAnnotatedWriter::emitInstructionAnnot(
    const Instruction *I, formatted_raw_ostream &OS) {

  auto *ParentBB = I->getParent();
  SmallPtrSet<const BasicBlock*, 16> BlocksContainingLVI;
  // We can generate (solve) LVI values only for blocks that are dominated by
  // I's parent. However, to avoid generating LVI for all dominated blocks,
  // which would contain redundant/uninteresting information, we only print LVI
  // for blocks that may use this LVI information (such as immediate successor
  // blocks and blocks that contain uses of `I`).
  auto printResult = [&](const BasicBlock *BB) {
    if (!BlocksContainingLVI.insert(BB).second)
      return;
    ValueLatticeElement Result = LVIImpl->getValueInBlock(
        const_cast<Instruction *>(I), const_cast<BasicBlock *>(BB));
    OS << "; LatticeVal for: '" << *I << "' in BB: '";
    BB->printAsOperand(OS, false);
    OS << "' is: " << Result << "\n";
  };

  printResult(ParentBB);
  // Print the LVI analysis results for the immediate successor blocks that
  // are dominated by `ParentBB`.
  for (const auto *BBSucc : successors(ParentBB))
    if (DT.dominates(ParentBB, BBSucc))
      printResult(BBSucc);

  // Print LVI in blocks where `I` is used.
  for (const auto *U : I->users())
    if (auto *UseI = dyn_cast<Instruction>(U))
      if (!isa<PHINode>(UseI) || DT.dominates(ParentBB, UseI->getParent()))
        printResult(UseI->getParent());

}

namespace {
// Printer class for LazyValueInfo results.
class LazyValueInfoPrinter : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid
  LazyValueInfoPrinter() : FunctionPass(ID) {
    initializeLazyValueInfoPrinterPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    AU.addRequired<LazyValueInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
  }

  // Get the mandatory dominator tree analysis and pass this in to the
  // LVIPrinter. We cannot rely on the LVI's DT, since it's optional.
  bool runOnFunction(Function &F) override {
    dbgs() << "LVI for function '" << F.getName() << "':\n";
    auto &LVI = getAnalysis<LazyValueInfoWrapperPass>().getLVI();
    auto &DTree = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    LVI.printLVI(F, DTree, dbgs());
    return false;
  }
};
}

char LazyValueInfoPrinter::ID = 0;
INITIALIZE_PASS_BEGIN(LazyValueInfoPrinter, "print-lazy-value-info",
                "Lazy Value Info Printer Pass", false, false)
INITIALIZE_PASS_DEPENDENCY(LazyValueInfoWrapperPass)
INITIALIZE_PASS_END(LazyValueInfoPrinter, "print-lazy-value-info",
                "Lazy Value Info Printer Pass", false, false)