1 //===- Local.cpp - Functions to perform local transformations -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
// This family of functions performs various local transformations to the
10 // program.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/Utils/Local.h"
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/DenseMap.h"
17 #include "llvm/ADT/DenseMapInfo.h"
18 #include "llvm/ADT/DenseSet.h"
19 #include "llvm/ADT/Hashing.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SetVector.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/Statistic.h"
25 #include "llvm/Analysis/AssumeBundleQueries.h"
26 #include "llvm/Analysis/ConstantFolding.h"
27 #include "llvm/Analysis/DomTreeUpdater.h"
28 #include "llvm/Analysis/InstructionSimplify.h"
29 #include "llvm/Analysis/MemoryBuiltins.h"
30 #include "llvm/Analysis/MemorySSAUpdater.h"
31 #include "llvm/Analysis/TargetLibraryInfo.h"
32 #include "llvm/Analysis/ValueTracking.h"
33 #include "llvm/Analysis/VectorUtils.h"
34 #include "llvm/BinaryFormat/Dwarf.h"
35 #include "llvm/IR/Argument.h"
36 #include "llvm/IR/Attributes.h"
37 #include "llvm/IR/BasicBlock.h"
38 #include "llvm/IR/CFG.h"
39 #include "llvm/IR/Constant.h"
40 #include "llvm/IR/ConstantRange.h"
41 #include "llvm/IR/Constants.h"
42 #include "llvm/IR/DIBuilder.h"
43 #include "llvm/IR/DataLayout.h"
44 #include "llvm/IR/DebugInfo.h"
45 #include "llvm/IR/DebugInfoMetadata.h"
46 #include "llvm/IR/DebugLoc.h"
47 #include "llvm/IR/DerivedTypes.h"
48 #include "llvm/IR/Dominators.h"
49 #include "llvm/IR/EHPersonalities.h"
50 #include "llvm/IR/Function.h"
51 #include "llvm/IR/GetElementPtrTypeIterator.h"
52 #include "llvm/IR/GlobalObject.h"
53 #include "llvm/IR/IRBuilder.h"
54 #include "llvm/IR/InstrTypes.h"
55 #include "llvm/IR/Instruction.h"
56 #include "llvm/IR/Instructions.h"
57 #include "llvm/IR/IntrinsicInst.h"
58 #include "llvm/IR/Intrinsics.h"
59 #include "llvm/IR/IntrinsicsWebAssembly.h"
60 #include "llvm/IR/LLVMContext.h"
61 #include "llvm/IR/MDBuilder.h"
62 #include "llvm/IR/Metadata.h"
63 #include "llvm/IR/Module.h"
64 #include "llvm/IR/PatternMatch.h"
65 #include "llvm/IR/ProfDataUtils.h"
66 #include "llvm/IR/Type.h"
67 #include "llvm/IR/Use.h"
68 #include "llvm/IR/User.h"
69 #include "llvm/IR/Value.h"
70 #include "llvm/IR/ValueHandle.h"
71 #include "llvm/Support/Casting.h"
72 #include "llvm/Support/Debug.h"
73 #include "llvm/Support/ErrorHandling.h"
74 #include "llvm/Support/KnownBits.h"
75 #include "llvm/Support/raw_ostream.h"
76 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
77 #include "llvm/Transforms/Utils/ValueMapper.h"
78 #include <algorithm>
79 #include <cassert>
80 #include <cstdint>
81 #include <iterator>
82 #include <map>
83 #include <optional>
84 #include <utility>
85 
86 using namespace llvm;
87 using namespace llvm::PatternMatch;
88 
89 #define DEBUG_TYPE "local"
90 
91 STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");
STATISTIC(NumPHICSEs, "Number of PHIs that got CSE'd");
93 
94 static cl::opt<bool> PHICSEDebugHash(
95     "phicse-debug-hash",
96 #ifdef EXPENSIVE_CHECKS
97     cl::init(true),
98 #else
99     cl::init(false),
100 #endif
101     cl::Hidden,
    cl::desc("Perform extra assertion checking to verify that PHINode's hash "
             "function is well-behaved w.r.t. its isEqual predicate"));
104 
105 static cl::opt<unsigned> PHICSENumPHISmallSize(
106     "phicse-num-phi-smallsize", cl::init(32), cl::Hidden,
107     cl::desc(
        "When the basic block contains no more than this number of PHI nodes, "
        "perform a (faster!) exhaustive search instead of a set-driven one."));
110 
111 // Max recursion depth for collectBitParts used when detecting bswap and
112 // bitreverse idioms.
113 static const unsigned BitPartRecursionMaxDepth = 48;
114 
115 //===----------------------------------------------------------------------===//
116 //  Local constant propagation.
117 //
118 
119 /// ConstantFoldTerminator - If a terminator instruction is predicated on a
120 /// constant value, convert it into an unconditional branch to the constant
121 /// destination.  This is a nontrivial operation because the successors of this
122 /// basic block must have their PHI nodes updated.
123 /// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
124 /// conditions and indirectbr addresses this might make dead if
125 /// DeleteDeadConditions is true.
126 bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
127                                   const TargetLibraryInfo *TLI,
128                                   DomTreeUpdater *DTU) {
129   Instruction *T = BB->getTerminator();
130   IRBuilder<> Builder(T);
131 
  // Branch - See if we are conditionally jumping on a constant.
133   if (auto *BI = dyn_cast<BranchInst>(T)) {
134     if (BI->isUnconditional()) return false;  // Can't optimize uncond branch
135 
136     BasicBlock *Dest1 = BI->getSuccessor(0);
137     BasicBlock *Dest2 = BI->getSuccessor(1);
138 
139     if (Dest2 == Dest1) {       // Conditional branch to same location?
      // This branch matches something like this:
      //     br i1 %cond, label %Dest, label %Dest
      // and changes it into:  br label %Dest
143 
144       // Let the basic block know that we are letting go of one copy of it.
145       assert(BI->getParent() && "Terminator not inserted in block!");
146       Dest1->removePredecessor(BI->getParent());
147 
148       // Replace the conditional branch with an unconditional one.
149       BranchInst *NewBI = Builder.CreateBr(Dest1);
150 
151       // Transfer the metadata to the new branch instruction.
152       NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
153                                 LLVMContext::MD_annotation});
154 
155       Value *Cond = BI->getCondition();
156       BI->eraseFromParent();
157       if (DeleteDeadConditions)
158         RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
159       return true;
160     }
161 
162     if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
      // Are we branching on a constant?
      // YES.  Change to an unconditional branch.
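      // For example (illustrative IR), a branch such as:
      //     br i1 true, label %Dest1, label %Dest2
      // becomes:
      //     br label %Dest1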
165       BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
166       BasicBlock *OldDest = Cond->getZExtValue() ? Dest2 : Dest1;
167 
      // Let the basic block know that we are letting go of it.  Based on this,
      // it will adjust its PHI nodes.
170       OldDest->removePredecessor(BB);
171 
172       // Replace the conditional branch with an unconditional one.
173       BranchInst *NewBI = Builder.CreateBr(Destination);
174 
175       // Transfer the metadata to the new branch instruction.
176       NewBI->copyMetadata(*BI, {LLVMContext::MD_loop, LLVMContext::MD_dbg,
177                                 LLVMContext::MD_annotation});
178 
179       BI->eraseFromParent();
180       if (DTU)
181         DTU->applyUpdates({{DominatorTree::Delete, BB, OldDest}});
182       return true;
183     }
184 
185     return false;
186   }
187 
188   if (auto *SI = dyn_cast<SwitchInst>(T)) {
189     // If we are switching on a constant, we can convert the switch to an
190     // unconditional branch.
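    // For example (illustrative IR), a switch such as:
    //     switch i32 1, label %default [ i32 1, label %case1 ]
    // becomes:
    //     br label %case1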
191     auto *CI = dyn_cast<ConstantInt>(SI->getCondition());
192     BasicBlock *DefaultDest = SI->getDefaultDest();
193     BasicBlock *TheOnlyDest = DefaultDest;
194 
195     // If the default is unreachable, ignore it when searching for TheOnlyDest.
196     if (isa<UnreachableInst>(DefaultDest->getFirstNonPHIOrDbg()) &&
197         SI->getNumCases() > 0) {
198       TheOnlyDest = SI->case_begin()->getCaseSuccessor();
199     }
200 
201     bool Changed = false;
202 
203     // Figure out which case it goes to.
204     for (auto It = SI->case_begin(), End = SI->case_end(); It != End;) {
205       // Found case matching a constant operand?
206       if (It->getCaseValue() == CI) {
207         TheOnlyDest = It->getCaseSuccessor();
208         break;
209       }
210 
211       // Check to see if this branch is going to the same place as the default
212       // dest.  If so, eliminate it as an explicit compare.
213       if (It->getCaseSuccessor() == DefaultDest) {
214         MDNode *MD = getValidBranchWeightMDNode(*SI);
215         unsigned NCases = SI->getNumCases();
216         // Fold the case metadata into the default if there will be any branches
217         // left, unless the metadata doesn't match the switch.
218         if (NCases > 1 && MD) {
219           // Collect branch weights into a vector.
220           SmallVector<uint32_t, 8> Weights;
221           extractBranchWeights(MD, Weights);
222 
223           // Merge weight of this case to the default weight.
224           unsigned Idx = It->getCaseIndex();
225           // TODO: Add overflow check.
226           Weights[0] += Weights[Idx + 1];
227           // Remove weight for this case.
228           std::swap(Weights[Idx + 1], Weights.back());
229           Weights.pop_back();
230           SI->setMetadata(LLVMContext::MD_prof,
231                           MDBuilder(BB->getContext()).
232                           createBranchWeights(Weights));
233         }
234         // Remove this entry.
235         BasicBlock *ParentBB = SI->getParent();
236         DefaultDest->removePredecessor(ParentBB);
237         It = SI->removeCase(It);
238         End = SI->case_end();
239 
240         // Removing this case may have made the condition constant. In that
241         // case, update CI and restart iteration through the cases.
242         if (auto *NewCI = dyn_cast<ConstantInt>(SI->getCondition())) {
243           CI = NewCI;
244           It = SI->case_begin();
245         }
246 
247         Changed = true;
248         continue;
249       }
250 
      // Otherwise, check to see if the switch only branches to one destination.
      // We do this by resetting "TheOnlyDest" to null when we find two
      // non-equal destinations.
254       if (It->getCaseSuccessor() != TheOnlyDest)
255         TheOnlyDest = nullptr;
256 
257       // Increment this iterator as we haven't removed the case.
258       ++It;
259     }
260 
261     if (CI && !TheOnlyDest) {
      // We are branching on a constant that matches none of the cases; go to
      // the default successor.
264       TheOnlyDest = SI->getDefaultDest();
265     }
266 
267     // If we found a single destination that we can fold the switch into, do so
268     // now.
269     if (TheOnlyDest) {
270       // Insert the new branch.
271       Builder.CreateBr(TheOnlyDest);
272       BasicBlock *BB = SI->getParent();
273 
274       SmallSet<BasicBlock *, 8> RemovedSuccessors;
275 
276       // Remove entries from PHI nodes which we no longer branch to...
277       BasicBlock *SuccToKeep = TheOnlyDest;
278       for (BasicBlock *Succ : successors(SI)) {
279         if (DTU && Succ != TheOnlyDest)
280           RemovedSuccessors.insert(Succ);
        // Keep only the first branch to TheOnlyDest.
282         if (Succ == SuccToKeep) {
283           SuccToKeep = nullptr; // Don't modify the first branch to TheOnlyDest
284         } else {
285           Succ->removePredecessor(BB);
286         }
287       }
288 
289       // Delete the old switch.
290       Value *Cond = SI->getCondition();
291       SI->eraseFromParent();
292       if (DeleteDeadConditions)
293         RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
294       if (DTU) {
295         std::vector<DominatorTree::UpdateType> Updates;
296         Updates.reserve(RemovedSuccessors.size());
297         for (auto *RemovedSuccessor : RemovedSuccessors)
298           Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
299         DTU->applyUpdates(Updates);
300       }
301       return true;
302     }
303 
304     if (SI->getNumCases() == 1) {
305       // Otherwise, we can fold this switch into a conditional branch
306       // instruction if it has only one non-default destination.
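      // For example (illustrative IR), a switch such as:
      //     switch i32 %x, label %default [ i32 10, label %case ]
      // becomes:
      //     %cond = icmp eq i32 %x, 10
      //     br i1 %cond, label %case, label %default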
307       auto FirstCase = *SI->case_begin();
308       Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
309           FirstCase.getCaseValue(), "cond");
310 
311       // Insert the new branch.
312       BranchInst *NewBr = Builder.CreateCondBr(Cond,
313                                                FirstCase.getCaseSuccessor(),
314                                                SI->getDefaultDest());
315       SmallVector<uint32_t> Weights;
316       if (extractBranchWeights(*SI, Weights) && Weights.size() == 2) {
317         uint32_t DefWeight = Weights[0];
318         uint32_t CaseWeight = Weights[1];
319         // The TrueWeight should be the weight for the single case of SI.
320         NewBr->setMetadata(LLVMContext::MD_prof,
321                            MDBuilder(BB->getContext())
322                                .createBranchWeights(CaseWeight, DefWeight));
323       }
324 
      // Transfer the make.implicit metadata to the newly-created conditional
      // branch.
326       MDNode *MakeImplicitMD = SI->getMetadata(LLVMContext::MD_make_implicit);
327       if (MakeImplicitMD)
328         NewBr->setMetadata(LLVMContext::MD_make_implicit, MakeImplicitMD);
329 
330       // Delete the old switch.
331       SI->eraseFromParent();
332       return true;
333     }
334     return Changed;
335   }
336 
337   if (auto *IBI = dyn_cast<IndirectBrInst>(T)) {
338     // indirectbr blockaddress(@F, @BB) -> br label @BB
339     if (auto *BA =
340           dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
341       BasicBlock *TheOnlyDest = BA->getBasicBlock();
342       SmallSet<BasicBlock *, 8> RemovedSuccessors;
343 
344       // Insert the new branch.
345       Builder.CreateBr(TheOnlyDest);
346 
347       BasicBlock *SuccToKeep = TheOnlyDest;
348       for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
349         BasicBlock *DestBB = IBI->getDestination(i);
350         if (DTU && DestBB != TheOnlyDest)
351           RemovedSuccessors.insert(DestBB);
352         if (IBI->getDestination(i) == SuccToKeep) {
353           SuccToKeep = nullptr;
354         } else {
355           DestBB->removePredecessor(BB);
356         }
357       }
358       Value *Address = IBI->getAddress();
359       IBI->eraseFromParent();
360       if (DeleteDeadConditions)
361         // Delete pointer cast instructions.
362         RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);
363 
364       // Also zap the blockaddress constant if there are no users remaining,
365       // otherwise the destination is still marked as having its address taken.
366       if (BA->use_empty())
367         BA->destroyConstant();
368 
369       // If we didn't find our destination in the IBI successor list, then we
370       // have undefined behavior.  Replace the unconditional branch with an
371       // 'unreachable' instruction.
372       if (SuccToKeep) {
373         BB->getTerminator()->eraseFromParent();
374         new UnreachableInst(BB->getContext(), BB);
375       }
376 
377       if (DTU) {
378         std::vector<DominatorTree::UpdateType> Updates;
379         Updates.reserve(RemovedSuccessors.size());
380         for (auto *RemovedSuccessor : RemovedSuccessors)
381           Updates.push_back({DominatorTree::Delete, BB, RemovedSuccessor});
382         DTU->applyUpdates(Updates);
383       }
384       return true;
385     }
386   }
387 
388   return false;
389 }
390 
391 //===----------------------------------------------------------------------===//
392 //  Local dead code elimination.
393 //
394 
395 /// isInstructionTriviallyDead - Return true if the result produced by the
396 /// instruction is not used, and the instruction has no side effects.
397 ///
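/// For example (illustrative IR), the following instruction is trivially dead
/// if %sum has no uses:
///     %sum = add i32 %a, %b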
398 bool llvm::isInstructionTriviallyDead(Instruction *I,
399                                       const TargetLibraryInfo *TLI) {
400   if (!I->use_empty())
401     return false;
402   return wouldInstructionBeTriviallyDead(I, TLI);
403 }
404 
405 bool llvm::wouldInstructionBeTriviallyDeadOnUnusedPaths(
406     Instruction *I, const TargetLibraryInfo *TLI) {
  // Instructions that are "markers" and have implied meaning for the code
  // around them (without explicit uses) are not dead on unused paths.
409   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
410     if (II->getIntrinsicID() == Intrinsic::stacksave ||
411         II->getIntrinsicID() == Intrinsic::launder_invariant_group ||
412         II->isLifetimeStartOrEnd())
413       return false;
414   return wouldInstructionBeTriviallyDead(I, TLI);
415 }
416 
417 bool llvm::wouldInstructionBeTriviallyDead(Instruction *I,
418                                            const TargetLibraryInfo *TLI) {
419   if (I->isTerminator())
420     return false;
421 
422   // We don't want the landingpad-like instructions removed by anything this
423   // general.
424   if (I->isEHPad())
425     return false;
426 
427   // We don't want debug info removed by anything this general.
428   if (isa<DbgVariableIntrinsic>(I))
429     return false;
430 
431   if (DbgLabelInst *DLI = dyn_cast<DbgLabelInst>(I)) {
432     if (DLI->getLabel())
433       return false;
434     return true;
435   }
436 
437   if (auto *CB = dyn_cast<CallBase>(I))
438     if (isRemovableAlloc(CB, TLI))
439       return true;
440 
441   if (!I->willReturn()) {
442     auto *II = dyn_cast<IntrinsicInst>(I);
443     if (!II)
444       return false;
445 
446     // TODO: These intrinsics are not safe to remove, because this may remove
447     // a well-defined trap.
448     switch (II->getIntrinsicID()) {
449     case Intrinsic::wasm_trunc_signed:
450     case Intrinsic::wasm_trunc_unsigned:
451     case Intrinsic::ptrauth_auth:
452     case Intrinsic::ptrauth_resign:
453       return true;
454     default:
455       return false;
456     }
457   }
458 
459   if (!I->mayHaveSideEffects())
460     return true;
461 
462   // Special case intrinsics that "may have side effects" but can be deleted
463   // when dead.
464   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
465     // Safe to delete llvm.stacksave and launder.invariant.group if dead.
466     if (II->getIntrinsicID() == Intrinsic::stacksave ||
467         II->getIntrinsicID() == Intrinsic::launder_invariant_group)
468       return true;
469 
470     if (II->isLifetimeStartOrEnd()) {
471       auto *Arg = II->getArgOperand(1);
      // Lifetime intrinsics are dead when their pointer operand is undef.
473       if (isa<UndefValue>(Arg))
474         return true;
      // If the pointer operand is an alloca, global, or argument and its only
      // uses are lifetime intrinsics, then the intrinsics are dead.
477       if (isa<AllocaInst>(Arg) || isa<GlobalValue>(Arg) || isa<Argument>(Arg))
478         return llvm::all_of(Arg->uses(), [](Use &Use) {
479           if (IntrinsicInst *IntrinsicUse =
480                   dyn_cast<IntrinsicInst>(Use.getUser()))
481             return IntrinsicUse->isLifetimeStartOrEnd();
482           return false;
483         });
484       return false;
485     }
486 
487     // Assumptions are dead if their condition is trivially true.  Guards on
488     // true are operationally no-ops.  In the future we can consider more
489     // sophisticated tradeoffs for guards considering potential for check
490     // widening, but for now we keep things simple.
491     if ((II->getIntrinsicID() == Intrinsic::assume &&
492          isAssumeWithEmptyBundle(cast<AssumeInst>(*II))) ||
493         II->getIntrinsicID() == Intrinsic::experimental_guard) {
494       if (ConstantInt *Cond = dyn_cast<ConstantInt>(II->getArgOperand(0)))
495         return !Cond->isZero();
496 
497       return false;
498     }
499 
500     if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(I)) {
501       std::optional<fp::ExceptionBehavior> ExBehavior =
502           FPI->getExceptionBehavior();
503       return *ExBehavior != fp::ebStrict;
504     }
505   }
506 
507   if (auto *Call = dyn_cast<CallBase>(I)) {
508     if (Value *FreedOp = getFreedOperand(Call, TLI))
509       if (Constant *C = dyn_cast<Constant>(FreedOp))
510         return C->isNullValue() || isa<UndefValue>(C);
511     if (isMathLibCallNoop(Call, TLI))
512       return true;
513   }
514 
  // Non-volatile loads from constant globals can be removed, even if they are
  // atomic.
516   if (auto *LI = dyn_cast<LoadInst>(I))
517     if (auto *GV = dyn_cast<GlobalVariable>(
518             LI->getPointerOperand()->stripPointerCasts()))
519       if (!LI->isVolatile() && GV->isConstant())
520         return true;
521 
522   return false;
523 }
524 
525 /// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
526 /// trivially dead instruction, delete it.  If that makes any of its operands
527 /// trivially dead, delete them too, recursively.  Return true if any
528 /// instructions were deleted.
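///
/// For example (illustrative IR), if %c below is trivially dead, deleting it
/// makes %b dead, which in turn makes %a dead, so all three are removed:
///     %a = add i32 %x, 1
///     %b = mul i32 %a, 2
///     %c = sub i32 %b, 3   ; no uses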
529 bool llvm::RecursivelyDeleteTriviallyDeadInstructions(
530     Value *V, const TargetLibraryInfo *TLI, MemorySSAUpdater *MSSAU,
531     std::function<void(Value *)> AboutToDeleteCallback) {
532   Instruction *I = dyn_cast<Instruction>(V);
533   if (!I || !isInstructionTriviallyDead(I, TLI))
534     return false;
535 
536   SmallVector<WeakTrackingVH, 16> DeadInsts;
537   DeadInsts.push_back(I);
538   RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
539                                              AboutToDeleteCallback);
540 
541   return true;
542 }
543 
544 bool llvm::RecursivelyDeleteTriviallyDeadInstructionsPermissive(
545     SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
546     MemorySSAUpdater *MSSAU,
547     std::function<void(Value *)> AboutToDeleteCallback) {
548   unsigned S = 0, E = DeadInsts.size(), Alive = 0;
549   for (; S != E; ++S) {
550     auto *I = dyn_cast_or_null<Instruction>(DeadInsts[S]);
551     if (!I || !isInstructionTriviallyDead(I)) {
552       DeadInsts[S] = nullptr;
553       ++Alive;
554     }
555   }
556   if (Alive == E)
557     return false;
558   RecursivelyDeleteTriviallyDeadInstructions(DeadInsts, TLI, MSSAU,
559                                              AboutToDeleteCallback);
560   return true;
561 }
562 
563 void llvm::RecursivelyDeleteTriviallyDeadInstructions(
564     SmallVectorImpl<WeakTrackingVH> &DeadInsts, const TargetLibraryInfo *TLI,
565     MemorySSAUpdater *MSSAU,
566     std::function<void(Value *)> AboutToDeleteCallback) {
567   // Process the dead instruction list until empty.
568   while (!DeadInsts.empty()) {
569     Value *V = DeadInsts.pop_back_val();
570     Instruction *I = cast_or_null<Instruction>(V);
571     if (!I)
572       continue;
573     assert(isInstructionTriviallyDead(I, TLI) &&
574            "Live instruction found in dead worklist!");
575     assert(I->use_empty() && "Instructions with uses are not dead.");
576 
577     // Don't lose the debug info while deleting the instructions.
578     salvageDebugInfo(*I);
579 
580     if (AboutToDeleteCallback)
581       AboutToDeleteCallback(I);
582 
583     // Null out all of the instruction's operands to see if any operand becomes
584     // dead as we go.
585     for (Use &OpU : I->operands()) {
586       Value *OpV = OpU.get();
587       OpU.set(nullptr);
588 
589       if (!OpV->use_empty())
590         continue;
591 
592       // If the operand is an instruction that became dead as we nulled out the
593       // operand, and if it is 'trivially' dead, delete it in a future loop
594       // iteration.
595       if (Instruction *OpI = dyn_cast<Instruction>(OpV))
596         if (isInstructionTriviallyDead(OpI, TLI))
597           DeadInsts.push_back(OpI);
598     }
599     if (MSSAU)
600       MSSAU->removeMemoryAccess(I);
601 
602     I->eraseFromParent();
603   }
604 }
605 
606 bool llvm::replaceDbgUsesWithUndef(Instruction *I) {
607   SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
608   findDbgUsers(DbgUsers, I);
609   for (auto *DII : DbgUsers)
610     DII->setKillLocation();
611   return !DbgUsers.empty();
612 }
613 
614 /// areAllUsesEqual - Check whether the uses of a value are all the same.
615 /// This is similar to Instruction::hasOneUse() except this will also return
/// true when there are no uses or multiple uses that all refer to the same
/// user.
618 static bool areAllUsesEqual(Instruction *I) {
619   Value::user_iterator UI = I->user_begin();
620   Value::user_iterator UE = I->user_end();
621   if (UI == UE)
622     return true;
623 
624   User *TheUse = *UI;
625   for (++UI; UI != UE; ++UI) {
626     if (*UI != TheUse)
627       return false;
628   }
629   return true;
630 }
631 
632 /// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
633 /// dead PHI node, due to being a def-use chain of single-use nodes that
634 /// either forms a cycle or is terminated by a trivially dead instruction,
635 /// delete it.  If that makes any of its operands trivially dead, delete them
636 /// too, recursively.  Return true if a change was made.
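///
/// For example (illustrative IR), in the cycle below %p's only user is %q and
/// %q's only user is %p, so both are effectively dead:
///     %p = phi i32 [ 0, %entry ], [ %q, %loop ]
///     %q = add i32 %p, 1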
637 bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
638                                         const TargetLibraryInfo *TLI,
639                                         llvm::MemorySSAUpdater *MSSAU) {
640   SmallPtrSet<Instruction*, 4> Visited;
641   for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
642        I = cast<Instruction>(*I->user_begin())) {
643     if (I->use_empty())
644       return RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
645 
646     // If we find an instruction more than once, we're on a cycle that
647     // won't prove fruitful.
648     if (!Visited.insert(I).second) {
649       // Break the cycle and delete the instruction and its operands.
650       I->replaceAllUsesWith(PoisonValue::get(I->getType()));
651       (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI, MSSAU);
652       return true;
653     }
654   }
655   return false;
656 }
657 
658 static bool
659 simplifyAndDCEInstruction(Instruction *I,
660                           SmallSetVector<Instruction *, 16> &WorkList,
661                           const DataLayout &DL,
662                           const TargetLibraryInfo *TLI) {
663   if (isInstructionTriviallyDead(I, TLI)) {
664     salvageDebugInfo(*I);
665 
666     // Null out all of the instruction's operands to see if any operand becomes
667     // dead as we go.
668     for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
669       Value *OpV = I->getOperand(i);
670       I->setOperand(i, nullptr);
671 
672       if (!OpV->use_empty() || I == OpV)
673         continue;
674 
675       // If the operand is an instruction that became dead as we nulled out the
676       // operand, and if it is 'trivially' dead, delete it in a future loop
677       // iteration.
678       if (Instruction *OpI = dyn_cast<Instruction>(OpV))
679         if (isInstructionTriviallyDead(OpI, TLI))
680           WorkList.insert(OpI);
681     }
682 
683     I->eraseFromParent();
684 
685     return true;
686   }
687 
688   if (Value *SimpleV = simplifyInstruction(I, DL)) {
689     // Add the users to the worklist. CAREFUL: an instruction can use itself,
690     // in the case of a phi node.
691     for (User *U : I->users()) {
692       if (U != I) {
693         WorkList.insert(cast<Instruction>(U));
694       }
695     }
696 
697     // Replace the instruction with its simplified value.
698     bool Changed = false;
699     if (!I->use_empty()) {
700       I->replaceAllUsesWith(SimpleV);
701       Changed = true;
702     }
703     if (isInstructionTriviallyDead(I, TLI)) {
704       I->eraseFromParent();
705       Changed = true;
706     }
707     return Changed;
708   }
709   return false;
710 }
711 
712 /// SimplifyInstructionsInBlock - Scan the specified basic block and try to
713 /// simplify any instructions in it and recursively delete dead instructions.
714 ///
/// This returns true if it changed the code. Note that it can delete
/// instructions in other blocks as well as in this block.
717 bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB,
718                                        const TargetLibraryInfo *TLI) {
719   bool MadeChange = false;
720   const DataLayout &DL = BB->getModule()->getDataLayout();
721 
722 #ifndef NDEBUG
723   // In debug builds, ensure that the terminator of the block is never replaced
724   // or deleted by these simplifications. The idea of simplification is that it
725   // cannot introduce new instructions, and there is no way to replace the
726   // terminator of a block without introducing a new instruction.
727   AssertingVH<Instruction> TerminatorVH(&BB->back());
728 #endif
729 
730   SmallSetVector<Instruction *, 16> WorkList;
731   // Iterate over the original function, only adding insts to the worklist
732   // if they actually need to be revisited. This avoids having to pre-init
733   // the worklist with the entire function's worth of instructions.
734   for (BasicBlock::iterator BI = BB->begin(), E = std::prev(BB->end());
735        BI != E;) {
736     assert(!BI->isTerminator());
737     Instruction *I = &*BI;
738     ++BI;
739 
740     // We're visiting this instruction now, so make sure it's not in the
741     // worklist from an earlier visit.
742     if (!WorkList.count(I))
743       MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
744   }
745 
746   while (!WorkList.empty()) {
747     Instruction *I = WorkList.pop_back_val();
748     MadeChange |= simplifyAndDCEInstruction(I, WorkList, DL, TLI);
749   }
750   return MadeChange;
751 }
752 
753 //===----------------------------------------------------------------------===//
754 //  Control Flow Graph Restructuring.
755 //
756 
757 void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB,
758                                        DomTreeUpdater *DTU) {
759 
  // If DestBB has single-entry PHI nodes, fold them.
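  // For example (illustrative IR), a single-entry PHI such as:
  //     %p = phi i32 [ %v, %pred ]
  // is replaced by %v directly.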
761   while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
762     Value *NewVal = PN->getIncomingValue(0);
    // Replace a self-referencing PHI with poison; it must be dead.
764     if (NewVal == PN) NewVal = PoisonValue::get(PN->getType());
765     PN->replaceAllUsesWith(NewVal);
766     PN->eraseFromParent();
767   }
768 
769   BasicBlock *PredBB = DestBB->getSinglePredecessor();
770   assert(PredBB && "Block doesn't have a single predecessor!");
771 
772   bool ReplaceEntryBB = PredBB->isEntryBlock();
773 
774   // DTU updates: Collect all the edges that enter
775   // PredBB. These dominator edges will be redirected to DestBB.
776   SmallVector<DominatorTree::UpdateType, 32> Updates;
777 
778   if (DTU) {
779     // To avoid processing the same predecessor more than once.
780     SmallPtrSet<BasicBlock *, 2> SeenPreds;
781     Updates.reserve(Updates.size() + 2 * pred_size(PredBB) + 1);
782     for (BasicBlock *PredOfPredBB : predecessors(PredBB))
783       // This predecessor of PredBB may already have DestBB as a successor.
784       if (PredOfPredBB != PredBB)
785         if (SeenPreds.insert(PredOfPredBB).second)
786           Updates.push_back({DominatorTree::Insert, PredOfPredBB, DestBB});
787     SeenPreds.clear();
788     for (BasicBlock *PredOfPredBB : predecessors(PredBB))
789       if (SeenPreds.insert(PredOfPredBB).second)
790         Updates.push_back({DominatorTree::Delete, PredOfPredBB, PredBB});
791     Updates.push_back({DominatorTree::Delete, PredBB, DestBB});
792   }
793 
794   // Zap anything that took the address of DestBB.  Not doing this will give the
795   // address an invalid value.
796   if (DestBB->hasAddressTaken()) {
797     BlockAddress *BA = BlockAddress::get(DestBB);
798     Constant *Replacement =
799       ConstantInt::get(Type::getInt32Ty(BA->getContext()), 1);
800     BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
801                                                      BA->getType()));
802     BA->destroyConstant();
803   }
804 
805   // Anything that branched to PredBB now branches to DestBB.
806   PredBB->replaceAllUsesWith(DestBB);
807 
808   // Splice all the instructions from PredBB to DestBB.
809   PredBB->getTerminator()->eraseFromParent();
810   DestBB->splice(DestBB->begin(), PredBB);
811   new UnreachableInst(PredBB->getContext(), PredBB);
812 
813   // If the PredBB is the entry block of the function, move DestBB up to
814   // become the entry block after we erase PredBB.
815   if (ReplaceEntryBB)
816     DestBB->moveAfter(PredBB);
817 
818   if (DTU) {
819     assert(PredBB->size() == 1 &&
820            isa<UnreachableInst>(PredBB->getTerminator()) &&
821            "The successor list of PredBB isn't empty before "
822            "applying corresponding DTU updates.");
823     DTU->applyUpdatesPermissive(Updates);
824     DTU->deleteBB(PredBB);
825     // Recalculation of DomTree is needed when updating a forward DomTree and
826     // the Entry BB is replaced.
827     if (ReplaceEntryBB && DTU->hasDomTree()) {
828       // The entry block was removed and there is no external interface for
829       // the dominator tree to be notified of this change. In this corner-case
830       // we recalculate the entire tree.
831       DTU->recalculate(*(DestBB->getParent()));
832     }
  } else {
836     PredBB->eraseFromParent(); // Nuke BB if DTU is nullptr.
837   }
838 }
839 
840 /// Return true if we can choose one of these values to use in place of the
841 /// other. Note that we will always choose the non-undef value to keep.
842 static bool CanMergeValues(Value *First, Value *Second) {
843   return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
844 }
845 
846 /// Return true if we can fold BB, an almost-empty BB ending in an unconditional
847 /// branch to Succ, into Succ.
848 ///
849 /// Assumption: Succ is the single successor for BB.
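///
/// For example (illustrative IR), if BB and Succ share a predecessor %Pred,
/// merging is unsafe when a PHI in Succ would need two different values for
/// %Pred, e.g.:
///     %phi = phi i32 [ %x, %Pred ], [ %y, %BB ]
/// where the value %y flowing from BB (and thus, after folding, from %Pred)
/// conflicts with %x.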
850 static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
851   assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");
852 
853   LLVM_DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
854                     << Succ->getName() << "\n");
  // Shortcut: if there is only a single predecessor, it must be BB and
  // merging is always safe.
857   if (Succ->getSinglePredecessor()) return true;
858 
859   // Make a list of the predecessors of BB
860   SmallPtrSet<BasicBlock*, 16> BBPreds(pred_begin(BB), pred_end(BB));
861 
862   // Look at all the phi nodes in Succ, to see if they present a conflict when
863   // merging these blocks
864   for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
865     PHINode *PN = cast<PHINode>(I);
866 
867     // If the incoming value from BB is again a PHINode in
868     // BB which has the same incoming value for *PI as PN does, we can
869     // merge the phi nodes and then the blocks can still be merged
870     PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
871     if (BBPN && BBPN->getParent() == BB) {
872       for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
873         BasicBlock *IBB = PN->getIncomingBlock(PI);
874         if (BBPreds.count(IBB) &&
875             !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
876                             PN->getIncomingValue(PI))) {
877           LLVM_DEBUG(dbgs()
878                      << "Can't fold, phi node " << PN->getName() << " in "
879                      << Succ->getName() << " is conflicting with "
880                      << BBPN->getName() << " with regard to common predecessor "
881                      << IBB->getName() << "\n");
882           return false;
883         }
884       }
885     } else {
886       Value* Val = PN->getIncomingValueForBlock(BB);
887       for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
888         // See if the incoming value for the common predecessor is equal to the
889         // one for BB, in which case this phi node will not prevent the merging
890         // of the block.
891         BasicBlock *IBB = PN->getIncomingBlock(PI);
892         if (BBPreds.count(IBB) &&
893             !CanMergeValues(Val, PN->getIncomingValue(PI))) {
894           LLVM_DEBUG(dbgs() << "Can't fold, phi node " << PN->getName()
895                             << " in " << Succ->getName()
896                             << " is conflicting with regard to common "
897                             << "predecessor " << IBB->getName() << "\n");
898           return false;
899         }
900       }
901     }
902   }
903 
904   return true;
905 }
906 
907 using PredBlockVector = SmallVector<BasicBlock *, 16>;
908 using IncomingValueMap = DenseMap<BasicBlock *, Value *>;
909 
910 /// Determines the value to use as the phi node input for a block.
911 ///
/// Select between \p OldVal and any value that we know flows from \p BB
/// to a particular phi on the basis of which one (if either) is not
/// undef. Update IncomingValues based on the selected value.
915 ///
916 /// \param OldVal The value we are considering selecting.
917 /// \param BB The block that the value flows in from.
918 /// \param IncomingValues A map from block-to-value for other phi inputs
919 /// that we have examined.
920 ///
921 /// \returns the selected value.
922 static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
923                                           IncomingValueMap &IncomingValues) {
924   if (!isa<UndefValue>(OldVal)) {
925     assert((!IncomingValues.count(BB) ||
926             IncomingValues.find(BB)->second == OldVal) &&
927            "Expected OldVal to match incoming value from BB!");
928 
929     IncomingValues.insert(std::make_pair(BB, OldVal));
930     return OldVal;
931   }
932 
933   IncomingValueMap::const_iterator It = IncomingValues.find(BB);
934   if (It != IncomingValues.end()) return It->second;
935 
936   return OldVal;
937 }
938 
939 /// Create a map from block to value for the operands of a
940 /// given phi.
941 ///
942 /// Create a map from block to value for each non-undef value flowing
943 /// into \p PN.
944 ///
945 /// \param PN The phi we are collecting the map for.
946 /// \param IncomingValues [out] The map from block to value for this phi.
947 static void gatherIncomingValuesToPhi(PHINode *PN,
948                                       IncomingValueMap &IncomingValues) {
949   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
950     BasicBlock *BB = PN->getIncomingBlock(i);
951     Value *V = PN->getIncomingValue(i);
952 
953     if (!isa<UndefValue>(V))
954       IncomingValues.insert(std::make_pair(BB, V));
955   }
956 }
957 
958 /// Replace the incoming undef values to a phi with the values
959 /// from a block-to-value map.
960 ///
961 /// \param PN The phi we are replacing the undefs in.
962 /// \param IncomingValues A map from block to value.
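///
/// For example (illustrative IR), given the map { %bb1 -> %v }, the phi:
///     %p = phi i32 [ undef, %bb1 ], [ %w, %bb2 ]
/// becomes:
///     %p = phi i32 [ %v, %bb1 ], [ %w, %bb2 ]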
963 static void replaceUndefValuesInPhi(PHINode *PN,
964                                     const IncomingValueMap &IncomingValues) {
965   SmallVector<unsigned> TrueUndefOps;
966   for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
967     Value *V = PN->getIncomingValue(i);
968 
969     if (!isa<UndefValue>(V)) continue;
970 
971     BasicBlock *BB = PN->getIncomingBlock(i);
972     IncomingValueMap::const_iterator It = IncomingValues.find(BB);
973 
974     // Keep track of undef/poison incoming values. Those must match, so we fix
975     // them up below if needed.
976     // Note: this is conservatively correct, but we could try harder and group
977     // the undef values per incoming basic block.
978     if (It == IncomingValues.end()) {
979       TrueUndefOps.push_back(i);
980       continue;
981     }
982 
983     // There is a defined value for this incoming block, so map this undef
984     // incoming value to the defined value.
985     PN->setIncomingValue(i, It->second);
986   }
987 
988   // If there are both undef and poison values incoming, then convert those
989   // values to undef. It is invalid to have different values for the same
990   // incoming block.
991   unsigned PoisonCount = count_if(TrueUndefOps, [&](unsigned i) {
992     return isa<PoisonValue>(PN->getIncomingValue(i));
993   });
994   if (PoisonCount != 0 && PoisonCount != TrueUndefOps.size()) {
995     for (unsigned i : TrueUndefOps)
996       PN->setIncomingValue(i, UndefValue::get(PN->getType()));
997   }
998 }
999 
1000 /// Replace a value flowing from a block to a phi with
1001 /// potentially multiple instances of that value flowing from the
1002 /// block's predecessors to the phi.
1003 ///
1004 /// \param BB The block with the value flowing into the phi.
1005 /// \param BBPreds The predecessors of BB.
1006 /// \param PN The phi that we are updating.
1007 static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
1008                                                 const PredBlockVector &BBPreds,
1009                                                 PHINode *PN) {
1010   Value *OldVal = PN->removeIncomingValue(BB, false);
1011   assert(OldVal && "No entry in PHI for Pred BB!");
1012 
1013   IncomingValueMap IncomingValues;
1014 
1015   // We are merging two blocks - BB, and the block containing PN - and
1016   // as a result we need to redirect edges from the predecessors of BB
1017   // to go to the block containing PN, and update PN
1018   // accordingly. Since we allow merging blocks in the case where the
1019   // predecessor and successor blocks both share some predecessors,
1020   // and where some of those common predecessors might have undef
1021   // values flowing into PN, we want to rewrite those values to be
1022   // consistent with the non-undef values.
1023 
1024   gatherIncomingValuesToPhi(PN, IncomingValues);
1025 
1026   // If this incoming value is one of the PHI nodes in BB, the new entries
1027   // in the PHI node are the entries from the old PHI.
1028   if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
1029     PHINode *OldValPN = cast<PHINode>(OldVal);
1030     for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
1031       // Note that, since we are merging phi nodes and BB and Succ might
1032       // have common predecessors, we could end up with a phi node with
1033       // identical incoming branches. This will be cleaned up later (and
1034       // will trigger asserts if we try to clean it up now, without also
1035       // simplifying the corresponding conditional branch).
1036       BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
1037       Value *PredVal = OldValPN->getIncomingValue(i);
1038       Value *Selected = selectIncomingValueForBlock(PredVal, PredBB,
1039                                                     IncomingValues);
1040 
1041       // And add a new incoming value for this predecessor for the
1042       // newly retargeted branch.
1043       PN->addIncoming(Selected, PredBB);
1044     }
1045   } else {
1046     for (unsigned i = 0, e = BBPreds.size(); i != e; ++i) {
1047       // Update existing incoming values in PN for this
1048       // predecessor of BB.
1049       BasicBlock *PredBB = BBPreds[i];
1050       Value *Selected = selectIncomingValueForBlock(OldVal, PredBB,
1051                                                     IncomingValues);
1052 
1053       // And add a new incoming value for this predecessor for the
1054       // newly retargeted branch.
1055       PN->addIncoming(Selected, PredBB);
1056     }
1057   }
1058 
1059   replaceUndefValuesInPhi(PN, IncomingValues);
1060 }
1061 
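/// An illustrative sketch: an almost-empty block BB ending in an
/// unconditional branch, e.g.
///     BB:
///       br label %Succ
/// can usually be eliminated by retargeting all of BB's predecessors to Succ,
/// subject to the PHI-conflict and loop-metadata checks below.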
1062 bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
1063                                                    DomTreeUpdater *DTU) {
1064   assert(BB != &BB->getParent()->getEntryBlock() &&
1065          "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");
1066 
1067   // We can't eliminate infinite loops.
1068   BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
1069   if (BB == Succ) return false;
1070 
1071   // Check to see if merging these blocks would cause conflicts for any of the
1072   // phi nodes in BB or Succ. If not, we can safely merge.
1073   if (!CanPropagatePredecessorsForPHIs(BB, Succ)) return false;
1074 
1075   // Check for cases where Succ has multiple predecessors and a PHI node in BB
1076   // has uses which will not disappear when the PHI nodes are merged.  It is
1077   // possible to handle such cases, but difficult: it requires checking whether
1078   // BB dominates Succ, which is non-trivial to calculate in the case where
1079   // Succ has multiple predecessors.  Also, it requires checking whether
1080   // constructing the necessary self-referential PHI node doesn't introduce any
1081   // conflicts; this isn't too difficult, but the previous code for doing this
1082   // was incorrect.
1083   //
1084   // Note that if this check finds a live use, BB dominates Succ, so BB is
1085   // something like a loop pre-header (or rarely, a part of an irreducible CFG);
1086   // folding the branch isn't profitable in that case anyway.
1087   if (!Succ->getSinglePredecessor()) {
1088     BasicBlock::iterator BBI = BB->begin();
1089     while (isa<PHINode>(*BBI)) {
1090       for (Use &U : BBI->uses()) {
1091         if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
1092           if (PN->getIncomingBlock(U) != BB)
1093             return false;
1094         } else {
1095           return false;
1096         }
1097       }
1098       ++BBI;
1099     }
1100   }
1101 
  // If 'BB' and 'BB->Pred' are loop latches, bail out to preserve inner-loop
  // metadata.
1104   //
1105   // FIXME: This is a stop-gap solution to preserve inner-loop metadata given
1106   // current status (that loop metadata is implemented as metadata attached to
1107   // the branch instruction in the loop latch block). To quote from review
1108   // comments, "the current representation of loop metadata (using a loop latch
1109   // terminator attachment) is known to be fundamentally broken. Loop latches
1110   // are not uniquely associated with loops (both in that a latch can be part of
1111   // multiple loops and a loop may have multiple latches). Loop headers are. The
1112   // solution to this problem is also known: Add support for basic block
1113   // metadata, and attach loop metadata to the loop header."
1114   //
1115   // Why bail out:
  // In this case, we expect 'BB' is the latch for the outer loop and
  // 'BB->Pred' is the latch for the inner loop (see reason below), so bail out
  // to preserve the inner-loop metadata rather than eliminating 'BB' and
  // attaching its metadata to this inner loop.
  // - The reason we believe 'BB' and 'BB->Pred' have different inner-most
  // loops: assuming 'BB' and 'BB->Pred' are from the same inner-most loop L,
  // then 'BB' is both the header and the latch of 'L', and thereby 'L' must
  // consist of one self-looping basic block, which contradicts the assumption.
1124   //
1125   // To illustrate how inner-loop metadata is dropped:
1126   //
1127   // CFG Before
1128   //
  // BB is while.cond.exit, attached with loop metadata md2.
1130   // BB->Pred is for.body, attached with loop metadata md1.
1131   //
1132   //      entry
1133   //        |
1134   //        v
1135   // ---> while.cond   ------------->  while.end
1136   // |       |
1137   // |       v
1138   // |   while.body
1139   // |       |
1140   // |       v
1141   // |    for.body <---- (md1)
1142   // |       |  |______|
1143   // |       v
1144   // |    while.cond.exit (md2)
1145   // |       |
1146   // |_______|
1147   //
1148   // CFG After
1149   //
1150   // while.cond1 is the merge of while.cond.exit and while.cond above.
1151   // for.body is attached with md2, and md1 is dropped.
  // If LoopSimplify runs later (as a part of a loop pass), it could create
  // dedicated exits for the inner loop (essentially adding `while.cond.exit`
  // back), but it won't see 'md1' nor restore it for the inner loop.
1155   //
1156   //       entry
1157   //         |
1158   //         v
1159   // ---> while.cond1  ------------->  while.end
1160   // |       |
1161   // |       v
1162   // |   while.body
1163   // |       |
1164   // |       v
1165   // |    for.body <---- (md2)
1166   // |_______|  |______|
1167   if (Instruction *TI = BB->getTerminator())
1168     if (TI->hasMetadata(LLVMContext::MD_loop))
1169       for (BasicBlock *Pred : predecessors(BB))
1170         if (Instruction *PredTI = Pred->getTerminator())
1171           if (PredTI->hasMetadata(LLVMContext::MD_loop))
1172             return false;
1173 
1174   LLVM_DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);
1175 
1176   SmallVector<DominatorTree::UpdateType, 32> Updates;
1177   if (DTU) {
1178     // To avoid processing the same predecessor more than once.
1179     SmallPtrSet<BasicBlock *, 8> SeenPreds;
1180     // All predecessors of BB will be moved to Succ.
1181     SmallPtrSet<BasicBlock *, 8> PredsOfSucc(pred_begin(Succ), pred_end(Succ));
1182     Updates.reserve(Updates.size() + 2 * pred_size(BB) + 1);
1183     for (auto *PredOfBB : predecessors(BB))
1184       // This predecessor of BB may already have Succ as a successor.
1185       if (!PredsOfSucc.contains(PredOfBB))
1186         if (SeenPreds.insert(PredOfBB).second)
1187           Updates.push_back({DominatorTree::Insert, PredOfBB, Succ});
1188     SeenPreds.clear();
1189     for (auto *PredOfBB : predecessors(BB))
1190       if (SeenPreds.insert(PredOfBB).second)
1191         Updates.push_back({DominatorTree::Delete, PredOfBB, BB});
1192     Updates.push_back({DominatorTree::Delete, BB, Succ});
1193   }
1194 
1195   if (isa<PHINode>(Succ->begin())) {
    // If there is more than one predecessor of Succ, and there are PHI nodes
    // in the successor, then we need to add incoming edges for the PHI nodes.
1199     const PredBlockVector BBPreds(predecessors(BB));
1200 
1201     // Loop over all of the PHI nodes in the successor of BB.
1202     for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
1203       PHINode *PN = cast<PHINode>(I);
1204 
1205       redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN);
1206     }
1207   }
1208 
1209   if (Succ->getSinglePredecessor()) {
1210     // BB is the only predecessor of Succ, so Succ will end up with exactly
1211     // the same predecessors BB had.
1212 
1213     // Copy over any phi, debug or lifetime instruction.
1214     BB->getTerminator()->eraseFromParent();
1215     Succ->splice(Succ->getFirstNonPHI()->getIterator(), BB);
1216   } else {
1217     while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
1218       // We explicitly check for such uses in CanPropagatePredecessorsForPHIs.
1219       assert(PN->use_empty() && "There shouldn't be any uses here!");
1220       PN->eraseFromParent();
1221     }
1222   }
1223 
1224   // If the unconditional branch we replaced contains llvm.loop metadata, we
1225   // add the metadata to the branch instructions in the predecessors.
1226   if (Instruction *TI = BB->getTerminator())
1227     if (MDNode *LoopMD = TI->getMetadata(LLVMContext::MD_loop))
1228       for (BasicBlock *Pred : predecessors(BB))
1229         Pred->getTerminator()->setMetadata(LLVMContext::MD_loop, LoopMD);
1230 
1231   // Everything that jumped to BB now goes to Succ.
1232   BB->replaceAllUsesWith(Succ);
1233   if (!Succ->hasName()) Succ->takeName(BB);
1234 
1235   // Clear the successor list of BB to match updates applying to DTU later.
1236   if (BB->getTerminator())
1237     BB->back().eraseFromParent();
1238   new UnreachableInst(BB->getContext(), BB);
1239   assert(succ_empty(BB) && "The successor list of BB isn't empty before "
1240                            "applying corresponding DTU updates.");
1241 
1242   if (DTU)
1243     DTU->applyUpdates(Updates);
1244 
1245   DeleteDeadBlock(BB, DTU);
1246 
1247   return true;
1248 }
1249 
1250 static bool
1251 EliminateDuplicatePHINodesNaiveImpl(BasicBlock *BB,
1252                                     SmallPtrSetImpl<PHINode *> &ToRemove) {
1253   // This implementation doesn't currently consider undef operands
1254   // specially. Theoretically, two phis which are identical except for
1255   // one having an undef where the other doesn't could be collapsed.
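  //
  // For example (illustrative IR), the two PHIs below are identical, so the
  // second can be replaced by the first:
  //     %a = phi i32 [ %x, %bb1 ], [ %y, %bb2 ]
  //     %b = phi i32 [ %x, %bb1 ], [ %y, %bb2 ]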
1256 
1257   bool Changed = false;
1258 
1259   // Examine each PHI.
1260   // Note that increment of I must *NOT* be in the iteration_expression, since
1261   // we don't want to immediately advance when we restart from the beginning.
1262   for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I);) {
1263     ++I;
1264     // Is there an identical PHI node in this basic block?
    // Note that we only look in the upper triangle of the pairwise comparison
    // matrix; we have already checked that the PHIs in the lower triangle
    // aren't identical.
1267     for (auto J = I; PHINode *DuplicatePN = dyn_cast<PHINode>(J); ++J) {
1268       if (ToRemove.contains(DuplicatePN))
1269         continue;
1270       if (!DuplicatePN->isIdenticalToWhenDefined(PN))
1271         continue;
1272       // A duplicate. Replace this PHI with the base PHI.
1273       ++NumPHICSEs;
1274       DuplicatePN->replaceAllUsesWith(PN);
1275       ToRemove.insert(DuplicatePN);
1276       Changed = true;
1277 
1278       // The RAUW can change PHIs that we already visited.
1279       I = BB->begin();
1280       break; // Start over from the beginning.
1281     }
1282   }
1283   return Changed;
1284 }
1285 
1286 static bool
1287 EliminateDuplicatePHINodesSetBasedImpl(BasicBlock *BB,
1288                                        SmallPtrSetImpl<PHINode *> &ToRemove) {
1289   // This implementation doesn't currently consider undef operands
1290   // specially. Theoretically, two phis which are identical except for
1291   // one having an undef where the other doesn't could be collapsed.
1292 
1293   struct PHIDenseMapInfo {
1294     static PHINode *getEmptyKey() {
1295       return DenseMapInfo<PHINode *>::getEmptyKey();
1296     }
1297 
1298     static PHINode *getTombstoneKey() {
1299       return DenseMapInfo<PHINode *>::getTombstoneKey();
1300     }
1301 
1302     static bool isSentinel(PHINode *PN) {
1303       return PN == getEmptyKey() || PN == getTombstoneKey();
1304     }
1305 
1306     // WARNING: this logic must be kept in sync with
1307     //          Instruction::isIdenticalToWhenDefined()!
1308     static unsigned getHashValueImpl(PHINode *PN) {
1309       // Compute a hash value on the operands. Instcombine will likely have
1310       // sorted them, which helps expose duplicates, but we have to check all
1311       // the operands to be safe in case instcombine hasn't run.
1312       return static_cast<unsigned>(hash_combine(
1313           hash_combine_range(PN->value_op_begin(), PN->value_op_end()),
1314           hash_combine_range(PN->block_begin(), PN->block_end())));
1315     }
1316 
1317     static unsigned getHashValue(PHINode *PN) {
1318 #ifndef NDEBUG
1319       // If -phicse-debug-hash was specified, return a constant -- this
1320       // will force all hashing to collide, so we'll exhaustively search
1321       // the table for a match, and the assertion in isEqual will fire if
1322       // there's a bug causing equal keys to hash differently.
1323       if (PHICSEDebugHash)
1324         return 0;
1325 #endif
1326       return getHashValueImpl(PN);
1327     }
1328 
1329     static bool isEqualImpl(PHINode *LHS, PHINode *RHS) {
1330       if (isSentinel(LHS) || isSentinel(RHS))
1331         return LHS == RHS;
1332       return LHS->isIdenticalTo(RHS);
1333     }
1334 
1335     static bool isEqual(PHINode *LHS, PHINode *RHS) {
1336       // These comparisons are nontrivial, so assert that equality implies
1337       // hash equality (DenseMap demands this as an invariant).
1338       bool Result = isEqualImpl(LHS, RHS);
1339       assert(!Result || (isSentinel(LHS) && LHS == RHS) ||
1340              getHashValueImpl(LHS) == getHashValueImpl(RHS));
1341       return Result;
1342     }
1343   };
1344 
1345   // Set of unique PHINodes.
1346   DenseSet<PHINode *, PHIDenseMapInfo> PHISet;
1347   PHISet.reserve(4 * PHICSENumPHISmallSize);
1348 
1349   // Examine each PHI.
1350   bool Changed = false;
1351   for (auto I = BB->begin(); PHINode *PN = dyn_cast<PHINode>(I++);) {
1352     if (ToRemove.contains(PN))
1353       continue;
1354     auto Inserted = PHISet.insert(PN);
1355     if (!Inserted.second) {
1356       // A duplicate. Replace this PHI with its duplicate.
1357       ++NumPHICSEs;
1358       PN->replaceAllUsesWith(*Inserted.first);
1359       ToRemove.insert(PN);
1360       Changed = true;
1361 
1362       // The RAUW can change PHIs that we already visited. Start over from the
1363       // beginning.
1364       PHISet.clear();
1365       I = BB->begin();
1366     }
1367   }
1368 
1369   return Changed;
1370 }
1371 
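// For example (illustrative IR), given:
//   %p1 = phi i32 [ %a, %bb0 ], [ %b, %bb1 ]
//   %p2 = phi i32 [ %a, %bb0 ], [ %b, %bb1 ]
// uses of %p2 are RAUW'd to %p1 and %p2 is added to ToRemove. Blocks with few
// PHIs use the quadratic naive implementation; larger blocks use the
// hash-set-based implementation above.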
1372 bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB,
1373                                       SmallPtrSetImpl<PHINode *> &ToRemove) {
1374   if (
1375 #ifndef NDEBUG
1376       !PHICSEDebugHash &&
1377 #endif
1378       hasNItemsOrLess(BB->phis(), PHICSENumPHISmallSize))
1379     return EliminateDuplicatePHINodesNaiveImpl(BB, ToRemove);
1380   return EliminateDuplicatePHINodesSetBasedImpl(BB, ToRemove);
1381 }
1382 
1383 bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
1384   SmallPtrSet<PHINode *, 8> ToRemove;
1385   bool Changed = EliminateDuplicatePHINodes(BB, ToRemove);
1386   for (PHINode *PN : ToRemove)
1387     PN->eraseFromParent();
1388   return Changed;
1389 }
1390 
1391 /// If the specified pointer points to an object that we control, try to modify
1392 /// the object's alignment to PrefAlign. Returns a minimum known alignment of
1393 /// the value after the operation, which may be lower than PrefAlign.
1394 ///
/// Increasing a value's alignment often isn't possible, though. If alignment
1396 /// important, a more reliable approach is to simply align all global variables
1397 /// and allocation instructions to their preferred alignment from the beginning.
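///
/// For example (illustrative), an `alloca i32, align 4` may be bumped to
/// align 16 when PrefAlign is 16, provided that does not exceed the natural
/// stack alignment of the target.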
1398 static Align tryEnforceAlignment(Value *V, Align PrefAlign,
1399                                  const DataLayout &DL) {
1400   V = V->stripPointerCasts();
1401 
1402   if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1403     // TODO: Ideally, this function would not be called if PrefAlign is smaller
1404     // than the current alignment, as the known bits calculation should have
1405     // already taken it into account. However, this is not always the case,
1406     // as computeKnownBits() has a depth limit, while stripPointerCasts()
1407     // doesn't.
1408     Align CurrentAlign = AI->getAlign();
1409     if (PrefAlign <= CurrentAlign)
1410       return CurrentAlign;
1411 
1412     // If the preferred alignment is greater than the natural stack alignment
1413     // then don't round up. This avoids dynamic stack realignment.
1414     if (DL.exceedsNaturalStackAlignment(PrefAlign))
1415       return CurrentAlign;
1416     AI->setAlignment(PrefAlign);
1417     return PrefAlign;
1418   }
1419 
1420   if (auto *GO = dyn_cast<GlobalObject>(V)) {
1421     // TODO: as above, this shouldn't be necessary.
1422     Align CurrentAlign = GO->getPointerAlignment(DL);
1423     if (PrefAlign <= CurrentAlign)
1424       return CurrentAlign;
1425 
1426     // If there is a large requested alignment and we can, bump up the alignment
1427     // of the global.  If the memory we set aside for the global may not be the
    // memory used by the final program, then it is impossible for us to
    // reliably enforce the preferred alignment.
1430     if (!GO->canIncreaseAlignment())
1431       return CurrentAlign;
1432 
1433     if (GO->isThreadLocal()) {
1434       unsigned MaxTLSAlign = GO->getParent()->getMaxTLSAlignment() / CHAR_BIT;
1435       if (MaxTLSAlign && PrefAlign > Align(MaxTLSAlign))
1436         PrefAlign = Align(MaxTLSAlign);
1437     }
1438 
1439     GO->setAlignment(PrefAlign);
1440     return PrefAlign;
1441   }
1442 
1443   return Align(1);
1444 }
1445 
1446 Align llvm::getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign,
1447                                        const DataLayout &DL,
1448                                        const Instruction *CxtI,
1449                                        AssumptionCache *AC,
1450                                        const DominatorTree *DT) {
1451   assert(V->getType()->isPointerTy() &&
1452          "getOrEnforceKnownAlignment expects a pointer!");
1453 
1454   KnownBits Known = computeKnownBits(V, DL, 0, AC, CxtI, DT);
1455   unsigned TrailZ = Known.countMinTrailingZeros();
1456 
1457   // Avoid trouble with ridiculously large TrailZ values, such as
1458   // those computed from a null pointer.
1459   // LLVM doesn't support alignments larger than (1 << MaxAlignmentExponent).
1460   TrailZ = std::min(TrailZ, +Value::MaxAlignmentExponent);
1461 
1462   Align Alignment = Align(1ull << std::min(Known.getBitWidth() - 1, TrailZ));
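  // E.g., if the low four bits of V are known to be zero, TrailZ is 4 and the
  // inferred alignment is 1 << 4 = 16 bytes (illustrative).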
1463 
1464   if (PrefAlign && *PrefAlign > Alignment)
1465     Alignment = std::max(Alignment, tryEnforceAlignment(V, *PrefAlign, DL));
1466 
  // No further adjustment is needed; return the best alignment we know of.
1468   return Alignment;
1469 }
1470 
1471 ///===---------------------------------------------------------------------===//
1472 ///  Dbg Intrinsic utilities
1473 ///
1474 
1475 /// See if there is a dbg.value intrinsic for DIVar for the PHI node.
1476 static bool PhiHasDebugValue(DILocalVariable *DIVar,
1477                              DIExpression *DIExpr,
1478                              PHINode *APN) {
1479   // Since we can't guarantee that the original dbg.declare intrinsic
1480   // is removed by LowerDbgDeclare(), we need to make sure that we are
1481   // not inserting the same dbg.value intrinsic over and over.
1482   SmallVector<DbgValueInst *, 1> DbgValues;
1483   findDbgValues(DbgValues, APN);
1484   for (auto *DVI : DbgValues) {
1485     assert(is_contained(DVI->getValues(), APN));
1486     if ((DVI->getVariable() == DIVar) && (DVI->getExpression() == DIExpr))
1487       return true;
1488   }
1489   return false;
1490 }
1491 
1492 /// Check if the alloc size of \p ValTy is large enough to cover the variable
1493 /// (or fragment of the variable) described by \p DII.
1494 ///
1495 /// This is primarily intended as a helper for the different
1496 /// ConvertDebugDeclareToDebugValue functions. The dbg.declare that is converted
1497 /// describes an alloca'd variable, so we need to use the alloc size of the
1498 /// value when doing the comparison. E.g. an i1 value will be identified as
/// covering an n-bit fragment if the alloc size of i1 is at least n bits.
1500 static bool valueCoversEntireFragment(Type *ValTy, DbgVariableIntrinsic *DII) {
1501   const DataLayout &DL = DII->getModule()->getDataLayout();
1502   TypeSize ValueSize = DL.getTypeAllocSizeInBits(ValTy);
1503   if (std::optional<uint64_t> FragmentSize = DII->getFragmentSizeInBits())
1504     return TypeSize::isKnownGE(ValueSize, TypeSize::getFixed(*FragmentSize));
1505 
1506   // We can't always calculate the size of the DI variable (e.g. if it is a
1507   // VLA). Try to use the size of the alloca that the dbg intrinsic describes
  // instead.
1509   if (DII->isAddressOfVariable()) {
1510     // DII should have exactly 1 location when it is an address.
1511     assert(DII->getNumVariableLocationOps() == 1 &&
1512            "address of variable must have exactly 1 location operand.");
1513     if (auto *AI =
1514             dyn_cast_or_null<AllocaInst>(DII->getVariableLocationOp(0))) {
1515       if (std::optional<TypeSize> FragmentSize =
1516               AI->getAllocationSizeInBits(DL)) {
1517         return TypeSize::isKnownGE(ValueSize, *FragmentSize);
1518       }
1519     }
1520   }
1521   // Could not determine size of variable. Conservatively return false.
1522   return false;
1523 }
1524 
1525 /// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
1526 /// that has an associated llvm.dbg.declare intrinsic.
1527 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1528                                            StoreInst *SI, DIBuilder &Builder) {
1529   assert(DII->isAddressOfVariable() || isa<DbgAssignIntrinsic>(DII));
1530   auto *DIVar = DII->getVariable();
1531   assert(DIVar && "Missing variable");
1532   auto *DIExpr = DII->getExpression();
1533   Value *DV = SI->getValueOperand();
1534 
1535   DebugLoc NewLoc = getDebugValueLoc(DII);
1536 
1537   // If the alloca describes the variable itself, i.e. the expression in the
1538   // dbg.declare doesn't start with a dereference, we can perform the
1539   // conversion if the value covers the entire fragment of DII.
1540   // If the alloca describes the *address* of DIVar, i.e. DIExpr is
1541   // *just* a DW_OP_deref, we use DV as is for the dbg.value.
1542   // We conservatively ignore other dereferences, because the following two are
1543   // not equivalent:
1544   //     dbg.declare(alloca, ..., !Expr(deref, plus_uconstant, 2))
1545   //     dbg.value(DV, ..., !Expr(deref, plus_uconstant, 2))
1546   // The former is adding 2 to the address of the variable, whereas the latter
1547   // is adding 2 to the value of the variable. As such, we insist on just a
1548   // deref expression.
1549   bool CanConvert =
1550       DIExpr->isDeref() || (!DIExpr->startsWithDeref() &&
1551                             valueCoversEntireFragment(DV->getType(), DII));
1552   if (CanConvert) {
1553     Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc, SI);
1554     return;
1555   }
1556 
1557   // FIXME: If storing to a part of the variable described by the dbg.declare,
1558   // then we want to insert a dbg.value for the corresponding fragment.
1559   LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: " << *DII
1560                     << '\n');
1561   // For now, when there is a store to parts of the variable (but we do not
  // know which part), we insert a dbg.value intrinsic to indicate that we
1563   // know nothing about the variable's content.
1564   DV = UndefValue::get(DV->getType());
1565   Builder.insertDbgValueIntrinsic(DV, DIVar, DIExpr, NewLoc, SI);
1566 }
1567 
1568 /// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
1569 /// that has an associated llvm.dbg.declare intrinsic.
1570 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1571                                            LoadInst *LI, DIBuilder &Builder) {
1572   auto *DIVar = DII->getVariable();
1573   auto *DIExpr = DII->getExpression();
1574   assert(DIVar && "Missing variable");
1575 
1576   if (!valueCoversEntireFragment(LI->getType(), DII)) {
1577     // FIXME: If only referring to a part of the variable described by the
1578     // dbg.declare, then we want to insert a dbg.value for the corresponding
1579     // fragment.
1580     LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1581                       << *DII << '\n');
1582     return;
1583   }
1584 
1585   DebugLoc NewLoc = getDebugValueLoc(DII);
1586 
1587   // We are now tracking the loaded value instead of the address. In the
1588   // future if multi-location support is added to the IR, it might be
1589   // preferable to keep tracking both the loaded value and the original
  // address in case the alloca cannot be elided.
1591   Instruction *DbgValue = Builder.insertDbgValueIntrinsic(
1592       LI, DIVar, DIExpr, NewLoc, (Instruction *)nullptr);
1593   DbgValue->insertAfter(LI);
1594 }
1595 
1596 /// Inserts a llvm.dbg.value intrinsic after a phi that has an associated
1597 /// llvm.dbg.declare intrinsic.
1598 void llvm::ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
1599                                            PHINode *APN, DIBuilder &Builder) {
1600   auto *DIVar = DII->getVariable();
1601   auto *DIExpr = DII->getExpression();
1602   assert(DIVar && "Missing variable");
1603 
1604   if (PhiHasDebugValue(DIVar, DIExpr, APN))
1605     return;
1606 
1607   if (!valueCoversEntireFragment(APN->getType(), DII)) {
1608     // FIXME: If only referring to a part of the variable described by the
1609     // dbg.declare, then we want to insert a dbg.value for the corresponding
1610     // fragment.
1611     LLVM_DEBUG(dbgs() << "Failed to convert dbg.declare to dbg.value: "
1612                       << *DII << '\n');
1613     return;
1614   }
1615 
1616   BasicBlock *BB = APN->getParent();
1617   auto InsertionPt = BB->getFirstInsertionPt();
1618 
1619   DebugLoc NewLoc = getDebugValueLoc(DII);
1620 
1621   // The block may be a catchswitch block, which does not have a valid
1622   // insertion point.
1623   // FIXME: Insert dbg.value markers in the successors when appropriate.
1624   if (InsertionPt != BB->end())
1625     Builder.insertDbgValueIntrinsic(APN, DIVar, DIExpr, NewLoc, &*InsertionPt);
1626 }
1627 
1628 /// Determine whether this alloca is either a VLA or an array.
1629 static bool isArray(AllocaInst *AI) {
1630   return AI->isArrayAllocation() ||
1631          (AI->getAllocatedType() && AI->getAllocatedType()->isArrayTy());
1632 }
1633 
1634 /// Determine whether this alloca is a structure.
1635 static bool isStructure(AllocaInst *AI) {
1636   return AI->getAllocatedType() && AI->getAllocatedType()->isStructTy();
1637 }
1638 
1639 /// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
1640 /// of llvm.dbg.value intrinsics.
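///
/// For example (illustrative IR), for a scalar alloca:
///   %x = alloca i32
///   call void @llvm.dbg.declare(metadata ptr %x, metadata !var, metadata !ex)
///   store i32 %v, ptr %x
/// the dbg.declare is erased and a dbg.value tracking the stored value is
/// inserted before the store:
///   call void @llvm.dbg.value(metadata i32 %v, metadata !var, metadata !ex)
///   store i32 %v, ptr %x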
1641 bool llvm::LowerDbgDeclare(Function &F) {
1642   bool Changed = false;
1643   DIBuilder DIB(*F.getParent(), /*AllowUnresolved*/ false);
1644   SmallVector<DbgDeclareInst *, 4> Dbgs;
1645   for (auto &FI : F)
1646     for (Instruction &BI : FI)
1647       if (auto DDI = dyn_cast<DbgDeclareInst>(&BI))
1648         Dbgs.push_back(DDI);
1649 
1650   if (Dbgs.empty())
1651     return Changed;
1652 
1653   for (auto &I : Dbgs) {
1654     DbgDeclareInst *DDI = I;
1655     AllocaInst *AI = dyn_cast_or_null<AllocaInst>(DDI->getAddress());
1656     // If this is an alloca for a scalar variable, insert a dbg.value
1657     // at each load and store to the alloca and erase the dbg.declare.
1658     // The dbg.values allow tracking a variable even if it is not
1659     // stored on the stack, while the dbg.declare can only describe
1660     // the stack slot (and at a lexical-scope granularity). Later
1661     // passes will attempt to elide the stack slot.
1662     if (!AI || isArray(AI) || isStructure(AI))
1663       continue;
1664 
1665     // A volatile load/store means that the alloca can't be elided anyway.
1666     if (llvm::any_of(AI->users(), [](User *U) -> bool {
1667           if (LoadInst *LI = dyn_cast<LoadInst>(U))
1668             return LI->isVolatile();
1669           if (StoreInst *SI = dyn_cast<StoreInst>(U))
1670             return SI->isVolatile();
1671           return false;
1672         }))
1673       continue;
1674 
1675     SmallVector<const Value *, 8> WorkList;
1676     WorkList.push_back(AI);
1677     while (!WorkList.empty()) {
1678       const Value *V = WorkList.pop_back_val();
1679       for (const auto &AIUse : V->uses()) {
1680         User *U = AIUse.getUser();
1681         if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
1682           if (AIUse.getOperandNo() == 1)
1683             ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
1684         } else if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
1685           ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
1686         } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
1687           // This is a call by-value or some other instruction that takes a
1688           // pointer to the variable. Insert a *value* intrinsic that describes
1689           // the variable by dereferencing the alloca.
1690           if (!CI->isLifetimeStartOrEnd()) {
1691             DebugLoc NewLoc = getDebugValueLoc(DDI);
1692             auto *DerefExpr =
1693                 DIExpression::append(DDI->getExpression(), dwarf::DW_OP_deref);
1694             DIB.insertDbgValueIntrinsic(AI, DDI->getVariable(), DerefExpr,
1695                                         NewLoc, CI);
1696           }
1697         } else if (BitCastInst *BI = dyn_cast<BitCastInst>(U)) {
1698           if (BI->getType()->isPointerTy())
1699             WorkList.push_back(BI);
1700         }
1701       }
1702     }
1703     DDI->eraseFromParent();
1704     Changed = true;
1705   }
1706 
  if (Changed)
    for (BasicBlock &BB : F)
      RemoveRedundantDbgInstrs(&BB);
1710 
1711   return Changed;
1712 }
1713 
1714 /// Propagate dbg.value intrinsics through the newly inserted PHIs.
1715 void llvm::insertDebugValuesForPHIs(BasicBlock *BB,
1716                                     SmallVectorImpl<PHINode *> &InsertedPHIs) {
1717   assert(BB && "No BasicBlock to clone dbg.value(s) from.");
  if (InsertedPHIs.empty())
1719     return;
1720 
1721   // Map existing PHI nodes to their dbg.values.
1722   ValueToValueMapTy DbgValueMap;
1723   for (auto &I : *BB) {
1724     if (auto DbgII = dyn_cast<DbgVariableIntrinsic>(&I)) {
1725       for (Value *V : DbgII->location_ops())
1726         if (auto *Loc = dyn_cast_or_null<PHINode>(V))
1727           DbgValueMap.insert({Loc, DbgII});
1728     }
1729   }
  if (DbgValueMap.empty())
1731     return;
1732 
1733   // Map a pair of the destination BB and old dbg.value to the new dbg.value,
1734   // so that if a dbg.value is being rewritten to use more than one of the
1735   // inserted PHIs in the same destination BB, we can update the same dbg.value
1736   // with all the new PHIs instead of creating one copy for each.
1737   MapVector<std::pair<BasicBlock *, DbgVariableIntrinsic *>,
1738             DbgVariableIntrinsic *>
1739       NewDbgValueMap;
1740   // Then iterate through the new PHIs and look to see if they use one of the
1741   // previously mapped PHIs. If so, create a new dbg.value intrinsic that will
1742   // propagate the info through the new PHI. If we use more than one new PHI in
1743   // a single destination BB with the same old dbg.value, merge the updates so
1744   // that we get a single new dbg.value with all the new PHIs.
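  // For example (hypothetical), if one dbg.value uses old PHIs %p and %q, and
  // new PHIs %p.new and %q.new were both inserted into the same destination
  // block, a single cloned dbg.value is updated to use both %p.new and %q.new
  // rather than cloning the intrinsic twice.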
1745   for (auto *PHI : InsertedPHIs) {
1746     BasicBlock *Parent = PHI->getParent();
1747     // Avoid inserting an intrinsic into an EH block.
1748     if (Parent->getFirstNonPHI()->isEHPad())
1749       continue;
1750     for (auto *VI : PHI->operand_values()) {
1751       auto V = DbgValueMap.find(VI);
1752       if (V != DbgValueMap.end()) {
1753         auto *DbgII = cast<DbgVariableIntrinsic>(V->second);
1754         auto NewDI = NewDbgValueMap.find({Parent, DbgII});
1755         if (NewDI == NewDbgValueMap.end()) {
1756           auto *NewDbgII = cast<DbgVariableIntrinsic>(DbgII->clone());
1757           NewDI = NewDbgValueMap.insert({{Parent, DbgII}, NewDbgII}).first;
1758         }
1759         DbgVariableIntrinsic *NewDbgII = NewDI->second;
        // If PHI contains VI as an operand more than once, we may have
        // already replaced it in NewDbgII; confirm that it is still present.
1762         if (is_contained(NewDbgII->location_ops(), VI))
1763           NewDbgII->replaceVariableLocationOp(VI, PHI);
1764       }
1765     }
1766   }
  // Insert the new dbg.values into their destination blocks.
1768   for (auto DI : NewDbgValueMap) {
1769     BasicBlock *Parent = DI.first.first;
1770     auto *NewDbgII = DI.second;
1771     auto InsertionPt = Parent->getFirstInsertionPt();
1772     assert(InsertionPt != Parent->end() && "Ill-formed basic block");
1773     NewDbgII->insertBefore(&*InsertionPt);
1774   }
1775 }
1776 
1777 bool llvm::replaceDbgDeclare(Value *Address, Value *NewAddress,
1778                              DIBuilder &Builder, uint8_t DIExprFlags,
1779                              int Offset) {
1780   auto DbgDeclares = FindDbgDeclareUses(Address);
1781   for (DbgVariableIntrinsic *DII : DbgDeclares) {
1782     const DebugLoc &Loc = DII->getDebugLoc();
1783     auto *DIVar = DII->getVariable();
1784     auto *DIExpr = DII->getExpression();
1785     assert(DIVar && "Missing variable");
1786     DIExpr = DIExpression::prepend(DIExpr, DIExprFlags, Offset);
1787     // Insert llvm.dbg.declare immediately before DII, and remove old
1788     // llvm.dbg.declare.
1789     Builder.insertDeclare(NewAddress, DIVar, DIExpr, Loc, DII);
1790     DII->eraseFromParent();
1791   }
1792   return !DbgDeclares.empty();
1793 }
1794 
1795 static void replaceOneDbgValueForAlloca(DbgValueInst *DVI, Value *NewAddress,
1796                                         DIBuilder &Builder, int Offset) {
1797   const DebugLoc &Loc = DVI->getDebugLoc();
1798   auto *DIVar = DVI->getVariable();
1799   auto *DIExpr = DVI->getExpression();
1800   assert(DIVar && "Missing variable");
1801 
1802   // This is an alloca-based llvm.dbg.value. The first thing it should do with
1803   // the alloca pointer is dereference it. Otherwise we don't know how to handle
1804   // it and give up.
1805   if (!DIExpr || DIExpr->getNumElements() < 1 ||
1806       DIExpr->getElement(0) != dwarf::DW_OP_deref)
1807     return;
1808 
1809   // Insert the offset before the first deref.
1810   // We could just change the offset argument of dbg.value, but it's unsigned...
1811   if (Offset)
1812     DIExpr = DIExpression::prepend(DIExpr, 0, Offset);
1813 
1814   Builder.insertDbgValueIntrinsic(NewAddress, DIVar, DIExpr, Loc, DVI);
1815   DVI->eraseFromParent();
1816 }
1817 
1818 void llvm::replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
1819                                     DIBuilder &Builder, int Offset) {
1820   if (auto *L = LocalAsMetadata::getIfExists(AI))
1821     if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
1822       for (Use &U : llvm::make_early_inc_range(MDV->uses()))
1823         if (auto *DVI = dyn_cast<DbgValueInst>(U.getUser()))
1824           replaceOneDbgValueForAlloca(DVI, NewAllocaAddress, Builder, Offset);
1825 }
1826 
1827 /// Where possible to salvage debug information for \p I do so.
1828 /// If not possible mark undef.
1829 void llvm::salvageDebugInfo(Instruction &I) {
1830   SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
1831   findDbgUsers(DbgUsers, &I);
1832   salvageDebugInfoForDbgValues(I, DbgUsers);
1833 }
1834 
1835 /// Salvage the address component of \p DAI.
1836 static void salvageDbgAssignAddress(DbgAssignIntrinsic *DAI) {
1837   Instruction *I = dyn_cast<Instruction>(DAI->getAddress());
1838   // Only instructions can be salvaged at the moment.
1839   if (!I)
1840     return;
1841 
1842   assert(!DAI->getAddressExpression()->getFragmentInfo().has_value() &&
1843          "address-expression shouldn't have fragment info");
1844 
1845   // The address component of a dbg.assign cannot be variadic.
1846   uint64_t CurrentLocOps = 0;
1847   SmallVector<Value *, 4> AdditionalValues;
1848   SmallVector<uint64_t, 16> Ops;
1849   Value *NewV = salvageDebugInfoImpl(*I, CurrentLocOps, Ops, AdditionalValues);
1850 
1851   // Check if the salvage failed.
1852   if (!NewV)
1853     return;
1854 
1855   DIExpression *SalvagedExpr = DIExpression::appendOpsToArg(
1856       DAI->getAddressExpression(), Ops, 0, /*StackValue=*/false);
1857   assert(!SalvagedExpr->getFragmentInfo().has_value() &&
1858          "address-expression shouldn't have fragment info");
1859 
1860   // Salvage succeeds if no additional values are required.
1861   if (AdditionalValues.empty()) {
1862     DAI->setAddress(NewV);
1863     DAI->setAddressExpression(SalvagedExpr);
1864   } else {
1865     DAI->setKillAddress();
1866   }
1867 }
1868 
1869 void llvm::salvageDebugInfoForDbgValues(
1870     Instruction &I, ArrayRef<DbgVariableIntrinsic *> DbgUsers) {
  // These are arbitrarily chosen limits on the maximum number of values and
  // the maximum size of a debug expression that we can salvage; they exist
  // for performance reasons.
1874   const unsigned MaxDebugArgs = 16;
1875   const unsigned MaxExpressionSize = 128;
1876   bool Salvaged = false;
1877 
1878   for (auto *DII : DbgUsers) {
1879     if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(DII)) {
1880       if (DAI->getAddress() == &I) {
1881         salvageDbgAssignAddress(DAI);
1882         Salvaged = true;
1883       }
1884       if (DAI->getValue() != &I)
1885         continue;
1886     }
1887 
    // Do not add DW_OP_stack_value for DbgDeclare, because a dbg.declare
    // implicitly describes its value as a DWARF memory location.
1890     bool StackValue = isa<DbgValueInst>(DII);
1891     auto DIILocation = DII->location_ops();
1892     assert(
1893         is_contained(DIILocation, &I) &&
1894         "DbgVariableIntrinsic must use salvaged instruction as its location");
1895     SmallVector<Value *, 4> AdditionalValues;
1896     // `I` may appear more than once in DII's location ops, and each use of `I`
1897     // must be updated in the DIExpression and potentially have additional
1898     // values added; thus we call salvageDebugInfoImpl for each `I` instance in
1899     // DIILocation.
1900     Value *Op0 = nullptr;
1901     DIExpression *SalvagedExpr = DII->getExpression();
1902     auto LocItr = find(DIILocation, &I);
1903     while (SalvagedExpr && LocItr != DIILocation.end()) {
1904       SmallVector<uint64_t, 16> Ops;
1905       unsigned LocNo = std::distance(DIILocation.begin(), LocItr);
1906       uint64_t CurrentLocOps = SalvagedExpr->getNumLocationOperands();
1907       Op0 = salvageDebugInfoImpl(I, CurrentLocOps, Ops, AdditionalValues);
1908       if (!Op0)
1909         break;
1910       SalvagedExpr =
1911           DIExpression::appendOpsToArg(SalvagedExpr, Ops, LocNo, StackValue);
1912       LocItr = std::find(++LocItr, DIILocation.end(), &I);
1913     }
    // salvageDebugInfoImpl depends only on `I`, so it either fails on the
    // first salvage attempt for this instruction or not at all.
1916     if (!Op0)
1917       break;
1918 
1919     DII->replaceVariableLocationOp(&I, Op0);
    bool IsValidSalvageExpr =
        SalvagedExpr->getNumElements() <= MaxExpressionSize;
1921     if (AdditionalValues.empty() && IsValidSalvageExpr) {
1922       DII->setExpression(SalvagedExpr);
1923     } else if (isa<DbgValueInst>(DII) && IsValidSalvageExpr &&
1924                DII->getNumVariableLocationOps() + AdditionalValues.size() <=
1925                    MaxDebugArgs) {
1926       DII->addVariableLocationOps(AdditionalValues, SalvagedExpr);
1927     } else {
1928       // Do not salvage using DIArgList for dbg.declare, as it is not currently
1929       // supported in those instructions. Also do not salvage if the resulting
1930       // DIArgList would contain an unreasonably large number of values.
1931       DII->setKillLocation();
1932     }
1933     LLVM_DEBUG(dbgs() << "SALVAGE: " << *DII << '\n');
1934     Salvaged = true;
1935   }
1936 
1937   if (Salvaged)
1938     return;
1939 
1940   for (auto *DII : DbgUsers)
1941     DII->setKillLocation();
1942 }
1943 
1944 Value *getSalvageOpsForGEP(GetElementPtrInst *GEP, const DataLayout &DL,
1945                            uint64_t CurrentLocOps,
1946                            SmallVectorImpl<uint64_t> &Opcodes,
1947                            SmallVectorImpl<Value *> &AdditionalValues) {
1948   unsigned BitWidth = DL.getIndexSizeInBits(GEP->getPointerAddressSpace());
1949   // Rewrite a GEP into a DIExpression.
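  // For example (illustrative), a GEP adding a constant byte offset of 4
  // appends DW_OP_plus_uconst 4, while a variable index %i scaled by 4
  // appends DW_OP_LLVM_arg <N>, DW_OP_constu 4, DW_OP_mul, DW_OP_plus and
  // pushes %i onto AdditionalValues.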
1950   MapVector<Value *, APInt> VariableOffsets;
1951   APInt ConstantOffset(BitWidth, 0);
1952   if (!GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset))
1953     return nullptr;
1954   if (!VariableOffsets.empty() && !CurrentLocOps) {
1955     Opcodes.insert(Opcodes.begin(), {dwarf::DW_OP_LLVM_arg, 0});
1956     CurrentLocOps = 1;
1957   }
1958   for (const auto &Offset : VariableOffsets) {
1959     AdditionalValues.push_back(Offset.first);
1960     assert(Offset.second.isStrictlyPositive() &&
1961            "Expected strictly positive multiplier for offset.");
1962     Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps++, dwarf::DW_OP_constu,
1963                     Offset.second.getZExtValue(), dwarf::DW_OP_mul,
1964                     dwarf::DW_OP_plus});
1965   }
1966   DIExpression::appendOffset(Opcodes, ConstantOffset.getSExtValue());
1967   return GEP->getOperand(0);
1968 }
1969 
1970 uint64_t getDwarfOpForBinOp(Instruction::BinaryOps Opcode) {
1971   switch (Opcode) {
1972   case Instruction::Add:
1973     return dwarf::DW_OP_plus;
1974   case Instruction::Sub:
1975     return dwarf::DW_OP_minus;
1976   case Instruction::Mul:
1977     return dwarf::DW_OP_mul;
1978   case Instruction::SDiv:
1979     return dwarf::DW_OP_div;
1980   case Instruction::SRem:
1981     return dwarf::DW_OP_mod;
1982   case Instruction::Or:
1983     return dwarf::DW_OP_or;
1984   case Instruction::And:
1985     return dwarf::DW_OP_and;
1986   case Instruction::Xor:
1987     return dwarf::DW_OP_xor;
1988   case Instruction::Shl:
1989     return dwarf::DW_OP_shl;
1990   case Instruction::LShr:
1991     return dwarf::DW_OP_shr;
1992   case Instruction::AShr:
1993     return dwarf::DW_OP_shra;
1994   default:
1995     // TODO: Salvage from each kind of binop we know about.
1996     return 0;
1997   }
1998 }
1999 
2000 static void handleSSAValueOperands(uint64_t CurrentLocOps,
2001                                    SmallVectorImpl<uint64_t> &Opcodes,
2002                                    SmallVectorImpl<Value *> &AdditionalValues,
2003                                    Instruction *I) {
2004   if (!CurrentLocOps) {
2005     Opcodes.append({dwarf::DW_OP_LLVM_arg, 0});
2006     CurrentLocOps = 1;
2007   }
2008   Opcodes.append({dwarf::DW_OP_LLVM_arg, CurrentLocOps});
2009   AdditionalValues.push_back(I->getOperand(1));
2010 }
2011 
2012 Value *getSalvageOpsForBinOp(BinaryOperator *BI, uint64_t CurrentLocOps,
2013                              SmallVectorImpl<uint64_t> &Opcodes,
2014                              SmallVectorImpl<Value *> &AdditionalValues) {
2015   // Handle binary operations with constant integer operands as a special case.
2016   auto *ConstInt = dyn_cast<ConstantInt>(BI->getOperand(1));
2017   // Values wider than 64 bits cannot be represented within a DIExpression.
2018   if (ConstInt && ConstInt->getBitWidth() > 64)
2019     return nullptr;
2020 
2021   Instruction::BinaryOps BinOpcode = BI->getOpcode();
2022   // Push any Constant Int operand onto the expression stack.
2023   if (ConstInt) {
2024     uint64_t Val = ConstInt->getSExtValue();
2025     // Add or Sub Instructions with a constant operand can potentially be
2026     // simplified.
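    // E.g., (add i32 %x, 8) salvages to DW_OP_plus_uconst 8 applied to %x
    // (illustrative).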
2027     if (BinOpcode == Instruction::Add || BinOpcode == Instruction::Sub) {
2028       uint64_t Offset = BinOpcode == Instruction::Add ? Val : -int64_t(Val);
2029       DIExpression::appendOffset(Opcodes, Offset);
2030       return BI->getOperand(0);
2031     }
2032     Opcodes.append({dwarf::DW_OP_constu, Val});
2033   } else {
2034     handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, BI);
2035   }
2036 
2037   // Add salvaged binary operator to expression stack, if it has a valid
2038   // representation in a DIExpression.
2039   uint64_t DwarfBinOp = getDwarfOpForBinOp(BinOpcode);
2040   if (!DwarfBinOp)
2041     return nullptr;
2042   Opcodes.push_back(DwarfBinOp);
2043   return BI->getOperand(0);
2044 }
2045 
2046 uint64_t getDwarfOpForIcmpPred(CmpInst::Predicate Pred) {
  // The signedness of the operation is implicit in the typed stack; signed
  // and unsigned instructions map to the same DWARF opcode.
2049   switch (Pred) {
2050   case CmpInst::ICMP_EQ:
2051     return dwarf::DW_OP_eq;
2052   case CmpInst::ICMP_NE:
2053     return dwarf::DW_OP_ne;
2054   case CmpInst::ICMP_UGT:
2055   case CmpInst::ICMP_SGT:
2056     return dwarf::DW_OP_gt;
2057   case CmpInst::ICMP_UGE:
2058   case CmpInst::ICMP_SGE:
2059     return dwarf::DW_OP_ge;
2060   case CmpInst::ICMP_ULT:
2061   case CmpInst::ICMP_SLT:
2062     return dwarf::DW_OP_lt;
2063   case CmpInst::ICMP_ULE:
2064   case CmpInst::ICMP_SLE:
2065     return dwarf::DW_OP_le;
2066   default:
2067     return 0;
2068   }
2069 }
2070 
2071 Value *getSalvageOpsForIcmpOp(ICmpInst *Icmp, uint64_t CurrentLocOps,
2072                               SmallVectorImpl<uint64_t> &Opcodes,
2073                               SmallVectorImpl<Value *> &AdditionalValues) {
2074   // Handle icmp operations with constant integer operands as a special case.
2075   auto *ConstInt = dyn_cast<ConstantInt>(Icmp->getOperand(1));
2076   // Values wider than 64 bits cannot be represented within a DIExpression.
2077   if (ConstInt && ConstInt->getBitWidth() > 64)
2078     return nullptr;
2079   // Push any Constant Int operand onto the expression stack.
2080   if (ConstInt) {
2081     if (Icmp->isSigned())
2082       Opcodes.push_back(dwarf::DW_OP_consts);
2083     else
2084       Opcodes.push_back(dwarf::DW_OP_constu);
2085     uint64_t Val = ConstInt->getSExtValue();
2086     Opcodes.push_back(Val);
2087   } else {
2088     handleSSAValueOperands(CurrentLocOps, Opcodes, AdditionalValues, Icmp);
2089   }
2090 
2091   // Add salvaged binary operator to expression stack, if it has a valid
2092   // representation in a DIExpression.
2093   uint64_t DwarfIcmpOp = getDwarfOpForIcmpPred(Icmp->getPredicate());
2094   if (!DwarfIcmpOp)
2095     return nullptr;
2096   Opcodes.push_back(DwarfIcmpOp);
2097   return Icmp->getOperand(0);
2098 }
2099 
2100 Value *llvm::salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps,
2101                                   SmallVectorImpl<uint64_t> &Ops,
2102                                   SmallVectorImpl<Value *> &AdditionalValues) {
2103   auto &M = *I.getModule();
2104   auto &DL = M.getDataLayout();
2105 
2106   if (auto *CI = dyn_cast<CastInst>(&I)) {
2107     Value *FromValue = CI->getOperand(0);
2108     // No-op casts are irrelevant for debug info.
2109     if (CI->isNoopCast(DL)) {
2110       return FromValue;
2111     }
2112 
2113     Type *Type = CI->getType();
2114     if (Type->isPointerTy())
2115       Type = DL.getIntPtrType(Type);
    // Only Trunc, SExt, ZExt, IntToPtr, and PtrToInt casts to scalar types
    // can be salvaged.
2117     if (Type->isVectorTy() ||
2118         !(isa<TruncInst>(&I) || isa<SExtInst>(&I) || isa<ZExtInst>(&I) ||
2119           isa<IntToPtrInst>(&I) || isa<PtrToIntInst>(&I)))
2120       return nullptr;
2121 
2122     llvm::Type *FromType = FromValue->getType();
2123     if (FromType->isPointerTy())
2124       FromType = DL.getIntPtrType(FromType);
2125 
2126     unsigned FromTypeBitSize = FromType->getScalarSizeInBits();
2127     unsigned ToTypeBitSize = Type->getScalarSizeInBits();
2128 
2129     auto ExtOps = DIExpression::getExtOps(FromTypeBitSize, ToTypeBitSize,
2130                                           isa<SExtInst>(&I));
2131     Ops.append(ExtOps.begin(), ExtOps.end());
2132     return FromValue;
2133   }
2134 
2135   if (auto *GEP = dyn_cast<GetElementPtrInst>(&I))
2136     return getSalvageOpsForGEP(GEP, DL, CurrentLocOps, Ops, AdditionalValues);
2137   if (auto *BI = dyn_cast<BinaryOperator>(&I))
2138     return getSalvageOpsForBinOp(BI, CurrentLocOps, Ops, AdditionalValues);
2139   if (auto *IC = dyn_cast<ICmpInst>(&I))
2140     return getSalvageOpsForIcmpOp(IC, CurrentLocOps, Ops, AdditionalValues);
2141 
2142   // *Not* to do: we should not attempt to salvage load instructions,
2143   // because the validity and lifetime of a dbg.value containing
2144   // DW_OP_deref becomes difficult to analyze. See PR40628 for examples.
2145   return nullptr;
2146 }
2147 
2148 /// A replacement for a dbg.value expression.
2149 using DbgValReplacement = std::optional<DIExpression *>;
2150 
2151 /// Point debug users of \p From to \p To using exprs given by \p RewriteExpr,
2152 /// possibly moving/undefing users to prevent use-before-def. Returns true if
2153 /// changes are made.
2154 static bool rewriteDebugUsers(
2155     Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT,
2156     function_ref<DbgValReplacement(DbgVariableIntrinsic &DII)> RewriteExpr) {
2157   // Find debug users of From.
2158   SmallVector<DbgVariableIntrinsic *, 1> Users;
2159   findDbgUsers(Users, &From);
2160   if (Users.empty())
2161     return false;
2162 
2163   // Prevent use-before-def of To.
2164   bool Changed = false;
2165   SmallPtrSet<DbgVariableIntrinsic *, 1> UndefOrSalvage;
2166   if (isa<Instruction>(&To)) {
2167     bool DomPointAfterFrom = From.getNextNonDebugInstruction() == &DomPoint;
2168 
2169     for (auto *DII : Users) {
2170       // It's common to see a debug user between From and DomPoint. Move it
2171       // after DomPoint to preserve the variable update without any reordering.
2172       if (DomPointAfterFrom && DII->getNextNonDebugInstruction() == &DomPoint) {
2173         LLVM_DEBUG(dbgs() << "MOVE:  " << *DII << '\n');
2174         DII->moveAfter(&DomPoint);
2175         Changed = true;
2176 
2177       // Users which otherwise aren't dominated by the replacement value must
2178       // be salvaged or deleted.
2179       } else if (!DT.dominates(&DomPoint, DII)) {
2180         UndefOrSalvage.insert(DII);
2181       }
2182     }
2183   }
2184 
2185   // Update debug users without use-before-def risk.
2186   for (auto *DII : Users) {
2187     if (UndefOrSalvage.count(DII))
2188       continue;
2189 
2190     DbgValReplacement DVR = RewriteExpr(*DII);
2191     if (!DVR)
2192       continue;
2193 
2194     DII->replaceVariableLocationOp(&From, &To);
2195     DII->setExpression(*DVR);
2196     LLVM_DEBUG(dbgs() << "REWRITE:  " << *DII << '\n');
2197     Changed = true;
2198   }
2199 
2200   if (!UndefOrSalvage.empty()) {
2201     // Try to salvage the remaining debug users.
2202     salvageDebugInfo(From);
2203     Changed = true;
2204   }
2205 
2206   return Changed;
2207 }
2208 
2209 /// Check if a bitcast between a value of type \p FromTy to type \p ToTy would
2210 /// losslessly preserve the bits and semantics of the value. This predicate is
/// symmetric, i.e. swapping \p FromTy and \p ToTy should give the same result.
2212 ///
2213 /// Note that Type::canLosslesslyBitCastTo is not suitable here because it
/// allows semantically inequivalent bitcasts, such as <2 x i64> -> <4 x i32>,
2215 /// and also does not allow lossless pointer <-> integer conversions.
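///
/// For example (illustrative), an i64 <-> ptr conversion is preserved on a
/// target whose DataLayout has 64-bit integral pointers, whereas i32 <-> i64
/// is not, since the sizes differ.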
2216 static bool isBitCastSemanticsPreserving(const DataLayout &DL, Type *FromTy,
2217                                          Type *ToTy) {
2218   // Trivially compatible types.
2219   if (FromTy == ToTy)
2220     return true;
2221 
2222   // Handle compatible pointer <-> integer conversions.
2223   if (FromTy->isIntOrPtrTy() && ToTy->isIntOrPtrTy()) {
2224     bool SameSize = DL.getTypeSizeInBits(FromTy) == DL.getTypeSizeInBits(ToTy);
2225     bool LosslessConversion = !DL.isNonIntegralPointerType(FromTy) &&
2226                               !DL.isNonIntegralPointerType(ToTy);
2227     return SameSize && LosslessConversion;
2228   }
2229 
2230   // TODO: This is not exhaustive.
2231   return false;
2232 }
2233 
2234 bool llvm::replaceAllDbgUsesWith(Instruction &From, Value &To,
2235                                  Instruction &DomPoint, DominatorTree &DT) {
2236   // Exit early if From has no debug users.
2237   if (!From.isUsedByMetadata())
2238     return false;
2239 
2240   assert(&From != &To && "Can't replace something with itself");
2241 
2242   Type *FromTy = From.getType();
2243   Type *ToTy = To.getType();
2244 
2245   auto Identity = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
2246     return DII.getExpression();
2247   };
2248 
2249   // Handle no-op conversions.
2250   Module &M = *From.getModule();
2251   const DataLayout &DL = M.getDataLayout();
2252   if (isBitCastSemanticsPreserving(DL, FromTy, ToTy))
2253     return rewriteDebugUsers(From, To, DomPoint, DT, Identity);
2254 
2255   // Handle integer-to-integer widening and narrowing.
2256   // FIXME: Use DW_OP_convert when it's available everywhere.
2257   if (FromTy->isIntegerTy() && ToTy->isIntegerTy()) {
2258     uint64_t FromBits = FromTy->getPrimitiveSizeInBits();
2259     uint64_t ToBits = ToTy->getPrimitiveSizeInBits();
2260     assert(FromBits != ToBits && "Unexpected no-op conversion");
2261 
2262     // When the width of the result grows, assume that a debugger will only
2263     // access the low `FromBits` bits when inspecting the source variable.
2264     if (FromBits < ToBits)
2265       return rewriteDebugUsers(From, To, DomPoint, DT, Identity);
2266 
2267     // The width of the result has shrunk. Use sign/zero extension to describe
2268     // the source variable's high bits.
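    // E.g., when an i64 variable is narrowed to i32, the expression gains a
    // 32-to-64-bit sign/zero extension so a debugger can reconstruct the
    // full-width value from the narrow location (illustrative).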
2269     auto SignOrZeroExt = [&](DbgVariableIntrinsic &DII) -> DbgValReplacement {
2270       DILocalVariable *Var = DII.getVariable();
2271 
2272       // Without knowing signedness, sign/zero extension isn't possible.
2273       auto Signedness = Var->getSignedness();
2274       if (!Signedness)
2275         return std::nullopt;
2276 
2277       bool Signed = *Signedness == DIBasicType::Signedness::Signed;
2278       return DIExpression::appendExt(DII.getExpression(), ToBits, FromBits,
2279                                      Signed);
2280     };
2281     return rewriteDebugUsers(From, To, DomPoint, DT, SignOrZeroExt);
2282   }
2283 
2284   // TODO: Floating-point conversions, vectors.
2285   return false;
2286 }
2287 
2288 std::pair<unsigned, unsigned>
2289 llvm::removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB) {
2290   unsigned NumDeadInst = 0;
2291   unsigned NumDeadDbgInst = 0;
  // Delete the instructions backwards; this reduces the number of def-use
  // and use-def chains that have to be updated.
2294   Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
2295   while (EndInst != &BB->front()) {
2296     // Delete the next to last instruction.
2297     Instruction *Inst = &*--EndInst->getIterator();
2298     if (!Inst->use_empty() && !Inst->getType()->isTokenTy())
2299       Inst->replaceAllUsesWith(PoisonValue::get(Inst->getType()));
2300     if (Inst->isEHPad() || Inst->getType()->isTokenTy()) {
2301       EndInst = Inst;
2302       continue;
2303     }
2304     if (isa<DbgInfoIntrinsic>(Inst))
2305       ++NumDeadDbgInst;
2306     else
2307       ++NumDeadInst;
2308     Inst->eraseFromParent();
2309   }
2310   return {NumDeadInst, NumDeadDbgInst};
2311 }
2312 
2313 unsigned llvm::changeToUnreachable(Instruction *I, bool PreserveLCSSA,
2314                                    DomTreeUpdater *DTU,
2315                                    MemorySSAUpdater *MSSAU) {
2316   BasicBlock *BB = I->getParent();
2317 
2318   if (MSSAU)
2319     MSSAU->changeToUnreachable(I);
2320 
2321   SmallSet<BasicBlock *, 8> UniqueSuccessors;
2322 
2323   // Loop over all of the successors, removing BB's entry from any PHI
2324   // nodes.
2325   for (BasicBlock *Successor : successors(BB)) {
2326     Successor->removePredecessor(BB, PreserveLCSSA);
2327     if (DTU)
2328       UniqueSuccessors.insert(Successor);
2329   }
2330   auto *UI = new UnreachableInst(I->getContext(), I);
2331   UI->setDebugLoc(I->getDebugLoc());
2332 
2333   // All instructions after this are dead.
2334   unsigned NumInstrsRemoved = 0;
2335   BasicBlock::iterator BBI = I->getIterator(), BBE = BB->end();
2336   while (BBI != BBE) {
2337     if (!BBI->use_empty())
2338       BBI->replaceAllUsesWith(PoisonValue::get(BBI->getType()));
2339     BBI++->eraseFromParent();
2340     ++NumInstrsRemoved;
2341   }
2342   if (DTU) {
2343     SmallVector<DominatorTree::UpdateType, 8> Updates;
2344     Updates.reserve(UniqueSuccessors.size());
2345     for (BasicBlock *UniqueSuccessor : UniqueSuccessors)
2346       Updates.push_back({DominatorTree::Delete, BB, UniqueSuccessor});
2347     DTU->applyUpdates(Updates);
2348   }
2349   return NumInstrsRemoved;
2350 }
2351 
2352 CallInst *llvm::createCallMatchingInvoke(InvokeInst *II) {
2353   SmallVector<Value *, 8> Args(II->args());
2354   SmallVector<OperandBundleDef, 1> OpBundles;
2355   II->getOperandBundlesAsDefs(OpBundles);
2356   CallInst *NewCall = CallInst::Create(II->getFunctionType(),
2357                                        II->getCalledOperand(), Args, OpBundles);
2358   NewCall->setCallingConv(II->getCallingConv());
2359   NewCall->setAttributes(II->getAttributes());
2360   NewCall->setDebugLoc(II->getDebugLoc());
2361   NewCall->copyMetadata(*II);
2362 
  // If the invoke had profile metadata, try converting it for the CallInst.
2364   uint64_t TotalWeight;
2365   if (NewCall->extractProfTotalWeight(TotalWeight)) {
2366     // Set the total weight if it fits into i32, otherwise reset.
2367     MDBuilder MDB(NewCall->getContext());
2368     auto NewWeights = uint32_t(TotalWeight) != TotalWeight
2369                           ? nullptr
2370                           : MDB.createBranchWeights({uint32_t(TotalWeight)});
2371     NewCall->setMetadata(LLVMContext::MD_prof, NewWeights);
2372   }
2373 
2374   return NewCall;
2375 }
2376 
2377 // changeToCall - Convert the specified invoke into a normal call.
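// For example (illustrative IR):
//   invoke void @f() to label %normal unwind label %lpad
// becomes:
//   call void @f()
//   br label %normal
// and %lpad loses this block as a predecessor.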
2378 CallInst *llvm::changeToCall(InvokeInst *II, DomTreeUpdater *DTU) {
2379   CallInst *NewCall = createCallMatchingInvoke(II);
2380   NewCall->takeName(II);
2381   NewCall->insertBefore(II);
2382   II->replaceAllUsesWith(NewCall);
2383 
2384   // Follow the call by a branch to the normal destination.
2385   BasicBlock *NormalDestBB = II->getNormalDest();
2386   BranchInst::Create(NormalDestBB, II);
2387 
2388   // Update PHI nodes in the unwind destination
2389   BasicBlock *BB = II->getParent();
2390   BasicBlock *UnwindDestBB = II->getUnwindDest();
2391   UnwindDestBB->removePredecessor(BB);
2392   II->eraseFromParent();
2393   if (DTU)
2394     DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2395   return NewCall;
2396 }
2397 
2398 BasicBlock *llvm::changeToInvokeAndSplitBasicBlock(CallInst *CI,
2399                                                    BasicBlock *UnwindEdge,
2400                                                    DomTreeUpdater *DTU) {
2401   BasicBlock *BB = CI->getParent();
2402 
2403   // Convert this function call into an invoke instruction.  First, split the
2404   // basic block.
  BasicBlock *Split = SplitBlock(BB, CI, DTU, /*LI=*/nullptr, /*MSSAU=*/nullptr,
2406                                  CI->getName() + ".noexc");
2407 
2408   // Delete the unconditional branch inserted by SplitBlock
2409   BB->back().eraseFromParent();
2410 
2411   // Create the new invoke instruction.
2412   SmallVector<Value *, 8> InvokeArgs(CI->args());
2413   SmallVector<OperandBundleDef, 1> OpBundles;
2414 
2415   CI->getOperandBundlesAsDefs(OpBundles);
2416 
  // Note: we're round-tripping operand bundles through memory here, and that
2418   // can potentially be avoided with a cleverer API design that we do not have
2419   // as of this time.
2420 
2421   InvokeInst *II =
2422       InvokeInst::Create(CI->getFunctionType(), CI->getCalledOperand(), Split,
2423                          UnwindEdge, InvokeArgs, OpBundles, CI->getName(), BB);
2424   II->setDebugLoc(CI->getDebugLoc());
2425   II->setCallingConv(CI->getCallingConv());
2426   II->setAttributes(CI->getAttributes());
2427   II->setMetadata(LLVMContext::MD_prof, CI->getMetadata(LLVMContext::MD_prof));
2428 
2429   if (DTU)
2430     DTU->applyUpdates({{DominatorTree::Insert, BB, UnwindEdge}});
2431 
2432   // Make sure that anything using the call now uses the invoke!  This also
2433   // updates the CallGraph if present, because it uses a WeakTrackingVH.
2434   CI->replaceAllUsesWith(II);
2435 
2436   // Delete the original call
2437   Split->front().eraseFromParent();
2438   return Split;
2439 }
2440 
2441 static bool markAliveBlocks(Function &F,
2442                             SmallPtrSetImpl<BasicBlock *> &Reachable,
2443                             DomTreeUpdater *DTU = nullptr) {
2444   SmallVector<BasicBlock*, 128> Worklist;
2445   BasicBlock *BB = &F.front();
2446   Worklist.push_back(BB);
2447   Reachable.insert(BB);
2448   bool Changed = false;
2449   do {
2450     BB = Worklist.pop_back_val();
2451 
2452     // Do a quick scan of the basic block, turning any obviously unreachable
2453     // instructions into LLVM unreachable insts.  The instruction combining pass
2454     // canonicalizes unreachable insts into stores to null or undef.
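    // E.g., `store i32 0, ptr null` in an address space where null is not
    // defined is replaced with `unreachable` (illustrative).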
2455     for (Instruction &I : *BB) {
2456       if (auto *CI = dyn_cast<CallInst>(&I)) {
2457         Value *Callee = CI->getCalledOperand();
2458         // Handle intrinsic calls.
2459         if (Function *F = dyn_cast<Function>(Callee)) {
2460           auto IntrinsicID = F->getIntrinsicID();
2461           // Assumptions that are known to be false are equivalent to
2462           // unreachable. Also, if the condition is undefined, then we make the
2463           // choice most beneficial to the optimizer, and choose that to also be
2464           // unreachable.
2465           if (IntrinsicID == Intrinsic::assume) {
2466             if (match(CI->getArgOperand(0), m_CombineOr(m_Zero(), m_Undef()))) {
2467               // Don't insert a call to llvm.trap right before the unreachable.
2468               changeToUnreachable(CI, false, DTU);
2469               Changed = true;
2470               break;
2471             }
2472           } else if (IntrinsicID == Intrinsic::experimental_guard) {
2473             // A call to the guard intrinsic bails out of the current
2474             // compilation unit if the predicate passed to it is false. If the
2475             // predicate is a constant false, then we know the guard will bail
2476             // out of the current compile unconditionally, so all code following
2477             // it is dead.
2478             //
2479             // Note: unlike in llvm.assume, it is not "obviously profitable" for
2480             // guards to treat `undef` as `false` since a guard on `undef` can
2481             // still be useful for widening.
2482             if (match(CI->getArgOperand(0), m_Zero()))
2483               if (!isa<UnreachableInst>(CI->getNextNode())) {
2484                 changeToUnreachable(CI->getNextNode(), false, DTU);
2485                 Changed = true;
2486                 break;
2487               }
2488           }
2489         } else if ((isa<ConstantPointerNull>(Callee) &&
2490                     !NullPointerIsDefined(CI->getFunction(),
2491                                           cast<PointerType>(Callee->getType())
2492                                               ->getAddressSpace())) ||
2493                    isa<UndefValue>(Callee)) {
2494           changeToUnreachable(CI, false, DTU);
2495           Changed = true;
2496           break;
2497         }
2498         if (CI->doesNotReturn() && !CI->isMustTailCall()) {
2499           // If we found a call to a no-return function, insert an unreachable
2500           // instruction after it.  Make sure there isn't *already* one there
2501           // though.
2502           if (!isa<UnreachableInst>(CI->getNextNode())) {
2503             // Don't insert a call to llvm.trap right before the unreachable.
2504             changeToUnreachable(CI->getNextNode(), false, DTU);
2505             Changed = true;
2506           }
2507           break;
2508         }
2509       } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
2510         // Store to undef and store to null are undefined and used to signal
2511         // that they should be changed to unreachable by passes that can't
2512         // modify the CFG.
2513 
2514         // Don't touch volatile stores.
2515         if (SI->isVolatile()) continue;
2516 
2517         Value *Ptr = SI->getOperand(1);
2518 
2519         if (isa<UndefValue>(Ptr) ||
2520             (isa<ConstantPointerNull>(Ptr) &&
2521              !NullPointerIsDefined(SI->getFunction(),
2522                                    SI->getPointerAddressSpace()))) {
2523           changeToUnreachable(SI, false, DTU);
2524           Changed = true;
2525           break;
2526         }
2527       }
2528     }
2529 
2530     Instruction *Terminator = BB->getTerminator();
2531     if (auto *II = dyn_cast<InvokeInst>(Terminator)) {
2532       // Turn invokes that call 'nounwind' functions into ordinary calls.
2533       Value *Callee = II->getCalledOperand();
2534       if ((isa<ConstantPointerNull>(Callee) &&
2535            !NullPointerIsDefined(BB->getParent())) ||
2536           isa<UndefValue>(Callee)) {
2537         changeToUnreachable(II, false, DTU);
2538         Changed = true;
2539       } else {
2540         if (II->doesNotReturn() &&
2541             !isa<UnreachableInst>(II->getNormalDest()->front())) {
2542           // If we found an invoke of a no-return function,
2543           // create a new empty basic block with an `unreachable` terminator,
2544           // and set it as the normal destination for the invoke,
2545           // unless that is already the case.
2546           // Note that the original normal destination could have other uses.
2547           BasicBlock *OrigNormalDest = II->getNormalDest();
2548           OrigNormalDest->removePredecessor(II->getParent());
2549           LLVMContext &Ctx = II->getContext();
2550           BasicBlock *UnreachableNormalDest = BasicBlock::Create(
2551               Ctx, OrigNormalDest->getName() + ".unreachable",
2552               II->getFunction(), OrigNormalDest);
2553           new UnreachableInst(Ctx, UnreachableNormalDest);
2554           II->setNormalDest(UnreachableNormalDest);
2555           if (DTU)
2556             DTU->applyUpdates(
2557                 {{DominatorTree::Delete, BB, OrigNormalDest},
2558                  {DominatorTree::Insert, BB, UnreachableNormalDest}});
2559           Changed = true;
2560         }
2561         if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) {
2562           if (II->use_empty() && !II->mayHaveSideEffects()) {
            // The invoke is dead; replace it with a branch to the normal
            // destination.
2564             BasicBlock *NormalDestBB = II->getNormalDest();
2565             BasicBlock *UnwindDestBB = II->getUnwindDest();
2566             BranchInst::Create(NormalDestBB, II);
2567             UnwindDestBB->removePredecessor(II->getParent());
2568             II->eraseFromParent();
2569             if (DTU)
2570               DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDestBB}});
2571           } else
2572             changeToCall(II, DTU);
2573           Changed = true;
2574         }
2575       }
2576     } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Terminator)) {
2577       // Remove catchpads which cannot be reached.
2578       struct CatchPadDenseMapInfo {
2579         static CatchPadInst *getEmptyKey() {
2580           return DenseMapInfo<CatchPadInst *>::getEmptyKey();
2581         }
2582 
2583         static CatchPadInst *getTombstoneKey() {
2584           return DenseMapInfo<CatchPadInst *>::getTombstoneKey();
2585         }
2586 
2587         static unsigned getHashValue(CatchPadInst *CatchPad) {
2588           return static_cast<unsigned>(hash_combine_range(
2589               CatchPad->value_op_begin(), CatchPad->value_op_end()));
2590         }
2591 
2592         static bool isEqual(CatchPadInst *LHS, CatchPadInst *RHS) {
2593           if (LHS == getEmptyKey() || LHS == getTombstoneKey() ||
2594               RHS == getEmptyKey() || RHS == getTombstoneKey())
2595             return LHS == RHS;
2596           return LHS->isIdenticalTo(RHS);
2597         }
2598       };
2599 
2600       SmallDenseMap<BasicBlock *, int, 8> NumPerSuccessorCases;
2601       // Set of unique CatchPads.
2602       SmallDenseMap<CatchPadInst *, detail::DenseSetEmpty, 4,
2603                     CatchPadDenseMapInfo, detail::DenseSetPair<CatchPadInst *>>
2604           HandlerSet;
2605       detail::DenseSetEmpty Empty;
2606       for (CatchSwitchInst::handler_iterator I = CatchSwitch->handler_begin(),
2607                                              E = CatchSwitch->handler_end();
2608            I != E; ++I) {
2609         BasicBlock *HandlerBB = *I;
2610         if (DTU)
2611           ++NumPerSuccessorCases[HandlerBB];
2612         auto *CatchPad = cast<CatchPadInst>(HandlerBB->getFirstNonPHI());
2613         if (!HandlerSet.insert({CatchPad, Empty}).second) {
2614           if (DTU)
2615             --NumPerSuccessorCases[HandlerBB];
2616           CatchSwitch->removeHandler(I);
2617           --I;
2618           --E;
2619           Changed = true;
2620         }
2621       }
2622       if (DTU) {
2623         std::vector<DominatorTree::UpdateType> Updates;
2624         for (const std::pair<BasicBlock *, int> &I : NumPerSuccessorCases)
2625           if (I.second == 0)
2626             Updates.push_back({DominatorTree::Delete, BB, I.first});
2627         DTU->applyUpdates(Updates);
2628       }
2629     }
2630 
2631     Changed |= ConstantFoldTerminator(BB, true, nullptr, DTU);
2632     for (BasicBlock *Successor : successors(BB))
2633       if (Reachable.insert(Successor).second)
2634         Worklist.push_back(Successor);
2635   } while (!Worklist.empty());
2636   return Changed;
2637 }
2638 
2639 Instruction *llvm::removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU) {
2640   Instruction *TI = BB->getTerminator();
2641 
2642   if (auto *II = dyn_cast<InvokeInst>(TI))
2643     return changeToCall(II, DTU);
2644 
2645   Instruction *NewTI;
2646   BasicBlock *UnwindDest;
2647 
2648   if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
2649     NewTI = CleanupReturnInst::Create(CRI->getCleanupPad(), nullptr, CRI);
2650     UnwindDest = CRI->getUnwindDest();
2651   } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(TI)) {
2652     auto *NewCatchSwitch = CatchSwitchInst::Create(
2653         CatchSwitch->getParentPad(), nullptr, CatchSwitch->getNumHandlers(),
2654         CatchSwitch->getName(), CatchSwitch);
2655     for (BasicBlock *PadBB : CatchSwitch->handlers())
2656       NewCatchSwitch->addHandler(PadBB);
2657 
2658     NewTI = NewCatchSwitch;
2659     UnwindDest = CatchSwitch->getUnwindDest();
2660   } else {
2661     llvm_unreachable("Could not find unwind successor");
2662   }
2663 
2664   NewTI->takeName(TI);
2665   NewTI->setDebugLoc(TI->getDebugLoc());
2666   UnwindDest->removePredecessor(BB);
2667   TI->replaceAllUsesWith(NewTI);
2668   TI->eraseFromParent();
2669   if (DTU)
2670     DTU->applyUpdates({{DominatorTree::Delete, BB, UnwindDest}});
2671   return NewTI;
2672 }
2673 
/// Remove blocks that are not reachable, even if they are in a dead cycle.
/// Return true if a change was made, false otherwise.
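///
/// For example (illustrative IR), a dead cycle such as
///   loop1: br label %loop2
///   loop2: br label %loop1
/// is unreachable, yet every block in it still has a predecessor, so a naive
/// "remove blocks with no predecessors" cleanup would miss it.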
2677 bool llvm::removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU,
2678                                    MemorySSAUpdater *MSSAU) {
2679   SmallPtrSet<BasicBlock *, 16> Reachable;
2680   bool Changed = markAliveBlocks(F, Reachable, DTU);
2681 
  // If every block is reachable, there is nothing to remove.
2683   if (Reachable.size() == F.size())
2684     return Changed;
2685 
2686   assert(Reachable.size() < F.size());
2687 
2688   // Are there any blocks left to actually delete?
2689   SmallSetVector<BasicBlock *, 8> BlocksToRemove;
2690   for (BasicBlock &BB : F) {
2691     // Skip reachable basic blocks
2692     if (Reachable.count(&BB))
2693       continue;
2694     // Skip already-deleted blocks
2695     if (DTU && DTU->isBBPendingDeletion(&BB))
2696       continue;
2697     BlocksToRemove.insert(&BB);
2698   }
2699 
2700   if (BlocksToRemove.empty())
2701     return Changed;
2702 
2703   Changed = true;
2704   NumRemoved += BlocksToRemove.size();
2705 
2706   if (MSSAU)
2707     MSSAU->removeBlocks(BlocksToRemove);
2708 
2709   DeleteDeadBlocks(BlocksToRemove.takeVector(), DTU);
2710 
2711   return Changed;
2712 }
2713 
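// Sketch of the intent (illustrative IR, not from a test): when CSE'ing
//   %a = load i32, ptr %p, !range !0   ; !0 = !{i32 0, i32 10}
//   %b = load i32, ptr %p, !range !1   ; !1 = !{i32 5, i32 20}
// into a single load, neither !range can be kept as-is; the switch below
// keeps the most generic merge (here, a range covering both) or drops the
// metadata entirely when no sound merge exists.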
2714 void llvm::combineMetadata(Instruction *K, const Instruction *J,
2715                            ArrayRef<unsigned> KnownIDs, bool DoesKMove) {
2716   SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
2717   K->dropUnknownNonDebugMetadata(KnownIDs);
2718   K->getAllMetadataOtherThanDebugLoc(Metadata);
2719   for (const auto &MD : Metadata) {
2720     unsigned Kind = MD.first;
2721     MDNode *JMD = J->getMetadata(Kind);
2722     MDNode *KMD = MD.second;
2723 
2724     switch (Kind) {
2725       default:
2726         K->setMetadata(Kind, nullptr); // Remove unknown metadata
2727         break;
2728       case LLVMContext::MD_dbg:
2729         llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
2730       case LLVMContext::MD_DIAssignID:
2731         K->mergeDIAssignID(J);
2732         break;
2733       case LLVMContext::MD_tbaa:
2734         K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
2735         break;
2736       case LLVMContext::MD_alias_scope:
2737         K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
2738         break;
2739       case LLVMContext::MD_noalias:
2740       case LLVMContext::MD_mem_parallel_loop_access:
2741         K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
2742         break;
2743       case LLVMContext::MD_access_group:
2744         K->setMetadata(LLVMContext::MD_access_group,
2745                        intersectAccessGroups(K, J));
2746         break;
2747       case LLVMContext::MD_range:
2748         if (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))
2749           K->setMetadata(Kind, MDNode::getMostGenericRange(JMD, KMD));
2750         break;
2751       case LLVMContext::MD_fpmath:
2752         K->setMetadata(Kind, MDNode::getMostGenericFPMath(JMD, KMD));
2753         break;
2754       case LLVMContext::MD_invariant_load:
2755         // If K moves, only set the !invariant.load if it is present in both
2756         // instructions.
2757         if (DoesKMove)
2758           K->setMetadata(Kind, JMD);
2759         break;
2760       case LLVMContext::MD_nonnull:
2761         if (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))
2762           K->setMetadata(Kind, JMD);
2763         break;
2764       case LLVMContext::MD_invariant_group:
2765         // Preserve !invariant.group in K.
2766         break;
2767       case LLVMContext::MD_align:
2768         if (DoesKMove || !K->hasMetadata(LLVMContext::MD_noundef))
2769           K->setMetadata(
2770               Kind, MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
2771         break;
2772       case LLVMContext::MD_dereferenceable:
2773       case LLVMContext::MD_dereferenceable_or_null:
2774         if (DoesKMove)
2775           K->setMetadata(Kind,
2776             MDNode::getMostGenericAlignmentOrDereferenceable(JMD, KMD));
2777         break;
2778       case LLVMContext::MD_preserve_access_index:
2779         // Preserve !preserve.access.index in K.
2780         break;
2781       case LLVMContext::MD_noundef:
2782         // If K does move, keep noundef if it is present in both instructions.
2783         if (DoesKMove)
2784           K->setMetadata(Kind, JMD);
2785         break;
2786       case LLVMContext::MD_nontemporal:
2787         // Preserve !nontemporal if it is present on both instructions.
2788         K->setMetadata(Kind, JMD);
2789         break;
2790       case LLVMContext::MD_prof:
2791         if (DoesKMove)
2792           K->setMetadata(Kind, MDNode::getMergedProfMetadata(KMD, JMD, K, J));
2793         break;
2794     }
2795   }
  // Set !invariant.group from J if J has it. If both instructions have it
  // then we will just pick it from J, even when they are different.
  // Also make sure that K is a load or store; e.g., combining a bitcast with
  // a load could produce a bitcast carrying !invariant.group metadata, which
  // is invalid.
  // FIXME: we should try to preserve both invariant.group MDs when they
  // differ, but right now an instruction can only carry one invariant.group.
2802   if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group))
2803     if (isa<LoadInst>(K) || isa<StoreInst>(K))
2804       K->setMetadata(LLVMContext::MD_invariant_group, JMD);
2805 }
2806 
2807 void llvm::combineMetadataForCSE(Instruction *K, const Instruction *J,
2808                                  bool KDominatesJ) {
2809   unsigned KnownIDs[] = {LLVMContext::MD_tbaa,
2810                          LLVMContext::MD_alias_scope,
2811                          LLVMContext::MD_noalias,
2812                          LLVMContext::MD_range,
2813                          LLVMContext::MD_fpmath,
2814                          LLVMContext::MD_invariant_load,
2815                          LLVMContext::MD_nonnull,
2816                          LLVMContext::MD_invariant_group,
2817                          LLVMContext::MD_align,
2818                          LLVMContext::MD_dereferenceable,
2819                          LLVMContext::MD_dereferenceable_or_null,
2820                          LLVMContext::MD_access_group,
2821                          LLVMContext::MD_preserve_access_index,
2822                          LLVMContext::MD_prof,
2823                          LLVMContext::MD_nontemporal,
2824                          LLVMContext::MD_noundef};
2825   combineMetadata(K, J, KnownIDs, KDominatesJ);
2826 }
2827 
2828 void llvm::copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source) {
2829   SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
2830   Source.getAllMetadata(MD);
2831   MDBuilder MDB(Dest.getContext());
2832   Type *NewType = Dest.getType();
2833   const DataLayout &DL = Source.getModule()->getDataLayout();
2834   for (const auto &MDPair : MD) {
2835     unsigned ID = MDPair.first;
2836     MDNode *N = MDPair.second;
2837     // Note, essentially every kind of metadata should be preserved here! This
2838     // routine is supposed to clone a load instruction changing *only its type*.
2839     // The only metadata it makes sense to drop is metadata which is invalidated
2840     // when the pointer type changes. This should essentially never be the case
2841     // in LLVM, but we explicitly switch over only known metadata to be
2842     // conservatively correct. If you are adding metadata to LLVM which pertains
2843     // to loads, you almost certainly want to add it here.
2844     switch (ID) {
2845     case LLVMContext::MD_dbg:
2846     case LLVMContext::MD_tbaa:
2847     case LLVMContext::MD_prof:
2848     case LLVMContext::MD_fpmath:
2849     case LLVMContext::MD_tbaa_struct:
2850     case LLVMContext::MD_invariant_load:
2851     case LLVMContext::MD_alias_scope:
2852     case LLVMContext::MD_noalias:
2853     case LLVMContext::MD_nontemporal:
2854     case LLVMContext::MD_mem_parallel_loop_access:
2855     case LLVMContext::MD_access_group:
2856     case LLVMContext::MD_noundef:
2857       // All of these directly apply.
2858       Dest.setMetadata(ID, N);
2859       break;
2860 
2861     case LLVMContext::MD_nonnull:
2862       copyNonnullMetadata(Source, N, Dest);
2863       break;
2864 
2865     case LLVMContext::MD_align:
2866     case LLVMContext::MD_dereferenceable:
2867     case LLVMContext::MD_dereferenceable_or_null:
2868       // These only directly apply if the new type is also a pointer.
2869       if (NewType->isPointerTy())
2870         Dest.setMetadata(ID, N);
2871       break;
2872 
2873     case LLVMContext::MD_range:
2874       copyRangeMetadata(DL, Source, N, Dest);
2875       break;
2876     }
2877   }
2878 }
2879 
2880 void llvm::patchReplacementInstruction(Instruction *I, Value *Repl) {
2881   auto *ReplInst = dyn_cast<Instruction>(Repl);
2882   if (!ReplInst)
2883     return;
2884 
2885   // Patch the replacement so that it is not more restrictive than the value
2886   // being replaced.
2887   // Note that if 'I' is a load being replaced by some operation,
2888   // for example, by an arithmetic operation, then andIRFlags()
2889   // would just erase all math flags from the original arithmetic
2890   // operation, which is clearly not wanted and not needed.
2891   if (!isa<LoadInst>(I))
2892     ReplInst->andIRFlags(I);
2893 
2894   // FIXME: If both the original and replacement value are part of the
2895   // same control-flow region (meaning that the execution of one
2896   // guarantees the execution of the other), then we can combine the
2897   // noalias scopes here and do better than the general conservative
2898   // answer used in combineMetadata().
2899 
2900   // In general, GVN unifies expressions over different control-flow
2901   // regions, and so we need a conservative combination of the noalias
2902   // scopes.
2903   combineMetadataForCSE(ReplInst, I, false);
2904 }
2905 
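// Shared worker for the replaceDominatedUsesWith() overloads below. A typical
// (illustrative) use is GVN-style propagation: given "%cmp = icmp eq i32 %x, 0"
// guarding a branch, uses of %x dominated by the branch's true edge can be
// rewritten to the constant 0.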
2906 template <typename RootType, typename DominatesFn>
2907 static unsigned replaceDominatedUsesWith(Value *From, Value *To,
2908                                          const RootType &Root,
2909                                          const DominatesFn &Dominates) {
2910   assert(From->getType() == To->getType());
2911 
2912   unsigned Count = 0;
2913   for (Use &U : llvm::make_early_inc_range(From->uses())) {
2914     if (!Dominates(Root, U))
2915       continue;
    LLVM_DEBUG(dbgs() << "Replace dominated use of '" << From->getName()
                      << "' with " << *To << " in " << *U.getUser() << "\n");
    U.set(To);
2919     ++Count;
2920   }
2921   return Count;
2922 }
2923 
2924 unsigned llvm::replaceNonLocalUsesWith(Instruction *From, Value *To) {
  assert(From->getType() == To->getType());
  auto *BB = From->getParent();
  unsigned Count = 0;

  for (Use &U : llvm::make_early_inc_range(From->uses())) {
2930     auto *I = cast<Instruction>(U.getUser());
2931     if (I->getParent() == BB)
2932       continue;
2933     U.set(To);
2934     ++Count;
2935   }
2936   return Count;
2937 }
2938 
2939 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
2940                                         DominatorTree &DT,
2941                                         const BasicBlockEdge &Root) {
2942   auto Dominates = [&DT](const BasicBlockEdge &Root, const Use &U) {
2943     return DT.dominates(Root, U);
2944   };
2945   return ::replaceDominatedUsesWith(From, To, Root, Dominates);
2946 }
2947 
2948 unsigned llvm::replaceDominatedUsesWith(Value *From, Value *To,
2949                                         DominatorTree &DT,
2950                                         const BasicBlock *BB) {
2951   auto Dominates = [&DT](const BasicBlock *BB, const Use &U) {
2952     return DT.dominates(BB, U);
2953   };
2954   return ::replaceDominatedUsesWith(From, To, BB, Dominates);
2955 }
2956 
2957 bool llvm::callsGCLeafFunction(const CallBase *Call,
2958                                const TargetLibraryInfo &TLI) {
2959   // Check if the function is specifically marked as a gc leaf function.
2960   if (Call->hasFnAttr("gc-leaf-function"))
2961     return true;
2962   if (const Function *F = Call->getCalledFunction()) {
2963     if (F->hasFnAttribute("gc-leaf-function"))
2964       return true;
2965 
2966     if (auto IID = F->getIntrinsicID()) {
2967       // Most LLVM intrinsics do not take safepoints.
2968       return IID != Intrinsic::experimental_gc_statepoint &&
2969              IID != Intrinsic::experimental_deoptimize &&
2970              IID != Intrinsic::memcpy_element_unordered_atomic &&
2971              IID != Intrinsic::memmove_element_unordered_atomic;
2972     }
2973   }
2974 
  // Lib calls can be materialized by some passes, and won't be
  // marked as 'gc-leaf-function'. All available libcalls are
  // GC-leaf.
2978   LibFunc LF;
2979   if (TLI.getLibFunc(*Call, LF)) {
2980     return TLI.has(LF);
2981   }
2982 
2983   return false;
2984 }
2985 
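// Illustrative example of the integer translation below: copying !nonnull
// from a pointer load onto an i64 load yields !range !{i64 1, i64 0}, a
// wrapped range that permits every value except zero.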
2986 void llvm::copyNonnullMetadata(const LoadInst &OldLI, MDNode *N,
2987                                LoadInst &NewLI) {
2988   auto *NewTy = NewLI.getType();
2989 
2990   // This only directly applies if the new type is also a pointer.
2991   if (NewTy->isPointerTy()) {
2992     NewLI.setMetadata(LLVMContext::MD_nonnull, N);
2993     return;
2994   }
2995 
2996   // The only other translation we can do is to integral loads with !range
2997   // metadata.
2998   if (!NewTy->isIntegerTy())
2999     return;
3000 
3001   MDBuilder MDB(NewLI.getContext());
3002   const Value *Ptr = OldLI.getPointerOperand();
3003   auto *ITy = cast<IntegerType>(NewTy);
3004   auto *NullInt = ConstantExpr::getPtrToInt(
3005       ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
3006   auto *NonNullInt = ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
3007   NewLI.setMetadata(LLVMContext::MD_range,
3008                     MDB.createRange(NonNullInt, NullInt));
3009 }
3010 
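// Illustrative example: rewriting "%v = load i64, ptr %p, !range !0" with
// !0 = !{i64 1, i64 1024} as a pointer-typed load cannot keep !range, but
// because the old range excludes zero (and, on a 64-bit target, the widths
// match) the new load is given !nonnull instead.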
3011 void llvm::copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI,
3012                              MDNode *N, LoadInst &NewLI) {
3013   auto *NewTy = NewLI.getType();
3014   // Simply copy the metadata if the type did not change.
3015   if (NewTy == OldLI.getType()) {
3016     NewLI.setMetadata(LLVMContext::MD_range, N);
3017     return;
3018   }
3019 
  // Give up unless the load is being converted to a pointer type, where there
  // is a single very valuable mapping we can do reliably: a range that
  // excludes zero becomes !nonnull.
  // FIXME: It would be nice to propagate this in more ways, but the type
  // conversions make it hard.
3024   if (!NewTy->isPointerTy())
3025     return;
3026 
3027   unsigned BitWidth = DL.getPointerTypeSizeInBits(NewTy);
3028   if (BitWidth == OldLI.getType()->getScalarSizeInBits() &&
3029       !getConstantRangeFromMetadata(*N).contains(APInt(BitWidth, 0))) {
3030     MDNode *NN = MDNode::get(OldLI.getContext(), std::nullopt);
3031     NewLI.setMetadata(LLVMContext::MD_nonnull, NN);
3032   }
3033 }
3034 
3035 void llvm::dropDebugUsers(Instruction &I) {
3036   SmallVector<DbgVariableIntrinsic *, 1> DbgUsers;
3037   findDbgUsers(DbgUsers, &I);
3038   for (auto *DII : DbgUsers)
3039     DII->eraseFromParent();
3040 }
3041 
3042 void llvm::hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt,
3043                                     BasicBlock *BB) {
  // Since we are moving the instructions out of their basic block, we do not
  // retain their original debug locations (DILocations) or debug intrinsic
  // instructions.
3047   //
3048   // Doing so would degrade the debugging experience and adversely affect the
3049   // accuracy of profiling information.
3050   //
3051   // Currently, when hoisting the instructions, we take the following actions:
3052   // - Remove their debug intrinsic instructions.
3053   // - Set their debug locations to the values from the insertion point.
3054   //
3055   // As per PR39141 (comment #8), the more fundamental reason why the dbg.values
3056   // need to be deleted, is because there will not be any instructions with a
3057   // DILocation in either branch left after performing the transformation. We
3058   // can only insert a dbg.value after the two branches are joined again.
3059   //
3060   // See PR38762, PR39243 for more details.
3061   //
3062   // TODO: Extend llvm.dbg.value to take more than one SSA Value (PR39141) to
3063   // encode predicated DIExpressions that yield different results on different
3064   // code paths.
3065 
3066   for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
3067     Instruction *I = &*II;
3068     I->dropUBImplyingAttrsAndMetadata();
3069     if (I->isUsedByMetadata())
3070       dropDebugUsers(*I);
3071     if (I->isDebugOrPseudoInst()) {
3072       // Remove DbgInfo and pseudo probe Intrinsics.
3073       II = I->eraseFromParent();
3074       continue;
3075     }
3076     I->setDebugLoc(InsertPt->getDebugLoc());
3077     ++II;
3078   }
3079   DomBlock->splice(InsertPt->getIterator(), BB, BB->begin(),
3080                    BB->getTerminator()->getIterator());
3081 }
3082 
3083 namespace {
3084 
3085 /// A potential constituent of a bitreverse or bswap expression. See
3086 /// collectBitParts for a fuller explanation.
3087 struct BitPart {
3088   BitPart(Value *P, unsigned BW) : Provider(P) {
3089     Provenance.resize(BW);
3090   }
3091 
3092   /// The Value that this is a bitreverse/bswap of.
3093   Value *Provider;
3094 
  /// The "provenance" of each bit. Provenance[A] = B means that bit A of the
  /// result of this expression comes from bit B of Provider.
3097   SmallVector<int8_t, 32> Provenance; // int8_t means max size is i128.
3098 
3099   enum { Unset = -1 };
3100 };
3101 
3102 } // end anonymous namespace
3103 
3104 /// Analyze the specified subexpression and see if it is capable of providing
3105 /// pieces of a bswap or bitreverse. The subexpression provides a potential
3106 /// piece of a bswap or bitreverse if it can be proved that each non-zero bit in
3107 /// the output of the expression came from a corresponding bit in some other
3108 /// value. This function is recursive, and the end result is a mapping of
3109 /// bitnumber to bitnumber. It is the caller's responsibility to validate that
3110 /// the bitnumber to bitnumber mapping is correct for a bswap or bitreverse.
3111 ///
/// For example, if the current subexpression is "(shl i32 %X, 24)" then we know
3113 /// that the expression deposits the low byte of %X into the high byte of the
3114 /// result and that all other bits are zero. This expression is accepted and a
3115 /// BitPart is returned with Provider set to %X and Provenance[24-31] set to
3116 /// [0-7].
3117 ///
3118 /// For vector types, all analysis is performed at the per-element level. No
3119 /// cross-element analysis is supported (shuffle/insertion/reduction), and all
3120 /// constant masks must be splatted across all elements.
3121 ///
3122 /// To avoid revisiting values, the BitPart results are memoized into the
3123 /// provided map. To avoid unnecessary copying of BitParts, BitParts are
3124 /// constructed in-place in the \c BPS map. Because of this \c BPS needs to
3125 /// store BitParts objects, not pointers. As we need the concept of a nullptr
/// BitPart (the Value has been analyzed and the analysis failed), we use an
/// optional type instead to provide the same functionality.
3128 ///
3129 /// Because we pass around references into \c BPS, we must use a container that
3130 /// does not invalidate internal references (std::map instead of DenseMap).
3131 static const std::optional<BitPart> &
3132 collectBitParts(Value *V, bool MatchBSwaps, bool MatchBitReversals,
3133                 std::map<Value *, std::optional<BitPart>> &BPS, int Depth,
3134                 bool &FoundRoot) {
3135   auto I = BPS.find(V);
3136   if (I != BPS.end())
3137     return I->second;
3138 
3139   auto &Result = BPS[V] = std::nullopt;
3140   auto BitWidth = V->getType()->getScalarSizeInBits();
3141 
3142   // Can't do integer/elements > 128 bits.
3143   if (BitWidth > 128)
3144     return Result;
3145 
3146   // Prevent stack overflow by limiting the recursion depth
3147   if (Depth == BitPartRecursionMaxDepth) {
3148     LLVM_DEBUG(dbgs() << "collectBitParts max recursion depth reached.\n");
3149     return Result;
3150   }
3151 
3152   if (auto *I = dyn_cast<Instruction>(V)) {
3153     Value *X, *Y;
3154     const APInt *C;
3155 
3156     // If this is an or instruction, it may be an inner node of the bswap.
3157     if (match(V, m_Or(m_Value(X), m_Value(Y)))) {
3158       // Check we have both sources and they are from the same provider.
3159       const auto &A = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3160                                       Depth + 1, FoundRoot);
3161       if (!A || !A->Provider)
3162         return Result;
3163 
3164       const auto &B = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS,
3165                                       Depth + 1, FoundRoot);
3166       if (!B || A->Provider != B->Provider)
3167         return Result;
3168 
3169       // Try and merge the two together.
3170       Result = BitPart(A->Provider, BitWidth);
3171       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx) {
3172         if (A->Provenance[BitIdx] != BitPart::Unset &&
3173             B->Provenance[BitIdx] != BitPart::Unset &&
3174             A->Provenance[BitIdx] != B->Provenance[BitIdx])
3175           return Result = std::nullopt;
3176 
3177         if (A->Provenance[BitIdx] == BitPart::Unset)
3178           Result->Provenance[BitIdx] = B->Provenance[BitIdx];
3179         else
3180           Result->Provenance[BitIdx] = A->Provenance[BitIdx];
3181       }
3182 
3183       return Result;
3184     }
3185 
3186     // If this is a logical shift by a constant, recurse then shift the result.
3187     if (match(V, m_LogicalShift(m_Value(X), m_APInt(C)))) {
3188       const APInt &BitShift = *C;
3189 
3190       // Ensure the shift amount is defined.
3191       if (BitShift.uge(BitWidth))
3192         return Result;
3193 
3194       // For bswap-only, limit shift amounts to whole bytes, for an early exit.
3195       if (!MatchBitReversals && (BitShift.getZExtValue() % 8) != 0)
3196         return Result;
3197 
3198       const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3199                                         Depth + 1, FoundRoot);
3200       if (!Res)
3201         return Result;
3202       Result = Res;
3203 
3204       // Perform the "shift" on BitProvenance.
3205       auto &P = Result->Provenance;
3206       if (I->getOpcode() == Instruction::Shl) {
3207         P.erase(std::prev(P.end(), BitShift.getZExtValue()), P.end());
3208         P.insert(P.begin(), BitShift.getZExtValue(), BitPart::Unset);
3209       } else {
3210         P.erase(P.begin(), std::next(P.begin(), BitShift.getZExtValue()));
3211         P.insert(P.end(), BitShift.getZExtValue(), BitPart::Unset);
3212       }
3213 
3214       return Result;
3215     }
3216 
3217     // If this is a logical 'and' with a mask that clears bits, recurse then
3218     // unset the appropriate bits.
3219     if (match(V, m_And(m_Value(X), m_APInt(C)))) {
3220       const APInt &AndMask = *C;
3221 
3222       // Check that the mask allows a multiple of 8 bits for a bswap, for an
3223       // early exit.
3224       unsigned NumMaskedBits = AndMask.popcount();
3225       if (!MatchBitReversals && (NumMaskedBits % 8) != 0)
3226         return Result;
3227 
3228       const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3229                                         Depth + 1, FoundRoot);
3230       if (!Res)
3231         return Result;
3232       Result = Res;
3233 
3234       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3235         // If the AndMask is zero for this bit, clear the bit.
3236         if (AndMask[BitIdx] == 0)
3237           Result->Provenance[BitIdx] = BitPart::Unset;
3238       return Result;
3239     }
3240 
3241     // If this is a zext instruction zero extend the result.
3242     if (match(V, m_ZExt(m_Value(X)))) {
3243       const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3244                                         Depth + 1, FoundRoot);
3245       if (!Res)
3246         return Result;
3247 
3248       Result = BitPart(Res->Provider, BitWidth);
3249       auto NarrowBitWidth = X->getType()->getScalarSizeInBits();
3250       for (unsigned BitIdx = 0; BitIdx < NarrowBitWidth; ++BitIdx)
3251         Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3252       for (unsigned BitIdx = NarrowBitWidth; BitIdx < BitWidth; ++BitIdx)
3253         Result->Provenance[BitIdx] = BitPart::Unset;
3254       return Result;
3255     }
3256 
3257     // If this is a truncate instruction, extract the lower bits.
3258     if (match(V, m_Trunc(m_Value(X)))) {
3259       const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3260                                         Depth + 1, FoundRoot);
3261       if (!Res)
3262         return Result;
3263 
3264       Result = BitPart(Res->Provider, BitWidth);
3265       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3266         Result->Provenance[BitIdx] = Res->Provenance[BitIdx];
3267       return Result;
3268     }
3269 
    // BITREVERSE - most likely due to us previously matching a partial
    // bitreverse.
3272     if (match(V, m_BitReverse(m_Value(X)))) {
3273       const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3274                                         Depth + 1, FoundRoot);
3275       if (!Res)
3276         return Result;
3277 
3278       Result = BitPart(Res->Provider, BitWidth);
3279       for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3280         Result->Provenance[(BitWidth - 1) - BitIdx] = Res->Provenance[BitIdx];
3281       return Result;
3282     }
3283 
    // BSWAP - most likely due to us previously matching a partial bswap.
3285     if (match(V, m_BSwap(m_Value(X)))) {
3286       const auto &Res = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3287                                         Depth + 1, FoundRoot);
3288       if (!Res)
3289         return Result;
3290 
3291       unsigned ByteWidth = BitWidth / 8;
3292       Result = BitPart(Res->Provider, BitWidth);
3293       for (unsigned ByteIdx = 0; ByteIdx < ByteWidth; ++ByteIdx) {
3294         unsigned ByteBitOfs = ByteIdx * 8;
3295         for (unsigned BitIdx = 0; BitIdx < 8; ++BitIdx)
3296           Result->Provenance[(BitWidth - 8 - ByteBitOfs) + BitIdx] =
3297               Res->Provenance[ByteBitOfs + BitIdx];
3298       }
3299       return Result;
3300     }
3301 
    // Funnel 'double' shifts take 3 operands, 2 inputs and the shift
    // amount (taken modulo the bit width).
3304     // fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
3305     // fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
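    // E.g. (illustrative) fshl(X, Y, 8) on i32: result bits 8..31 come from
    // X bits 0..23 and result bits 0..7 come from Y bits 24..31.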
3306     if (match(V, m_FShl(m_Value(X), m_Value(Y), m_APInt(C))) ||
3307         match(V, m_FShr(m_Value(X), m_Value(Y), m_APInt(C)))) {
3308       // We can treat fshr as a fshl by flipping the modulo amount.
3309       unsigned ModAmt = C->urem(BitWidth);
3310       if (cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::fshr)
3311         ModAmt = BitWidth - ModAmt;
3312 
3313       // For bswap-only, limit shift amounts to whole bytes, for an early exit.
3314       if (!MatchBitReversals && (ModAmt % 8) != 0)
3315         return Result;
3316 
3317       // Check we have both sources and they are from the same provider.
3318       const auto &LHS = collectBitParts(X, MatchBSwaps, MatchBitReversals, BPS,
3319                                         Depth + 1, FoundRoot);
3320       if (!LHS || !LHS->Provider)
3321         return Result;
3322 
3323       const auto &RHS = collectBitParts(Y, MatchBSwaps, MatchBitReversals, BPS,
3324                                         Depth + 1, FoundRoot);
3325       if (!RHS || LHS->Provider != RHS->Provider)
3326         return Result;
3327 
3328       unsigned StartBitRHS = BitWidth - ModAmt;
3329       Result = BitPart(LHS->Provider, BitWidth);
3330       for (unsigned BitIdx = 0; BitIdx < StartBitRHS; ++BitIdx)
3331         Result->Provenance[BitIdx + ModAmt] = LHS->Provenance[BitIdx];
3332       for (unsigned BitIdx = 0; BitIdx < ModAmt; ++BitIdx)
3333         Result->Provenance[BitIdx] = RHS->Provenance[BitIdx + StartBitRHS];
3334       return Result;
3335     }
3336   }
3337 
3338   // If we've already found a root input value then we're never going to merge
3339   // these back together.
3340   if (FoundRoot)
3341     return Result;
3342 
3343   // Okay, we got to something that isn't a shift, 'or', 'and', etc. This must
3344   // be the root input value to the bswap/bitreverse.
3345   FoundRoot = true;
3346   Result = BitPart(V, BitWidth);
3347   for (unsigned BitIdx = 0; BitIdx < BitWidth; ++BitIdx)
3348     Result->Provenance[BitIdx] = BitIdx;
3349   return Result;
3350 }
3351 
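// Illustrative check: for a 32-bit bswap, provider bit 24 must land in result
// bit 0 (byte 3 -> byte 0): From=24, To=0 passes because the bit-in-byte
// offsets match and byte index 3 == 4 - 0 - 1.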
3352 static bool bitTransformIsCorrectForBSwap(unsigned From, unsigned To,
3353                                           unsigned BitWidth) {
3354   if (From % 8 != To % 8)
3355     return false;
3356   // Convert from bit indices to byte indices and check for a byte reversal.
3357   From >>= 3;
3358   To >>= 3;
3359   BitWidth >>= 3;
3360   return From == BitWidth - To - 1;
3361 }
3362 
3363 static bool bitTransformIsCorrectForBitReverse(unsigned From, unsigned To,
3364                                                unsigned BitWidth) {
3365   return From == BitWidth - To - 1;
3366 }
3367 
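// A minimal (illustrative) idiom this recognizes, for i16:
//   %hi = shl i16 %x, 8
//   %lo = lshr i16 %x, 8
//   %r  = or i16 %hi, %lo   ; rewritten to call i16 @llvm.bswap.i16(i16 %x)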
3368 bool llvm::recognizeBSwapOrBitReverseIdiom(
3369     Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
3370     SmallVectorImpl<Instruction *> &InsertedInsts) {
3371   if (!match(I, m_Or(m_Value(), m_Value())) &&
3372       !match(I, m_FShl(m_Value(), m_Value(), m_Value())) &&
3373       !match(I, m_FShr(m_Value(), m_Value(), m_Value())))
3374     return false;
3375   if (!MatchBSwaps && !MatchBitReversals)
3376     return false;
3377   Type *ITy = I->getType();
3378   if (!ITy->isIntOrIntVectorTy() || ITy->getScalarSizeInBits() > 128)
3379     return false;  // Can't do integer/elements > 128 bits.
3380 
3381   // Try to find all the pieces corresponding to the bswap.
3382   bool FoundRoot = false;
3383   std::map<Value *, std::optional<BitPart>> BPS;
3384   const auto &Res =
3385       collectBitParts(I, MatchBSwaps, MatchBitReversals, BPS, 0, FoundRoot);
3386   if (!Res)
3387     return false;
3388   ArrayRef<int8_t> BitProvenance = Res->Provenance;
3389   assert(all_of(BitProvenance,
3390                 [](int8_t I) { return I == BitPart::Unset || 0 <= I; }) &&
3391          "Illegal bit provenance index");
3392 
  // If the upper bits are zero, then attempt to perform the transform on a
  // truncated type.
3394   Type *DemandedTy = ITy;
3395   if (BitProvenance.back() == BitPart::Unset) {
3396     while (!BitProvenance.empty() && BitProvenance.back() == BitPart::Unset)
3397       BitProvenance = BitProvenance.drop_back();
3398     if (BitProvenance.empty())
3399       return false; // TODO - handle null value?
3400     DemandedTy = Type::getIntNTy(I->getContext(), BitProvenance.size());
3401     if (auto *IVecTy = dyn_cast<VectorType>(ITy))
3402       DemandedTy = VectorType::get(DemandedTy, IVecTy);
3403   }
3404 
3405   // Check BitProvenance hasn't found a source larger than the result type.
3406   unsigned DemandedBW = DemandedTy->getScalarSizeInBits();
3407   if (DemandedBW > ITy->getScalarSizeInBits())
3408     return false;
3409 
3410   // Now, is the bit permutation correct for a bswap or a bitreverse? We can
3411   // only byteswap values with an even number of bytes.
3412   APInt DemandedMask = APInt::getAllOnes(DemandedBW);
3413   bool OKForBSwap = MatchBSwaps && (DemandedBW % 16) == 0;
3414   bool OKForBitReverse = MatchBitReversals;
3415   for (unsigned BitIdx = 0;
3416        (BitIdx < DemandedBW) && (OKForBSwap || OKForBitReverse); ++BitIdx) {
3417     if (BitProvenance[BitIdx] == BitPart::Unset) {
3418       DemandedMask.clearBit(BitIdx);
3419       continue;
3420     }
3421     OKForBSwap &= bitTransformIsCorrectForBSwap(BitProvenance[BitIdx], BitIdx,
3422                                                 DemandedBW);
3423     OKForBitReverse &= bitTransformIsCorrectForBitReverse(BitProvenance[BitIdx],
3424                                                           BitIdx, DemandedBW);
3425   }
3426 
3427   Intrinsic::ID Intrin;
3428   if (OKForBSwap)
3429     Intrin = Intrinsic::bswap;
3430   else if (OKForBitReverse)
3431     Intrin = Intrinsic::bitreverse;
3432   else
3433     return false;
3434 
3435   Function *F = Intrinsic::getDeclaration(I->getModule(), Intrin, DemandedTy);
3436   Value *Provider = Res->Provider;
3437 
3438   // We may need to truncate the provider.
3439   if (DemandedTy != Provider->getType()) {
3440     auto *Trunc =
3441         CastInst::CreateIntegerCast(Provider, DemandedTy, false, "trunc", I);
3442     InsertedInsts.push_back(Trunc);
3443     Provider = Trunc;
3444   }
3445 
3446   Instruction *Result = CallInst::Create(F, Provider, "rev", I);
3447   InsertedInsts.push_back(Result);
3448 
3449   if (!DemandedMask.isAllOnes()) {
3450     auto *Mask = ConstantInt::get(DemandedTy, DemandedMask);
3451     Result = BinaryOperator::Create(Instruction::And, Result, Mask, "mask", I);
3452     InsertedInsts.push_back(Result);
3453   }
3454 
3455   // We may need to zeroextend back to the result type.
3456   if (ITy != Result->getType()) {
3457     auto *ExtInst = CastInst::CreateIntegerCast(Result, ITy, false, "zext", I);
3458     InsertedInsts.push_back(ExtInst);
3459   }
3460 
3461   return true;
3462 }
3463 
3464 // CodeGen has special handling for some string functions that may replace
3465 // them with target-specific intrinsics.  Since that'd skip our interceptors
3466 // in ASan/MSan/TSan/DFSan, and thus make us miss some memory accesses,
3467 // we mark affected calls as NoBuiltin, which will disable optimization
3468 // in CodeGen.
3469 void llvm::maybeMarkSanitizerLibraryCallNoBuiltin(
3470     CallInst *CI, const TargetLibraryInfo *TLI) {
3471   Function *F = CI->getCalledFunction();
3472   LibFunc Func;
3473   if (F && !F->hasLocalLinkage() && F->hasName() &&
3474       TLI->getLibFunc(F->getName(), Func) && TLI->hasOptimizedCodeGen(Func) &&
3475       !F->doesNotAccessMemory())
3476     CI->addFnAttr(Attribute::NoBuiltin);
3477 }
3478 
3479 bool llvm::canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx) {
3480   // We can't have a PHI with a metadata type.
3481   if (I->getOperand(OpIdx)->getType()->isMetadataTy())
3482     return false;
3483 
3484   // Early exit.
3485   if (!isa<Constant>(I->getOperand(OpIdx)))
3486     return true;
3487 
3488   switch (I->getOpcode()) {
3489   default:
3490     return true;
3491   case Instruction::Call:
3492   case Instruction::Invoke: {
3493     const auto &CB = cast<CallBase>(*I);
3494 
3495     // Can't handle inline asm. Skip it.
3496     if (CB.isInlineAsm())
3497       return false;
3498 
3499     // Constant bundle operands may need to retain their constant-ness for
3500     // correctness.
3501     if (CB.isBundleOperand(OpIdx))
3502       return false;
3503 
3504     if (OpIdx < CB.arg_size()) {
3505       // Some variadic intrinsics require constants in the variadic arguments,
3506       // which currently aren't markable as immarg.
3507       if (isa<IntrinsicInst>(CB) &&
3508           OpIdx >= CB.getFunctionType()->getNumParams()) {
3509         // This is known to be OK for stackmap.
3510         return CB.getIntrinsicID() == Intrinsic::experimental_stackmap;
3511       }
3512 
3513       // gcroot is a special case, since it requires a constant argument which
3514       // isn't also required to be a simple ConstantInt.
3515       if (CB.getIntrinsicID() == Intrinsic::gcroot)
3516         return false;
3517 
3518       // Some intrinsic operands are required to be immediates.
3519       return !CB.paramHasAttr(OpIdx, Attribute::ImmArg);
3520     }
3521 
    // It is never allowed to replace the callee operand of an intrinsic call,
    // but it may be possible for an ordinary call.
3524     return !isa<IntrinsicInst>(CB);
3525   }
3526   case Instruction::ShuffleVector:
3527     // Shufflevector masks are constant.
3528     return OpIdx != 2;
3529   case Instruction::Switch:
3530   case Instruction::ExtractValue:
3531     // All operands apart from the first are constant.
3532     return OpIdx == 0;
3533   case Instruction::InsertValue:
3534     // All operands apart from the first and the second are constant.
3535     return OpIdx < 2;
3536   case Instruction::Alloca:
3537     // Static allocas (constant size in the entry block) are handled by
3538     // prologue/epilogue insertion so they're free anyway. We definitely don't
3539     // want to make them non-constant.
3540     return !cast<AllocaInst>(I)->isStaticAlloca();
3541   case Instruction::GetElementPtr:
3542     if (OpIdx == 0)
3543       return true;
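    // Struct field indices must remain constant; e.g. (illustrative) in
    //   getelementptr {i32, i32}, ptr %p, i64 %i, i32 1
    // the trailing "i32 1" selects a field and cannot become a variable.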
3544     gep_type_iterator It = gep_type_begin(I);
3545     for (auto E = std::next(It, OpIdx); It != E; ++It)
3546       if (It.isStruct())
3547         return false;
3548     return true;
3549   }
3550 }
3551 
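// Illustrative behavior of invertCondition: for "%c = xor i1 %x, true" it
// simply returns %x; otherwise it reuses an existing "xor i1 %c, true" in the
// same block before creating a new one.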
3552 Value *llvm::invertCondition(Value *Condition) {
3553   // First: Check if it's a constant
3554   if (Constant *C = dyn_cast<Constant>(Condition))
3555     return ConstantExpr::getNot(C);
3556 
3557   // Second: If the condition is already inverted, return the original value
3558   Value *NotCondition;
3559   if (match(Condition, m_Not(m_Value(NotCondition))))
3560     return NotCondition;
3561 
3562   BasicBlock *Parent = nullptr;
3563   Instruction *Inst = dyn_cast<Instruction>(Condition);
3564   if (Inst)
3565     Parent = Inst->getParent();
3566   else if (Argument *Arg = dyn_cast<Argument>(Condition))
3567     Parent = &Arg->getParent()->getEntryBlock();
3568   assert(Parent && "Unsupported condition to invert");
3569 
3570   // Third: Check all the users for an invert
3571   for (User *U : Condition->users())
3572     if (Instruction *I = dyn_cast<Instruction>(U))
3573       if (I->getParent() == Parent && match(I, m_Not(m_Specific(Condition))))
3574         return I;
3575 
3576   // Last option: Create a new instruction
3577   auto *Inverted =
3578       BinaryOperator::CreateNot(Condition, Condition->getName() + ".inv");
3579   if (Inst && !isa<PHINode>(Inst))
3580     Inverted->insertAfter(Inst);
3581   else
3582     Inverted->insertBefore(&*Parent->getFirstInsertionPt());
3583   return Inverted;
3584 }
3585 
3586 bool llvm::inferAttributesFromOthers(Function &F) {
3587   // Note: We explicitly check for attributes rather than using cover functions
3588   // because some of the cover functions include the logic being implemented.
3589 
3590   bool Changed = false;
3591   // readnone + not convergent implies nosync
3592   if (!F.hasFnAttribute(Attribute::NoSync) &&
3593       F.doesNotAccessMemory() && !F.isConvergent()) {
3594     F.setNoSync();
3595     Changed = true;
3596   }
3597 
3598   // readonly implies nofree
3599   if (!F.hasFnAttribute(Attribute::NoFree) && F.onlyReadsMemory()) {
3600     F.setDoesNotFreeMemory();
3601     Changed = true;
3602   }
3603 
3604   // willreturn implies mustprogress
3605   if (!F.hasFnAttribute(Attribute::MustProgress) && F.willReturn()) {
3606     F.setMustProgress();
3607     Changed = true;
3608   }
3609 
3610   // TODO: There are a bunch of cases of restrictive memory effects we
3611   // can infer by inspecting arguments of argmemonly-ish functions.
3612 
3613   return Changed;
3614 }
3615