//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities ------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines several CodeGen-specific LLVM IR analysis utilities.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/CodeGen/Analysis.h"
14 #include "llvm/Analysis/ValueTracking.h"
15 #include "llvm/CodeGen/MachineFunction.h"
16 #include "llvm/CodeGen/TargetInstrInfo.h"
17 #include "llvm/CodeGen/TargetLowering.h"
18 #include "llvm/CodeGen/TargetSubtargetInfo.h"
19 #include "llvm/IR/DataLayout.h"
20 #include "llvm/IR/DerivedTypes.h"
21 #include "llvm/IR/Function.h"
22 #include "llvm/IR/Instructions.h"
23 #include "llvm/IR/IntrinsicInst.h"
24 #include "llvm/IR/Module.h"
25 #include "llvm/Support/ErrorHandling.h"
26 #include "llvm/Target/TargetMachine.h"
27 
28 using namespace llvm;
29 
30 /// Compute the linearized index of a member in a nested aggregate/struct/array
31 /// by recursing and accumulating CurIndex as long as there are indices in the
32 /// index list.
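///
/// For example (illustrative): for %T = { i32, [2 x float], i8 } the leaves
/// linearize as i32 -> 0, float -> 1, float -> 2, i8 -> 3, so the index list
/// {1, 1} (the second float) yields 2.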
33 unsigned llvm::ComputeLinearIndex(Type *Ty,
34                                   const unsigned *Indices,
35                                   const unsigned *IndicesEnd,
36                                   unsigned CurIndex) {
37   // Base case: We're done.
38   if (Indices && Indices == IndicesEnd)
39     return CurIndex;
40 
41   // Given a struct type, recursively traverse the elements.
42   if (StructType *STy = dyn_cast<StructType>(Ty)) {
43     for (auto I : llvm::enumerate(STy->elements())) {
44       Type *ET = I.value();
45       if (Indices && *Indices == I.index())
46         return ComputeLinearIndex(ET, Indices + 1, IndicesEnd, CurIndex);
47       CurIndex = ComputeLinearIndex(ET, nullptr, nullptr, CurIndex);
48     }
    assert(!Indices && "Unexpected out-of-bounds index");
50     return CurIndex;
51   }
52   // Given an array type, recursively traverse the elements.
53   else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
54     Type *EltTy = ATy->getElementType();
55     unsigned NumElts = ATy->getNumElements();
    // Compute the linear offset added by stepping over one array element.
    unsigned EltLinearOffset = ComputeLinearIndex(EltTy, nullptr, nullptr, 0);
    if (Indices) {
      assert(*Indices < NumElts && "Unexpected out-of-bounds index");
      // The index is within the array: compute the offset of the requested
      // element and recurse into it with the remaining indices.
      CurIndex += EltLinearOffset * *Indices;
      return ComputeLinearIndex(EltTy, Indices + 1, IndicesEnd, CurIndex);
64     }
    CurIndex += EltLinearOffset * NumElts;
66     return CurIndex;
67   }
  // A non-aggregate type occupies exactly one slot in the linearized form;
  // step past it.
69   return CurIndex + 1;
70 }
71 
72 /// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
73 /// EVTs that represent all the individual underlying
74 /// non-aggregate types that comprise it.
75 ///
76 /// If Offsets is non-null, it points to a vector to be filled in
77 /// with the in-memory offsets of each of the individual values.
78 ///
79 void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
80                            Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
81                            SmallVectorImpl<EVT> *MemVTs,
82                            SmallVectorImpl<uint64_t> *Offsets,
83                            uint64_t StartingOffset) {
84   // Given a struct type, recursively traverse the elements.
85   if (StructType *STy = dyn_cast<StructType>(Ty)) {
86     // If the Offsets aren't needed, don't query the struct layout. This allows
87     // us to support structs with scalable vectors for operations that don't
88     // need offsets.
89     const StructLayout *SL = Offsets ? DL.getStructLayout(STy) : nullptr;
90     for (StructType::element_iterator EB = STy->element_begin(),
91                                       EI = EB,
92                                       EE = STy->element_end();
93          EI != EE; ++EI) {
94       // Don't compute the element offset if we didn't get a StructLayout above.
95       uint64_t EltOffset = SL ? SL->getElementOffset(EI - EB) : 0;
96       ComputeValueVTs(TLI, DL, *EI, ValueVTs, MemVTs, Offsets,
97                       StartingOffset + EltOffset);
98     }
99     return;
100   }
101   // Given an array type, recursively traverse the elements.
102   if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
103     Type *EltTy = ATy->getElementType();
104     uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
105     for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
106       ComputeValueVTs(TLI, DL, EltTy, ValueVTs, MemVTs, Offsets,
107                       StartingOffset + i * EltSize);
108     return;
109   }
110   // Interpret void as zero return values.
111   if (Ty->isVoidTy())
112     return;
113   // Base case: we can get an EVT for this LLVM IR type.
114   ValueVTs.push_back(TLI.getValueType(DL, Ty));
115   if (MemVTs)
116     MemVTs->push_back(TLI.getMemValueType(DL, Ty));
117   if (Offsets)
118     Offsets->push_back(StartingOffset);
119 }
120 
121 void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
122                            Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
123                            SmallVectorImpl<uint64_t> *Offsets,
124                            uint64_t StartingOffset) {
125   return ComputeValueVTs(TLI, DL, Ty, ValueVTs, /*MemVTs=*/nullptr, Offsets,
126                          StartingOffset);
127 }
128 
129 void llvm::computeValueLLTs(const DataLayout &DL, Type &Ty,
130                             SmallVectorImpl<LLT> &ValueTys,
131                             SmallVectorImpl<uint64_t> *Offsets,
132                             uint64_t StartingOffset) {
133   // Given a struct type, recursively traverse the elements.
134   if (StructType *STy = dyn_cast<StructType>(&Ty)) {
135     // If the Offsets aren't needed, don't query the struct layout. This allows
136     // us to support structs with scalable vectors for operations that don't
137     // need offsets.
138     const StructLayout *SL = Offsets ? DL.getStructLayout(STy) : nullptr;
139     for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
140       uint64_t EltOffset = SL ? SL->getElementOffset(I) : 0;
141       computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
142                        StartingOffset + EltOffset);
143     }
144     return;
145   }
146   // Given an array type, recursively traverse the elements.
147   if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
148     Type *EltTy = ATy->getElementType();
149     uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
150     for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
151       computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
152                        StartingOffset + i * EltSize);
153     return;
154   }
155   // Interpret void as zero return values.
156   if (Ty.isVoidTy())
157     return;
158   // Base case: we can get an LLT for this LLVM IR type.
159   ValueTys.push_back(getLLTForType(Ty, DL));
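  // Note that, unlike ComputeValueVTs, the offsets recorded here are in bits,
  // hence the multiplication by 8 below. For example (illustrative; exact
  // offsets depend on the DataLayout), { i32, i64 } typically yields
  // ValueTys = [s32, s64] and Offsets = [0, 64].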
160   if (Offsets != nullptr)
161     Offsets->push_back(StartingOffset * 8);
162 }
163 
164 /// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
165 GlobalValue *llvm::ExtractTypeInfo(Value *V) {
166   V = V->stripPointerCasts();
167   GlobalValue *GV = dyn_cast<GlobalValue>(V);
168   GlobalVariable *Var = dyn_cast<GlobalVariable>(V);
169 
170   if (Var && Var->getName() == "llvm.eh.catch.all.value") {
171     assert(Var->hasInitializer() &&
172            "The EH catch-all value must have an initializer");
173     Value *Init = Var->getInitializer();
174     GV = dyn_cast<GlobalValue>(Init);
175     if (!GV) V = cast<ConstantPointerNull>(Init);
176   }
177 
178   assert((GV || isa<ConstantPointerNull>(V)) &&
179          "TypeInfo must be a global variable or NULL");
180   return GV;
181 }
182 
183 /// getFCmpCondCode - Return the ISD condition code corresponding to
184 /// the given LLVM IR floating-point condition code.  This includes
185 /// consideration of global floating-point math flags.
186 ///
187 ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
188   switch (Pred) {
189   case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
190   case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
191   case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
192   case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
193   case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
194   case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
195   case FCmpInst::FCMP_ONE:   return ISD::SETONE;
196   case FCmpInst::FCMP_ORD:   return ISD::SETO;
197   case FCmpInst::FCMP_UNO:   return ISD::SETUO;
198   case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
199   case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
200   case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
201   case FCmpInst::FCMP_ULT:   return ISD::SETULT;
202   case FCmpInst::FCMP_ULE:   return ISD::SETULE;
203   case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
204   case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
205   default: llvm_unreachable("Invalid FCmp predicate opcode!");
206   }
207 }
208 
209 ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
210   switch (CC) {
211     case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
212     case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
213     case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
214     case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
215     case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
216     case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
217     default: return CC;
218   }
219 }
220 
221 ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
222   switch (Pred) {
223   case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
224   case ICmpInst::ICMP_NE:  return ISD::SETNE;
225   case ICmpInst::ICMP_SLE: return ISD::SETLE;
226   case ICmpInst::ICMP_ULE: return ISD::SETULE;
227   case ICmpInst::ICMP_SGE: return ISD::SETGE;
228   case ICmpInst::ICMP_UGE: return ISD::SETUGE;
229   case ICmpInst::ICMP_SLT: return ISD::SETLT;
230   case ICmpInst::ICMP_ULT: return ISD::SETULT;
231   case ICmpInst::ICMP_SGT: return ISD::SETGT;
232   case ICmpInst::ICMP_UGT: return ISD::SETUGT;
233   default:
234     llvm_unreachable("Invalid ICmp predicate opcode!");
235   }
236 }
237 
238 ICmpInst::Predicate llvm::getICmpCondCode(ISD::CondCode Pred) {
239   switch (Pred) {
240   case ISD::SETEQ:
241     return ICmpInst::ICMP_EQ;
242   case ISD::SETNE:
243     return ICmpInst::ICMP_NE;
244   case ISD::SETLE:
245     return ICmpInst::ICMP_SLE;
246   case ISD::SETULE:
247     return ICmpInst::ICMP_ULE;
248   case ISD::SETGE:
249     return ICmpInst::ICMP_SGE;
250   case ISD::SETUGE:
251     return ICmpInst::ICMP_UGE;
252   case ISD::SETLT:
253     return ICmpInst::ICMP_SLT;
254   case ISD::SETULT:
255     return ICmpInst::ICMP_ULT;
256   case ISD::SETGT:
257     return ICmpInst::ICMP_SGT;
258   case ISD::SETUGT:
259     return ICmpInst::ICMP_UGT;
260   default:
261     llvm_unreachable("Invalid ISD integer condition code!");
262   }
263 }
264 
265 static bool isNoopBitcast(Type *T1, Type *T2,
266                           const TargetLoweringBase& TLI) {
267   return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
268          (isa<VectorType>(T1) && isa<VectorType>(T2) &&
269           TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
270 }
271 
272 /// Look through operations that will be free to find the earliest source of
273 /// this value.
274 ///
275 /// @param ValLoc If V has aggregate type, we will be interested in a particular
276 /// scalar component. This records its address; the reverse of this list gives a
277 /// sequence of indices appropriate for an extractvalue to locate the important
278 /// value. This value is updated during the function and on exit will indicate
279 /// similar information for the Value returned.
280 ///
281 /// @param DataBits If this function looks through truncate instructions, this
282 /// will record the smallest size attained.
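///
/// For example (illustrative), given
///   %agg = insertvalue { i32, i32 } undef, i32 %x, 0
///   %val = extractvalue { i32, i32 } %agg, 0
/// this function traces %val back to %x: the extractvalue pushes index 0 onto
/// ValLoc and the matching insertvalue pops it again.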
283 static const Value *getNoopInput(const Value *V,
284                                  SmallVectorImpl<unsigned> &ValLoc,
285                                  unsigned &DataBits,
286                                  const TargetLoweringBase &TLI,
287                                  const DataLayout &DL) {
288   while (true) {
    // Try to look through V; if V is not an instruction, it can't be looked
    // through.
291     const Instruction *I = dyn_cast<Instruction>(V);
292     if (!I || I->getNumOperands() == 0) return V;
293     const Value *NoopInput = nullptr;
294 
295     Value *Op = I->getOperand(0);
296     if (isa<BitCastInst>(I)) {
297       // Look through truly no-op bitcasts.
298       if (isNoopBitcast(Op->getType(), I->getType(), TLI))
299         NoopInput = Op;
300     } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptrs with all-zero indices.
302       if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
303         NoopInput = Op;
304     } else if (isa<IntToPtrInst>(I)) {
305       // Look through inttoptr.
306       // Make sure this isn't a truncating or extending cast.  We could
307       // support this eventually, but don't bother for now.
308       if (!isa<VectorType>(I->getType()) &&
309           DL.getPointerSizeInBits() ==
310               cast<IntegerType>(Op->getType())->getBitWidth())
311         NoopInput = Op;
312     } else if (isa<PtrToIntInst>(I)) {
313       // Look through ptrtoint.
314       // Make sure this isn't a truncating or extending cast.  We could
315       // support this eventually, but don't bother for now.
316       if (!isa<VectorType>(I->getType()) &&
317           DL.getPointerSizeInBits() ==
318               cast<IntegerType>(I->getType())->getBitWidth())
319         NoopInput = Op;
320     } else if (isa<TruncInst>(I) &&
321                TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits =
          std::min((uint64_t)DataBits,
                   I->getType()->getPrimitiveSizeInBits().getFixedValue());
324       NoopInput = Op;
325     } else if (auto *CB = dyn_cast<CallBase>(I)) {
326       const Value *ReturnedOp = CB->getReturnedArgOperand();
327       if (ReturnedOp && isNoopBitcast(ReturnedOp->getType(), I->getType(), TLI))
328         NoopInput = ReturnedOp;
329     } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // The value may come from either the aggregate or the inserted scalar.
331       ArrayRef<unsigned> InsertLoc = IVI->getIndices();
332       if (ValLoc.size() >= InsertLoc.size() &&
333           std::equal(InsertLoc.begin(), InsertLoc.end(), ValLoc.rbegin())) {
334         // The type being inserted is a nested sub-type of the aggregate; we
335         // have to remove those initial indices to get the location we're
336         // interested in for the operand.
337         ValLoc.resize(ValLoc.size() - InsertLoc.size());
338         NoopInput = IVI->getInsertedValueOperand();
339       } else {
340         // The struct we're inserting into has the value we're interested in, no
341         // change of address.
342         NoopInput = Op;
343       }
344     } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
345       // The part we're interested in will inevitably be some sub-section of the
346       // previous aggregate. Combine the two paths to obtain the true address of
347       // our element.
348       ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
349       ValLoc.append(ExtractLoc.rbegin(), ExtractLoc.rend());
350       NoopInput = Op;
351     }
352     // Terminate if we couldn't find anything to look through.
353     if (!NoopInput)
354       return V;
355 
356     V = NoopInput;
357   }
358 }
359 
360 /// Return true if this scalar return value only has bits discarded on its path
361 /// from the "tail call" to the "ret". This includes the obvious noop
362 /// instructions handled by getNoopInput above as well as free truncations (or
363 /// extensions prior to the call).
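///
/// For example (a sketch; whether the truncate is free is decided by
/// TLI.allowTruncateForTailCall), in
///   %call = tail call i64 @f()
///   %trunc = trunc i64 %call to i32
///   ret i32 %trunc
/// the ret merely discards the high bits of %call, so the slot is compatible
/// provided differing sizes are allowed.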
364 static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
365                                  SmallVectorImpl<unsigned> &RetIndices,
366                                  SmallVectorImpl<unsigned> &CallIndices,
367                                  bool AllowDifferingSizes,
368                                  const TargetLoweringBase &TLI,
369                                  const DataLayout &DL) {
370 
371   // Trace the sub-value needed by the return value as far back up the graph as
372   // possible, in the hope that it will intersect with the value produced by the
373   // call. In the simple case with no "returned" attribute, the hope is actually
374   // that we end up back at the tail call instruction itself.
375   unsigned BitsRequired = UINT_MAX;
376   RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI, DL);
377 
378   // If this slot in the value returned is undef, it doesn't matter what the
379   // call puts there, it'll be fine.
380   if (isa<UndefValue>(RetVal))
381     return true;
382 
383   // Now do a similar search up through the graph to find where the value
  // actually returned by the "tail call" comes from. In the simple case
  // without a "returned" attribute, the search stops immediately and the
  // loop is a no-op.
387   unsigned BitsProvided = UINT_MAX;
388   CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI, DL);
389 
390   // There's no hope if we can't actually trace them to (the same part of!) the
391   // same value.
392   if (CallVal != RetVal || CallIndices != RetIndices)
393     return false;
394 
395   // However, intervening truncates may have made the call non-tail. Make sure
396   // all the bits that are needed by the "ret" have been provided by the "tail
397   // call". FIXME: with sufficiently cunning bit-tracking, we could look through
398   // extensions too.
399   if (BitsProvided < BitsRequired ||
400       (!AllowDifferingSizes && BitsProvided != BitsRequired))
401     return false;
402 
403   return true;
404 }
405 
406 /// For an aggregate type, determine whether a given index is within bounds or
407 /// not.
408 static bool indexReallyValid(Type *T, unsigned Idx) {
409   if (ArrayType *AT = dyn_cast<ArrayType>(T))
410     return Idx < AT->getNumElements();
411 
412   return Idx < cast<StructType>(T)->getNumElements();
413 }
414 
415 /// Move the given iterators to the next leaf type in depth first traversal.
416 ///
417 /// Performs a depth-first traversal of the type as specified by its arguments,
418 /// stopping at the next leaf node (which may be a legitimate scalar type or an
419 /// empty struct or array).
420 ///
421 /// @param SubTypes List of the partial components making up the type from
422 /// outermost to innermost non-empty aggregate. The element currently
423 /// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
424 ///
425 /// @param Path Set of extractvalue indices leading from the outermost type
426 /// (SubTypes[0]) to the leaf node currently represented.
427 ///
428 /// @returns true if a new type was found, false otherwise. Calling this
429 /// function again on a finished iterator will repeatedly return
430 /// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
/// aggregate or a non-aggregate type.
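///
/// For example (illustrative): for { i32, { i64, float } }, starting from
/// Path = [0] (the i32), successive calls move to [1, 0] (the i64), then
/// [1, 1] (the float), and then return false.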
432 static bool advanceToNextLeafType(SmallVectorImpl<Type *> &SubTypes,
433                                   SmallVectorImpl<unsigned> &Path) {
434   // First march back up the tree until we can successfully increment one of the
435   // coordinates in Path.
436   while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
437     Path.pop_back();
438     SubTypes.pop_back();
439   }
440 
441   // If we reached the top, then the iterator is done.
442   if (Path.empty())
443     return false;
444 
445   // We know there's *some* valid leaf now, so march back down the tree picking
446   // out the left-most element at each node.
447   ++Path.back();
448   Type *DeeperType =
449       ExtractValueInst::getIndexedType(SubTypes.back(), Path.back());
450   while (DeeperType->isAggregateType()) {
451     if (!indexReallyValid(DeeperType, 0))
452       return true;
453 
454     SubTypes.push_back(DeeperType);
455     Path.push_back(0);
456 
457     DeeperType = ExtractValueInst::getIndexedType(DeeperType, 0);
458   }
459 
460   return true;
461 }
462 
463 /// Find the first non-empty, scalar-like type in Next and setup the iterator
464 /// components.
465 ///
466 /// Assuming Next is an aggregate of some kind, this function will traverse the
467 /// tree from left to right (i.e. depth-first) looking for the first
468 /// non-aggregate type which will play a role in function return.
469 ///
470 /// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would setup
471 /// Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the first
472 /// i32 in that type.
473 static bool firstRealType(Type *Next, SmallVectorImpl<Type *> &SubTypes,
474                           SmallVectorImpl<unsigned> &Path) {
475   // First initialise the iterator components to the first "leaf" node
476   // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
477   // despite nominally being an aggregate).
478   while (Type *FirstInner = ExtractValueInst::getIndexedType(Next, 0)) {
479     SubTypes.push_back(Next);
480     Path.push_back(0);
481     Next = FirstInner;
482   }
483 
484   // If there's no Path now, Next was originally scalar already (or empty
485   // leaf). We're done.
486   if (Path.empty())
487     return true;
488 
489   // Otherwise, use normal iteration to keep looking through the tree until we
490   // find a non-aggregate type.
491   while (ExtractValueInst::getIndexedType(SubTypes.back(), Path.back())
492              ->isAggregateType()) {
493     if (!advanceToNextLeafType(SubTypes, Path))
494       return false;
495   }
496 
497   return true;
498 }
499 
500 /// Set the iterator data-structures to the next non-empty, non-aggregate
501 /// subtype.
502 static bool nextRealType(SmallVectorImpl<Type *> &SubTypes,
503                          SmallVectorImpl<unsigned> &Path) {
504   do {
505     if (!advanceToNextLeafType(SubTypes, Path))
506       return false;
507 
508     assert(!Path.empty() && "found a leaf but didn't set the path?");
509   } while (ExtractValueInst::getIndexedType(SubTypes.back(), Path.back())
510                ->isAggregateType());
511 
512   return true;
513 }
514 
515 
516 /// Test if the given instruction is in a position to be optimized
517 /// with a tail-call. This roughly means that it's in a block with
518 /// a return and there's nothing that needs to be scheduled
519 /// between it and the return.
520 ///
521 /// This function only tests target-independent requirements.
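///
/// For example (a sketch; target-specific checks may still reject the call),
/// a call followed only by the return of its result is in tail call position:
///   %r = tail call i32 @callee()
///   ret i32 %r
/// whereas an interposing store, for instance, would disqualify it.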
522 bool llvm::isInTailCallPosition(const CallBase &Call, const TargetMachine &TM) {
523   const BasicBlock *ExitBB = Call.getParent();
524   const Instruction *Term = ExitBB->getTerminator();
525   const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);
526 
527   // The block must end in a return statement or unreachable.
528   //
529   // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
530   // an unreachable, for now. The way tailcall optimization is currently
531   // implemented means it will add an epilogue followed by a jump. That is
532   // not profitable. Also, if the callee is a special function (e.g.
533   // longjmp on x86), it can end up causing miscompilation that has not
534   // been fully understood.
535   if (!Ret && ((!TM.Options.GuaranteedTailCallOpt &&
536                 Call.getCallingConv() != CallingConv::Tail &&
537                 Call.getCallingConv() != CallingConv::SwiftTail) ||
538                !isa<UnreachableInst>(Term)))
539     return false;
540 
  // If the call will have a chain, make sure no other instruction that will
  // have a chain interposes between the call and the return.
543   // Check for all calls including speculatable functions.
544   for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
545     if (&*BBI == &Call)
546       break;
547     // Debug info intrinsics do not get in the way of tail call optimization.
548     // Pseudo probe intrinsics do not block tail call optimization either.
549     if (BBI->isDebugOrPseudoInst())
550       continue;
551     // A lifetime end, assume or noalias.decl intrinsic should not stop tail
552     // call optimization.
553     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(BBI))
554       if (II->getIntrinsicID() == Intrinsic::lifetime_end ||
555           II->getIntrinsicID() == Intrinsic::assume ||
556           II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl)
557         continue;
558     if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
559         !isSafeToSpeculativelyExecute(&*BBI))
560       return false;
561   }
562 
563   const Function *F = ExitBB->getParent();
564   return returnTypeIsEligibleForTailCall(
565       F, &Call, Ret, *TM.getSubtargetImpl(*F)->getTargetLowering());
566 }
567 
568 bool llvm::attributesPermitTailCall(const Function *F, const Instruction *I,
569                                     const ReturnInst *Ret,
570                                     const TargetLoweringBase &TLI,
571                                     bool *AllowDifferingSizes) {
572   // ADS may be null, so don't write to it directly.
573   bool DummyADS;
574   bool &ADS = AllowDifferingSizes ? *AllowDifferingSizes : DummyADS;
575   ADS = true;
576 
577   AttrBuilder CallerAttrs(F->getContext(), F->getAttributes().getRetAttrs());
578   AttrBuilder CalleeAttrs(F->getContext(),
579                           cast<CallInst>(I)->getAttributes().getRetAttrs());
580 
  // The following attributes are completely benign as far as the calling
  // convention goes; they shouldn't affect whether the call is a tail call.
583   for (const auto &Attr : {Attribute::Alignment, Attribute::Dereferenceable,
584                            Attribute::DereferenceableOrNull, Attribute::NoAlias,
585                            Attribute::NonNull, Attribute::NoUndef}) {
586     CallerAttrs.removeAttribute(Attr);
587     CalleeAttrs.removeAttribute(Attr);
588   }
589 
590   if (CallerAttrs.contains(Attribute::ZExt)) {
591     if (!CalleeAttrs.contains(Attribute::ZExt))
592       return false;
593 
594     ADS = false;
595     CallerAttrs.removeAttribute(Attribute::ZExt);
596     CalleeAttrs.removeAttribute(Attribute::ZExt);
597   } else if (CallerAttrs.contains(Attribute::SExt)) {
598     if (!CalleeAttrs.contains(Attribute::SExt))
599       return false;
600 
601     ADS = false;
602     CallerAttrs.removeAttribute(Attribute::SExt);
603     CalleeAttrs.removeAttribute(Attribute::SExt);
604   }
605 
606   // Drop sext and zext return attributes if the result is not used.
607   // This enables tail calls for code like:
608   //
609   // define void @caller() {
610   // entry:
611   //   %unused_result = tail call zeroext i1 @callee()
612   //   br label %retlabel
613   // retlabel:
614   //   ret void
615   // }
616   if (I->use_empty()) {
617     CalleeAttrs.removeAttribute(Attribute::SExt);
618     CalleeAttrs.removeAttribute(Attribute::ZExt);
619   }
620 
621   // If they're still different, there's some facet we don't understand
622   // (currently only "inreg", but in future who knows). It may be OK but the
623   // only safe option is to reject the tail call.
624   return CallerAttrs == CalleeAttrs;
625 }
626 
/// Check whether B is a bitcast from one pointer type to another whose
/// source operand is equal to A.
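///
/// For example (illustrative, with typed pointers), given
///   %b = bitcast i32* %a to i8*
/// isPointerBitcastEqualTo(%a, %b) returns true.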
629 static bool isPointerBitcastEqualTo(const Value *A, const Value *B) {
630   assert(A && B && "Expected non-null inputs!");
631 
632   auto *BitCastIn = dyn_cast<BitCastInst>(B);
633 
634   if (!BitCastIn)
635     return false;
636 
637   if (!A->getType()->isPointerTy() || !B->getType()->isPointerTy())
638     return false;
639 
640   return A == BitCastIn->getOperand(0);
641 }
642 
643 bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
644                                            const Instruction *I,
645                                            const ReturnInst *Ret,
646                                            const TargetLoweringBase &TLI) {
647   // If the block ends with a void return or unreachable, it doesn't matter
648   // what the call's return type is.
649   if (!Ret || Ret->getNumOperands() == 0) return true;
650 
651   // If the return value is undef, it doesn't matter what the call's
652   // return type is.
653   if (isa<UndefValue>(Ret->getOperand(0))) return true;
654 
655   // Make sure the attributes attached to each return are compatible.
656   bool AllowDifferingSizes;
657   if (!attributesPermitTailCall(F, I, Ret, TLI, &AllowDifferingSizes))
658     return false;
659 
660   const Value *RetVal = Ret->getOperand(0), *CallVal = I;
  // An intrinsic like llvm.memcpy has no return value, but the expanded
  // libcall may or may not have one. On most platforms it will be expanded
  // to memcpy in libc, which returns the first argument. On other platforms,
  // like arm-none-eabi, memcpy may be expanded as a library call with no
  // return value, such as __aeabi_memcpy.
666   const CallInst *Call = cast<CallInst>(I);
667   if (Function *F = Call->getCalledFunction()) {
668     Intrinsic::ID IID = F->getIntrinsicID();
669     if (((IID == Intrinsic::memcpy &&
670           TLI.getLibcallName(RTLIB::MEMCPY) == StringRef("memcpy")) ||
671          (IID == Intrinsic::memmove &&
672           TLI.getLibcallName(RTLIB::MEMMOVE) == StringRef("memmove")) ||
673          (IID == Intrinsic::memset &&
674           TLI.getLibcallName(RTLIB::MEMSET) == StringRef("memset"))) &&
675         (RetVal == Call->getArgOperand(0) ||
676          isPointerBitcastEqualTo(RetVal, Call->getArgOperand(0))))
677       return true;
678   }
679 
680   SmallVector<unsigned, 4> RetPath, CallPath;
681   SmallVector<Type *, 4> RetSubTypes, CallSubTypes;
682 
683   bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
684   bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);
685 
  // Nothing's actually returned; it doesn't matter what the callee put there,
  // so it's a valid tail call.
688   if (RetEmpty)
689     return true;
690 
691   // Iterate pairwise through each of the value types making up the tail call
692   // and the corresponding return. For each one we want to know whether it's
693   // essentially going directly from the tail call to the ret, via operations
694   // that end up not generating any code.
695   //
696   // We allow a certain amount of covariance here. For example it's permitted
697   // for the tail call to define more bits than the ret actually cares about
698   // (e.g. via a truncate).
699   do {
700     if (CallEmpty) {
701       // We've exhausted the values produced by the tail call instruction, the
702       // rest are essentially undef. The type doesn't really matter, but we need
703       // *something*.
704       Type *SlotType =
705           ExtractValueInst::getIndexedType(RetSubTypes.back(), RetPath.back());
706       CallVal = UndefValue::get(SlotType);
707     }
708 
709     // The manipulations performed when we're looking through an insertvalue or
710     // an extractvalue would happen at the front of the RetPath list, so since
711     // we have to copy it anyway it's more efficient to create a reversed copy.
712     SmallVector<unsigned, 4> TmpRetPath(llvm::reverse(RetPath));
713     SmallVector<unsigned, 4> TmpCallPath(llvm::reverse(CallPath));
714 
715     // Finally, we can check whether the value produced by the tail call at this
716     // index is compatible with the value we return.
717     if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
718                               AllowDifferingSizes, TLI,
719                               F->getParent()->getDataLayout()))
720       return false;
721 
    CallEmpty = !nextRealType(CallSubTypes, CallPath);
  } while (nextRealType(RetSubTypes, RetPath));
724 
725   return true;
726 }
727 
728 static void collectEHScopeMembers(
729     DenseMap<const MachineBasicBlock *, int> &EHScopeMembership, int EHScope,
730     const MachineBasicBlock *MBB) {
731   SmallVector<const MachineBasicBlock *, 16> Worklist = {MBB};
732   while (!Worklist.empty()) {
733     const MachineBasicBlock *Visiting = Worklist.pop_back_val();
734     // Don't follow blocks which start new scopes.
735     if (Visiting->isEHPad() && Visiting != MBB)
736       continue;
737 
738     // Add this MBB to our scope.
739     auto P = EHScopeMembership.insert(std::make_pair(Visiting, EHScope));
740 
741     // Don't revisit blocks.
742     if (!P.second) {
743       assert(P.first->second == EHScope && "MBB is part of two scopes!");
744       continue;
745     }
746 
747     // Returns are boundaries where scope transfer can occur, don't follow
748     // successors.
749     if (Visiting->isEHScopeReturnBlock())
750       continue;
751 
752     append_range(Worklist, Visiting->successors());
753   }
754 }
755 
756 DenseMap<const MachineBasicBlock *, int>
757 llvm::getEHScopeMembership(const MachineFunction &MF) {
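  // The resulting map keys each visited block by the number of the entry
  // block of the EH scope containing it: blocks in the parent function
  // (including unreachable blocks and, for SEH, catch pads) map to the
  // function entry block's number.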
758   DenseMap<const MachineBasicBlock *, int> EHScopeMembership;
759 
760   // We don't have anything to do if there aren't any EH pads.
761   if (!MF.hasEHScopes())
762     return EHScopeMembership;
763 
764   int EntryBBNumber = MF.front().getNumber();
765   bool IsSEH = isAsynchronousEHPersonality(
766       classifyEHPersonality(MF.getFunction().getPersonalityFn()));
767 
768   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
769   SmallVector<const MachineBasicBlock *, 16> EHScopeBlocks;
770   SmallVector<const MachineBasicBlock *, 16> UnreachableBlocks;
771   SmallVector<const MachineBasicBlock *, 16> SEHCatchPads;
772   SmallVector<std::pair<const MachineBasicBlock *, int>, 16> CatchRetSuccessors;
773   for (const MachineBasicBlock &MBB : MF) {
774     if (MBB.isEHScopeEntry()) {
775       EHScopeBlocks.push_back(&MBB);
776     } else if (IsSEH && MBB.isEHPad()) {
777       SEHCatchPads.push_back(&MBB);
778     } else if (MBB.pred_empty()) {
779       UnreachableBlocks.push_back(&MBB);
780     }
781 
782     MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();
783 
784     // CatchPads are not scopes for SEH so do not consider CatchRet to
785     // transfer control to another scope.
786     if (MBBI == MBB.end() || MBBI->getOpcode() != TII->getCatchReturnOpcode())
787       continue;
788 
789     // FIXME: SEH CatchPads are not necessarily in the parent function:
790     // they could be inside a finally block.
791     const MachineBasicBlock *Successor = MBBI->getOperand(0).getMBB();
792     const MachineBasicBlock *SuccessorColor = MBBI->getOperand(1).getMBB();
793     CatchRetSuccessors.push_back(
794         {Successor, IsSEH ? EntryBBNumber : SuccessorColor->getNumber()});
795   }
796 
  // We don't have anything to do if there aren't any EH scope entries.
798   if (EHScopeBlocks.empty())
799     return EHScopeMembership;
800 
801   // Identify all the basic blocks reachable from the function entry.
802   collectEHScopeMembers(EHScopeMembership, EntryBBNumber, &MF.front());
803   // All blocks not part of a scope are in the parent function.
804   for (const MachineBasicBlock *MBB : UnreachableBlocks)
805     collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
806   // Next, identify all the blocks inside the scopes.
807   for (const MachineBasicBlock *MBB : EHScopeBlocks)
808     collectEHScopeMembers(EHScopeMembership, MBB->getNumber(), MBB);
809   // SEH CatchPads aren't really scopes, handle them separately.
810   for (const MachineBasicBlock *MBB : SEHCatchPads)
811     collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
812   // Finally, identify all the targets of a catchret.
813   for (std::pair<const MachineBasicBlock *, int> CatchRetPair :
814        CatchRetSuccessors)
815     collectEHScopeMembers(EHScopeMembership, CatchRetPair.second,
816                           CatchRetPair.first);
817   return EHScopeMembership;
818 }
819