//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"

using namespace llvm;

/// Compute the linearized index of a member in a nested aggregate/struct/array
/// by recursing and accumulating CurIndex as long as there are indices in the
/// index list.
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto I : llvm::enumerate(STy->elements())) {
      Type *ET = I.value();
      if (Indices && *Indices == I.index())
        return ComputeLinearIndex(ET, Indices + 1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(ET, nullptr, nullptr, CurIndex);
    }
    assert(!Indices && "Unexpected out of bound");
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    unsigned NumElts = ATy->getNumElements();
    // Compute the linear offset when jumping one element of the array.
    unsigned EltLinearOffset = ComputeLinearIndex(EltTy, nullptr, nullptr, 0);
    if (Indices) {
      assert(*Indices < NumElts && "Unexpected out of bound");
      // If the index is inside the array, compute the index to the requested
      // elt and recurse inside the element with the remaining indices.
      CurIndex += EltLinearOffset * *Indices;
      return ComputeLinearIndex(EltTy, Indices + 1, IndicesEnd, CurIndex);
    }
    CurIndex += EltLinearOffset * NumElts;
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}
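
// Illustrative example (not part of the original source): for the nested type
//   %T = { i32, { float, double }, [2 x i8] }
// the scalar leaves linearize in order as i32 -> 0, float -> 1, double -> 2,
// i8 -> 3, i8 -> 4. So Indices = {1, 1} (the double) makes this function
// return 2, and Indices = {2, 0} (the first i8) makes it return 3.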

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<EVT> *MemVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    // If the Offsets aren't needed, don't query the struct layout. This allows
    // us to support structs with scalable vectors for operations that don't
    // need offsets.
    const StructLayout *SL = Offsets ? DL.getStructLayout(STy) : nullptr;
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      // Don't compute the element offset if we didn't get a StructLayout above.
      uint64_t EltOffset = SL ? SL->getElementOffset(EI - EB) : 0;
      ComputeValueVTs(TLI, DL, *EI, ValueVTs, MemVTs, Offsets,
                      StartingOffset + EltOffset);
    }
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, DL, EltTy, ValueVTs, MemVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(DL, Ty));
  if (MemVTs)
    MemVTs->push_back(TLI.getMemValueType(DL, Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}
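
// Illustrative example (not part of the original source): assuming a
// DataLayout where i32 and float both have a 4-byte alloc size, calling
// ComputeValueVTs on %T = { i32, [2 x float] } appends MVT::i32, MVT::f32 and
// MVT::f32 to ValueVTs and, if Offsets is non-null, 0, 4 and 8 to Offsets
// (byte offsets relative to StartingOffset).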

void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  return ComputeValueVTs(TLI, DL, Ty, ValueVTs, /*MemVTs=*/nullptr, Offsets,
                         StartingOffset);
}

void llvm::computeValueLLTs(const DataLayout &DL, Type &Ty,
                            SmallVectorImpl<LLT> &ValueTys,
                            SmallVectorImpl<uint64_t> *Offsets,
                            uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(&Ty)) {
    // If the Offsets aren't needed, don't query the struct layout. This allows
    // us to support structs with scalable vectors for operations that don't
    // need offsets.
    const StructLayout *SL = Offsets ? DL.getStructLayout(STy) : nullptr;
    for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
      uint64_t EltOffset = SL ? SL->getElementOffset(I) : 0;
      computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
                       StartingOffset + EltOffset);
    }
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
                       StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty.isVoidTy())
    return;
  // Base case: we can get an LLT for this LLVM IR type.
  ValueTys.push_back(getLLTForType(Ty, DL));
  if (Offsets != nullptr)
    Offsets->push_back(StartingOffset * 8);
}
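
// Illustrative example (not part of the original source): for the same
// %T = { i32, [2 x float] } and layout as above, computeValueLLTs appends
// s32, s32 and s32 to ValueTys; unlike ComputeValueVTs, the offsets are
// recorded in bits, so Offsets would receive 0, 32 and 64.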

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalValue *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalValue *GV = dyn_cast<GlobalValue>(V);
  GlobalVariable *Var = dyn_cast<GlobalVariable>(V);

  if (Var && Var->getName() == "llvm.eh.catch.all.value") {
    assert(Var->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = Var->getInitializer();
    GV = dyn_cast<GlobalValue>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code.  This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}

ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
    case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
    case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
    case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
    case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
    case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
    case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
    default: return CC;
  }
}
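
// Illustrative example (not part of the original source): when NaNs can be
// ignored (e.g. the compare carries the nnan fast-math flag), both ISD::SETOLT
// and ISD::SETULT collapse to the plain ISD::SETLT returned here; condition
// codes without an ordered/unordered distinction are returned unchanged.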

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}

static bool isNoopBitcast(Type *T1, Type *T2,
                          const TargetLoweringBase& TLI) {
  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
         (isa<VectorType>(T1) && isa<VectorType>(T2) &&
          TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
}
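
// Illustrative example (not part of the original source): a bitcast between
// two pointer types is always treated as a no-op here, while a bitcast between
// two vector types (say <4 x i32> and <4 x float>) only counts as free when
// the target reports both EVTs as legal.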

/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a particular
/// scalar component. This records its address; the reverse of this list gives a
/// sequence of indices appropriate for an extractvalue to locate the important
/// value. This value is updated during the function and on exit will indicate
/// similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
static const Value *getNoopInput(const Value *V,
                                 SmallVectorImpl<unsigned> &ValLoc,
                                 unsigned &DataBits,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {
  while (true) {
    // Try to look through V; if V is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getNumOperands() == 0) return V;
    const Value *NoopInput = nullptr;

    Value *Op = I->getOperand(0);
    if (isa<BitCastInst>(I)) {
      // Look through truly no-op bitcasts.
      if (isNoopBitcast(Op->getType(), I->getType(), TLI))
        NoopInput = Op;
    } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptr
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        NoopInput = Op;
    } else if (isa<IntToPtrInst>(I)) {
      // Look through inttoptr.
      // Make sure this isn't a truncating or extending cast.  We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(Op->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<PtrToIntInst>(I)) {
      // Look through ptrtoint.
      // Make sure this isn't a truncating or extending cast.  We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(I->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits =
          std::min((uint64_t)DataBits,
                   I->getType()->getPrimitiveSizeInBits().getFixedValue());
      NoopInput = Op;
    } else if (auto *CB = dyn_cast<CallBase>(I)) {
      const Value *ReturnedOp = CB->getReturnedArgOperand();
      if (ReturnedOp && isNoopBitcast(ReturnedOp->getType(), I->getType(), TLI))
        NoopInput = ReturnedOp;
    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // Value may come from either the aggregate or the scalar
      ArrayRef<unsigned> InsertLoc = IVI->getIndices();
      if (ValLoc.size() >= InsertLoc.size() &&
          std::equal(InsertLoc.begin(), InsertLoc.end(), ValLoc.rbegin())) {
        // The type being inserted is a nested sub-type of the aggregate; we
        // have to remove those initial indices to get the location we're
        // interested in for the operand.
        ValLoc.resize(ValLoc.size() - InsertLoc.size());
        NoopInput = IVI->getInsertedValueOperand();
      } else {
        // The struct we're inserting into has the value we're interested in, no
        // change of address.
        NoopInput = Op;
      }
    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
      // The part we're interested in will inevitably be some sub-section of the
      // previous aggregate. Combine the two paths to obtain the true address of
      // our element.
      ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
      ValLoc.append(ExtractLoc.rbegin(), ExtractLoc.rend());
      NoopInput = Op;
    }
    // Terminate if we couldn't find anything to look through.
    if (!NoopInput)
      return V;

    V = NoopInput;
  }
}
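
// Illustrative example (not part of the original source): in IR such as
//   %call = tail call i8* @callee()
//   %cast = bitcast i8* %call to i32*              ; no-op pointer bitcast
//   %gep  = getelementptr i32, i32* %cast, i32 0   ; all-zero-index GEP
//   ret i32* %gep
// getNoopInput(%gep, ...) walks through the GEP and the bitcast and returns
// %call, leaving ValLoc and DataBits untouched.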

/// Return true if this scalar return value only has bits discarded on its path
/// from the "tail call" to the "ret". This includes the obvious noop
/// instructions handled by getNoopInput above as well as free truncations (or
/// extensions prior to the call).
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
                                 SmallVectorImpl<unsigned> &RetIndices,
                                 SmallVectorImpl<unsigned> &CallIndices,
                                 bool AllowDifferingSizes,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {

  // Trace the sub-value needed by the return value as far back up the graph as
  // possible, in the hope that it will intersect with the value produced by the
  // call. In the simple case with no "returned" attribute, the hope is actually
  // that we end up back at the tail call instruction itself.
  unsigned BitsRequired = UINT_MAX;
  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI, DL);

  // If this slot in the value returned is undef, it doesn't matter what the
  // call puts there, it'll be fine.
  if (isa<UndefValue>(RetVal))
    return true;

  // Now do a similar search up through the graph to find where the value
  // actually returned by the "tail call" comes from. In the simple case without
  // a "returned" attribute, the search will be blocked immediately and the
  // loop in getNoopInput is a no-op.
  unsigned BitsProvided = UINT_MAX;
  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI, DL);

  // There's no hope if we can't actually trace them to (the same part of!) the
  // same value.
  if (CallVal != RetVal || CallIndices != RetIndices)
    return false;

  // However, intervening truncates may have made the call non-tail. Make sure
  // all the bits that are needed by the "ret" have been provided by the "tail
  // call". FIXME: with sufficiently cunning bit-tracking, we could look through
  // extensions too.
  if (BitsProvided < BitsRequired ||
      (!AllowDifferingSizes && BitsProvided != BitsRequired))
    return false;

  return true;
}

/// For an aggregate type, determine whether a given index is within bounds or
/// not.
static bool indexReallyValid(Type *T, unsigned Idx) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return Idx < AT->getNumElements();

  return Idx < cast<StructType>(T)->getNumElements();
}

/// Move the given iterators to the next leaf type in depth first traversal.
///
/// Performs a depth-first traversal of the type as specified by its arguments,
/// stopping at the next leaf node (which may be a legitimate scalar type or an
/// empty struct or array).
///
/// @param SubTypes List of the partial components making up the type from
/// outermost to innermost non-empty aggregate. The element currently
/// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
///
/// @param Path Set of extractvalue indices leading from the outermost type
/// (SubTypes[0]) to the leaf node currently represented.
///
/// @returns true if a new type was found, false otherwise. Calling this
/// function again on a finished iterator will repeatedly return
/// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
/// aggregate or a non-aggregate
static bool advanceToNextLeafType(SmallVectorImpl<Type *> &SubTypes,
                                  SmallVectorImpl<unsigned> &Path) {
  // First march back up the tree until we can successfully increment one of the
  // coordinates in Path.
  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
    Path.pop_back();
    SubTypes.pop_back();
  }

  // If we reached the top, then the iterator is done.
  if (Path.empty())
    return false;

  // We know there's *some* valid leaf now, so march back down the tree picking
  // out the left-most element at each node.
  ++Path.back();
  Type *DeeperType =
      ExtractValueInst::getIndexedType(SubTypes.back(), Path.back());
  while (DeeperType->isAggregateType()) {
    if (!indexReallyValid(DeeperType, 0))
      return true;

    SubTypes.push_back(DeeperType);
    Path.push_back(0);

    DeeperType = ExtractValueInst::getIndexedType(DeeperType, 0);
  }

  return true;
}

/// Find the first non-empty, scalar-like type in Next and setup the iterator
/// components.
///
/// Assuming Next is an aggregate of some kind, this function will traverse the
/// tree from left to right (i.e. depth-first) looking for the first
/// non-aggregate type which will play a role in function return.
///
/// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would setup
/// Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the first
/// i32 in that type.
static bool firstRealType(Type *Next, SmallVectorImpl<Type *> &SubTypes,
                          SmallVectorImpl<unsigned> &Path) {
  // First initialise the iterator components to the first "leaf" node
  // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
  // despite nominally being an aggregate).
  while (Type *FirstInner = ExtractValueInst::getIndexedType(Next, 0)) {
    SubTypes.push_back(Next);
    Path.push_back(0);
    Next = FirstInner;
  }

  // If there's no Path now, Next was originally scalar already (or empty
  // leaf). We're done.
  if (Path.empty())
    return true;

  // Otherwise, use normal iteration to keep looking through the tree until we
  // find a non-aggregate type.
  while (ExtractValueInst::getIndexedType(SubTypes.back(), Path.back())
             ->isAggregateType()) {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;
  }

  return true;
}

/// Set the iterator data-structures to the next non-empty, non-aggregate
/// subtype.
static bool nextRealType(SmallVectorImpl<Type *> &SubTypes,
                         SmallVectorImpl<unsigned> &Path) {
  do {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;

    assert(!Path.empty() && "found a leaf but didn't set the path?");
  } while (ExtractValueInst::getIndexedType(SubTypes.back(), Path.back())
               ->isAggregateType());

  return true;
}
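
// Illustrative example (not part of the original source): continuing the
// firstRealType example above with Next = {[0 x i64], {{}, i32, {}}, i32},
// the first call to nextRealType skips the trailing empty struct and leaves
// Path as [2] and SubTypes as [Next], i.e. the outer i32; the next call
// returns false because the type has no further non-aggregate leaves.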


/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool llvm::isInTailCallPosition(const CallBase &Call, const TargetMachine &TM) {
  const BasicBlock *ExitBB = Call.getParent();
  const Instruction *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret && ((!TM.Options.GuaranteedTailCallOpt &&
                Call.getCallingConv() != CallingConv::Tail &&
                Call.getCallingConv() != CallingConv::SwiftTail) ||
               !isa<UnreachableInst>(Term)))
    return false;

  // If the call will have a chain, make sure no other instruction that will
  // have a chain interposes between the call and the return.
  // Check for all calls including speculatable functions.
  for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
    if (&*BBI == &Call)
      break;
    // Debug info intrinsics do not get in the way of tail call optimization.
    if (isa<DbgInfoIntrinsic>(BBI))
      continue;
    // Pseudo probe intrinsics do not block tail call optimization either.
    if (isa<PseudoProbeInst>(BBI))
      continue;
    // A lifetime end, assume or noalias.decl intrinsic should not stop tail
    // call optimization.
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(BBI))
      if (II->getIntrinsicID() == Intrinsic::lifetime_end ||
          II->getIntrinsicID() == Intrinsic::assume ||
          II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl)
        continue;
    if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
        !isSafeToSpeculativelyExecute(&*BBI))
      return false;
  }

  const Function *F = ExitBB->getParent();
  return returnTypeIsEligibleForTailCall(
      F, &Call, Ret, *TM.getSubtargetImpl(*F)->getTargetLowering());
}
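
// Illustrative example (not part of the original source): in
//   define i32 @caller() {
//   entry:
//     %r = tail call i32 @callee()
//     ret i32 %r
//   }
// the call is in tail call position: the block ends in a return and nothing
// with side effects is scheduled between the call and the ret.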

bool llvm::attributesPermitTailCall(const Function *F, const Instruction *I,
                                    const ReturnInst *Ret,
                                    const TargetLoweringBase &TLI,
                                    bool *AllowDifferingSizes) {
  // ADS may be null, so don't write to it directly.
  bool DummyADS;
  bool &ADS = AllowDifferingSizes ? *AllowDifferingSizes : DummyADS;
  ADS = true;

  AttrBuilder CallerAttrs(F->getAttributes(), AttributeList::ReturnIndex);
  AttrBuilder CalleeAttrs(cast<CallInst>(I)->getAttributes(),
                          AttributeList::ReturnIndex);

  // Following attributes are completely benign as far as calling convention
  // goes, they shouldn't affect whether the call is a tail call.
  for (const auto &Attr : {Attribute::Alignment, Attribute::Dereferenceable,
                           Attribute::DereferenceableOrNull, Attribute::NoAlias,
                           Attribute::NonNull}) {
    CallerAttrs.removeAttribute(Attr);
    CalleeAttrs.removeAttribute(Attr);
  }

  if (CallerAttrs.contains(Attribute::ZExt)) {
    if (!CalleeAttrs.contains(Attribute::ZExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::ZExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  } else if (CallerAttrs.contains(Attribute::SExt)) {
    if (!CalleeAttrs.contains(Attribute::SExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::SExt);
  }

  // Drop sext and zext return attributes if the result is not used.
  // This enables tail calls for code like:
  //
  // define void @caller() {
  // entry:
  //   %unused_result = tail call zeroext i1 @callee()
  //   br label %retlabel
  // retlabel:
  //   ret void
  // }
  if (I->use_empty()) {
    CalleeAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  }

  // If they're still different, there's some facet we don't understand
  // (currently only "inreg", but in future who knows). It may be OK but the
  // only safe option is to reject the tail call.
  return CallerAttrs == CalleeAttrs;
}
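
// Illustrative example (not part of the original source): if the caller is
// declared "zeroext i8 @caller()" but the callee's result is not marked
// zeroext, the zero-extension the caller owes its own callers would have to
// happen after the call, so the tail call is rejected; if both carry zeroext
// the call stays eligible but *AllowDifferingSizes is cleared.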

/// Check whether B is a bitcast of a pointer type to another pointer type,
/// which is equal to A.
static bool isPointerBitcastEqualTo(const Value *A, const Value *B) {
  assert(A && B && "Expected non-null inputs!");

  auto *BitCastIn = dyn_cast<BitCastInst>(B);

  if (!BitCastIn)
    return false;

  if (!A->getType()->isPointerTy() || !B->getType()->isPointerTy())
    return false;

  return A == BitCastIn->getOperand(0);
}

bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
                                           const Instruction *I,
                                           const ReturnInst *Ret,
                                           const TargetLoweringBase &TLI) {
  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Make sure the attributes attached to each return are compatible.
  bool AllowDifferingSizes;
  if (!attributesPermitTailCall(F, I, Ret, TLI, &AllowDifferingSizes))
    return false;

  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
  // An intrinsic like llvm.memcpy has no return value, but the expanded
  // libcall may or may not have one. On most platforms, it will be expanded
  // as memcpy in libc, which returns the first argument. On other platforms
  // like arm-none-eabi, memcpy may be expanded as a library call without a
  // return value, like __aeabi_memcpy.
  const CallInst *Call = cast<CallInst>(I);
  if (Function *F = Call->getCalledFunction()) {
    Intrinsic::ID IID = F->getIntrinsicID();
    if (((IID == Intrinsic::memcpy &&
          TLI.getLibcallName(RTLIB::MEMCPY) == StringRef("memcpy")) ||
         (IID == Intrinsic::memmove &&
          TLI.getLibcallName(RTLIB::MEMMOVE) == StringRef("memmove")) ||
         (IID == Intrinsic::memset &&
          TLI.getLibcallName(RTLIB::MEMSET) == StringRef("memset"))) &&
        (RetVal == Call->getArgOperand(0) ||
         isPointerBitcastEqualTo(RetVal, Call->getArgOperand(0))))
      return true;
  }

  SmallVector<unsigned, 4> RetPath, CallPath;
  SmallVector<Type *, 4> RetSubTypes, CallSubTypes;

  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);

  // Nothing's actually returned, so it doesn't matter what the callee put
  // there; it's a valid tail call.
  if (RetEmpty)
    return true;

  // Iterate pairwise through each of the value types making up the tail call
  // and the corresponding return. For each one we want to know whether it's
  // essentially going directly from the tail call to the ret, via operations
  // that end up not generating any code.
  //
  // We allow a certain amount of covariance here. For example it's permitted
  // for the tail call to define more bits than the ret actually cares about
  // (e.g. via a truncate).
  do {
    if (CallEmpty) {
      // We've exhausted the values produced by the tail call instruction, the
      // rest are essentially undef. The type doesn't really matter, but we need
      // *something*.
      Type *SlotType =
          ExtractValueInst::getIndexedType(RetSubTypes.back(), RetPath.back());
      CallVal = UndefValue::get(SlotType);
    }

    // The manipulations performed when we're looking through an insertvalue or
    // an extractvalue would happen at the front of the RetPath list, so since
    // we have to copy it anyway it's more efficient to create a reversed copy.
    SmallVector<unsigned, 4> TmpRetPath(RetPath.rbegin(), RetPath.rend());
    SmallVector<unsigned, 4> TmpCallPath(CallPath.rbegin(), CallPath.rend());

    // Finally, we can check whether the value produced by the tail call at this
    // index is compatible with the value we return.
    if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
                              AllowDifferingSizes, TLI,
                              F->getParent()->getDataLayout()))
      return false;

    CallEmpty = !nextRealType(CallSubTypes, CallPath);
  } while (nextRealType(RetSubTypes, RetPath));

  return true;
}
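
// Illustrative example (not part of the original source): in
//   %r64 = tail call i64 @callee()
//   %r32 = trunc i64 %r64 to i32
//   ret i32 %r32
// the return value is eligible only if the target reports that truncating
// from i64 to i32 is free for tail calls (TLI.allowTruncateForTailCall);
// the ret then simply discards high bits the call already produced.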

static void collectEHScopeMembers(
    DenseMap<const MachineBasicBlock *, int> &EHScopeMembership, int EHScope,
    const MachineBasicBlock *MBB) {
  SmallVector<const MachineBasicBlock *, 16> Worklist = {MBB};
  while (!Worklist.empty()) {
    const MachineBasicBlock *Visiting = Worklist.pop_back_val();
    // Don't follow blocks which start new scopes.
    if (Visiting->isEHPad() && Visiting != MBB)
      continue;

    // Add this MBB to our scope.
    auto P = EHScopeMembership.insert(std::make_pair(Visiting, EHScope));

    // Don't revisit blocks.
    if (!P.second) {
      assert(P.first->second == EHScope && "MBB is part of two scopes!");
      continue;
    }

    // Returns are boundaries where scope transfer can occur, don't follow
    // successors.
    if (Visiting->isEHScopeReturnBlock())
      continue;

    append_range(Worklist, Visiting->successors());
  }
}

DenseMap<const MachineBasicBlock *, int>
llvm::getEHScopeMembership(const MachineFunction &MF) {
  DenseMap<const MachineBasicBlock *, int> EHScopeMembership;

  // We don't have anything to do if there aren't any EH pads.
  if (!MF.hasEHScopes())
    return EHScopeMembership;

  int EntryBBNumber = MF.front().getNumber();
  bool IsSEH = isAsynchronousEHPersonality(
      classifyEHPersonality(MF.getFunction().getPersonalityFn()));

  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SmallVector<const MachineBasicBlock *, 16> EHScopeBlocks;
  SmallVector<const MachineBasicBlock *, 16> UnreachableBlocks;
  SmallVector<const MachineBasicBlock *, 16> SEHCatchPads;
  SmallVector<std::pair<const MachineBasicBlock *, int>, 16> CatchRetSuccessors;
  for (const MachineBasicBlock &MBB : MF) {
    if (MBB.isEHScopeEntry()) {
      EHScopeBlocks.push_back(&MBB);
    } else if (IsSEH && MBB.isEHPad()) {
      SEHCatchPads.push_back(&MBB);
    } else if (MBB.pred_empty()) {
      UnreachableBlocks.push_back(&MBB);
    }

    MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();

    // CatchPads are not scopes for SEH so do not consider CatchRet to
    // transfer control to another scope.
    if (MBBI == MBB.end() || MBBI->getOpcode() != TII->getCatchReturnOpcode())
      continue;

    // FIXME: SEH CatchPads are not necessarily in the parent function:
    // they could be inside a finally block.
    const MachineBasicBlock *Successor = MBBI->getOperand(0).getMBB();
    const MachineBasicBlock *SuccessorColor = MBBI->getOperand(1).getMBB();
    CatchRetSuccessors.push_back(
        {Successor, IsSEH ? EntryBBNumber : SuccessorColor->getNumber()});
  }

  // We don't have anything to do if there aren't any EH pads.
  if (EHScopeBlocks.empty())
    return EHScopeMembership;

  // Identify all the basic blocks reachable from the function entry.
  collectEHScopeMembers(EHScopeMembership, EntryBBNumber, &MF.front());
  // All blocks not part of a scope are in the parent function.
  for (const MachineBasicBlock *MBB : UnreachableBlocks)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Next, identify all the blocks inside the scopes.
  for (const MachineBasicBlock *MBB : EHScopeBlocks)
    collectEHScopeMembers(EHScopeMembership, MBB->getNumber(), MBB);
  // SEH CatchPads aren't really scopes, handle them separately.
  for (const MachineBasicBlock *MBB : SEHCatchPads)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Finally, identify all the targets of a catchret.
  for (std::pair<const MachineBasicBlock *, int> CatchRetPair :
       CatchRetSuccessors)
    collectEHScopeMembers(EHScopeMembership, CatchRetPair.second,
                          CatchRetPair.first);
  return EHScopeMembership;
}

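// Illustrative example (not part of the original source): for a function with
// a single catchpad, blocks reachable from the entry block are assigned the
// entry block's number, blocks reachable from the catchpad get the catchpad
// block's number, and a catchret's target is colored with the number of its
// successor-color operand (the parent funclet), so later passes can group the
// blocks of each funclet together.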