//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"

using namespace llvm;
/// Compute the linearized index of a member in a nested aggregate/struct/array
/// by recursing and accumulating CurIndex as long as there are indices in the
/// index list.
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(*EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(*EI, nullptr, nullptr, CurIndex);
    }
    assert(!Indices && "Unexpected out-of-bounds index");
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    unsigned NumElts = ATy->getNumElements();
    // Compute the linear offset when jumping one element of the array.
    unsigned EltLinearOffset = ComputeLinearIndex(EltTy, nullptr, nullptr, 0);
    if (Indices) {
      assert(*Indices < NumElts && "Unexpected out-of-bounds index");
      // If the index is inside the array, compute the index to the requested
      // element and recurse inside the element with the end of the index list.
      CurIndex += EltLinearOffset * *Indices;
      return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
    }
    CurIndex += EltLinearOffset * NumElts;
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}
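
// For example (illustrative only): in the struct { i32, [2 x i64], float } the
// scalar leaves are linearized as i32 -> 0, i64 -> 1, i64 -> 2, float -> 3, so
// the index list {2} (the float member) linearizes to 3.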

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<EVT> *MemVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    // If the Offsets aren't needed, don't query the struct layout. This allows
    // us to support structs with scalable vectors for operations that don't
    // need offsets.
    const StructLayout *SL = Offsets ? DL.getStructLayout(STy) : nullptr;
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      // Don't compute the element offset if we didn't get a StructLayout above.
      uint64_t EltOffset = SL ? SL->getElementOffset(EI - EB) : 0;
      ComputeValueVTs(TLI, DL, *EI, ValueVTs, MemVTs, Offsets,
                      StartingOffset + EltOffset);
    }
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, DL, EltTy, ValueVTs, MemVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(DL, Ty));
  if (MemVTs)
    MemVTs->push_back(TLI.getMemValueType(DL, Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}
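
// For example (illustrative only, assuming a typical DataLayout where i32 and
// float are both 4 bytes): for the type { i32, [2 x float] }, ValueVTs becomes
// { i32, f32, f32 } and, if requested, Offsets becomes { 0, 4, 8 } in bytes.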

void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  return ComputeValueVTs(TLI, DL, Ty, ValueVTs, /*MemVTs=*/nullptr, Offsets,
                         StartingOffset);
}

void llvm::computeValueLLTs(const DataLayout &DL, Type &Ty,
                            SmallVectorImpl<LLT> &ValueTys,
                            SmallVectorImpl<uint64_t> *Offsets,
                            uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(&Ty)) {
    // If the Offsets aren't needed, don't query the struct layout. This allows
    // us to support structs with scalable vectors for operations that don't
    // need offsets.
    const StructLayout *SL = Offsets ? DL.getStructLayout(STy) : nullptr;
    for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
      uint64_t EltOffset = SL ? SL->getElementOffset(I) : 0;
      computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
                       StartingOffset + EltOffset);
    }
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
                       StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty.isVoidTy())
    return;
  // Base case: we can get an LLT for this LLVM IR type.
  ValueTys.push_back(getLLTForType(Ty, DL));
  if (Offsets != nullptr)
    Offsets->push_back(StartingOffset * 8);
}
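
// Note that, unlike ComputeValueVTs above, the offsets recorded here are in
// bits, not bytes (hence the "* 8"). For example (illustrative only, assuming
// a typical DataLayout): for { i32, float }, ValueTys becomes { s32, s32 } and
// Offsets becomes { 0, 32 }.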

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalValue *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalValue *GV = dyn_cast<GlobalValue>(V);
  GlobalVariable *Var = dyn_cast<GlobalVariable>(V);

  if (Var && Var->getName() == "llvm.eh.catch.all.value") {
    assert(Var->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = Var->getInitializer();
    GV = dyn_cast<GlobalValue>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}

ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
  case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
  case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
  case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
  case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
  case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
  case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
  default: return CC;
  }
}
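
// For example, once NaNs can be ignored both SETOLT ("ordered and less than")
// and SETULT ("unordered or less than") collapse to the plain SETLT.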

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}

static bool isNoopBitcast(Type *T1, Type *T2,
                          const TargetLoweringBase &TLI) {
  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
         (isa<VectorType>(T1) && isa<VectorType>(T2) &&
          TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
}

/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a particular
/// scalar component. This records its address; the reverse of this list gives a
/// sequence of indices appropriate for an extractvalue to locate the important
/// value. This value is updated during the function and on exit will indicate
/// similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
static const Value *getNoopInput(const Value *V,
                                 SmallVectorImpl<unsigned> &ValLoc,
                                 unsigned &DataBits,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {
  while (true) {
    // Try to look through V; if V is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getNumOperands() == 0) return V;
    const Value *NoopInput = nullptr;

    Value *Op = I->getOperand(0);
    if (isa<BitCastInst>(I)) {
      // Look through truly no-op bitcasts.
      if (isNoopBitcast(Op->getType(), I->getType(), TLI))
        NoopInput = Op;
    } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptr.
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        NoopInput = Op;
    } else if (isa<IntToPtrInst>(I)) {
      // Look through inttoptr.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(Op->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<PtrToIntInst>(I)) {
      // Look through ptrtoint.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(I->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits = std::min((uint64_t)DataBits,
                          I->getType()->getPrimitiveSizeInBits().getFixedSize());
      NoopInput = Op;
    } else if (auto *CB = dyn_cast<CallBase>(I)) {
      const Value *ReturnedOp = CB->getReturnedArgOperand();
      if (ReturnedOp && isNoopBitcast(ReturnedOp->getType(), I->getType(), TLI))
        NoopInput = ReturnedOp;
    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // The value may come from either the aggregate or the scalar.
      ArrayRef<unsigned> InsertLoc = IVI->getIndices();
      if (ValLoc.size() >= InsertLoc.size() &&
          std::equal(InsertLoc.begin(), InsertLoc.end(), ValLoc.rbegin())) {
        // The type being inserted is a nested sub-type of the aggregate; we
        // have to remove those initial indices to get the location we're
        // interested in for the operand.
        ValLoc.resize(ValLoc.size() - InsertLoc.size());
        NoopInput = IVI->getInsertedValueOperand();
      } else {
        // The struct we're inserting into has the value we're interested in,
        // no change of address.
        NoopInput = Op;
      }
    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
      // The part we're interested in will inevitably be some sub-section of
      // the previous aggregate. Combine the two paths to obtain the true
      // address of our element.
      ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
      ValLoc.append(ExtractLoc.rbegin(), ExtractLoc.rend());
      NoopInput = Op;
    }
    // Terminate if we couldn't find anything to look through.
    if (!NoopInput)
      return V;

    V = NoopInput;
  }
}
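
// For example (illustrative IR, assuming 64-bit pointers):
//
//   %p = bitcast i8* %q to i32*
//   %v = ptrtoint i32* %p to i64
//
// getNoopInput on %v looks through the ptrtoint (same bit width) and the
// no-op pointer bitcast, and returns %q.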

/// Return true if this scalar return value only has bits discarded on its path
/// from the "tail call" to the "ret". This includes the obvious noop
/// instructions handled by getNoopInput above as well as free truncations (or
/// extensions prior to the call).
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
                                 SmallVectorImpl<unsigned> &RetIndices,
                                 SmallVectorImpl<unsigned> &CallIndices,
                                 bool AllowDifferingSizes,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {

  // Trace the sub-value needed by the return value as far back up the graph as
  // possible, in the hope that it will intersect with the value produced by the
  // call. In the simple case with no "returned" attribute, the hope is actually
  // that we end up back at the tail call instruction itself.
  unsigned BitsRequired = UINT_MAX;
  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI, DL);

  // If this slot in the value returned is undef, it doesn't matter what the
  // call puts there, it'll be fine.
  if (isa<UndefValue>(RetVal))
    return true;

  // Now do a similar search up through the graph to find where the value
  // actually returned by the "tail call" comes from. In the simple case without
  // a "returned" attribute, the search will be blocked immediately and the loop
  // will be a no-op.
  unsigned BitsProvided = UINT_MAX;
  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI, DL);

  // There's no hope if we can't actually trace them to (the same part of!) the
  // same value.
  if (CallVal != RetVal || CallIndices != RetIndices)
    return false;

  // However, intervening truncates may have made the call non-tail. Make sure
  // all the bits that are needed by the "ret" have been provided by the "tail
  // call". FIXME: with sufficiently cunning bit-tracking, we could look through
  // extensions too.
  if (BitsProvided < BitsRequired ||
      (!AllowDifferingSizes && BitsProvided != BitsRequired))
    return false;

  return true;
}
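
// For example (illustrative IR, assuming the target permits truncation for
// tail calls and AllowDifferingSizes is true):
//
//   %r64 = tail call i64 @callee()
//   %r32 = trunc i64 %r64 to i32
//   ret i32 %r32
//
// Both traces meet at the call, and the ret needs only 32 of the bits the
// call provides, so the slot merely discards data.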

/// For an aggregate type, determine whether a given index is within bounds or
/// not.
static bool indexReallyValid(Type *T, unsigned Idx) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return Idx < AT->getNumElements();

  return Idx < cast<StructType>(T)->getNumElements();
}

/// Move the given iterators to the next leaf type in depth first traversal.
///
/// Performs a depth-first traversal of the type as specified by its arguments,
/// stopping at the next leaf node (which may be a legitimate scalar type or an
/// empty struct or array).
///
/// @param SubTypes List of the partial components making up the type from
/// outermost to innermost non-empty aggregate. The element currently
/// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
///
/// @param Path Set of extractvalue indices leading from the outermost type
/// (SubTypes[0]) to the leaf node currently represented.
///
/// @returns true if a new type was found, false otherwise. Calling this
/// function again on a finished iterator will repeatedly return
/// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
/// aggregate or a non-aggregate.
static bool advanceToNextLeafType(SmallVectorImpl<Type *> &SubTypes,
                                  SmallVectorImpl<unsigned> &Path) {
  // First march back up the tree until we can successfully increment one of the
  // coordinates in Path.
  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
    Path.pop_back();
    SubTypes.pop_back();
  }

  // If we reached the top, then the iterator is done.
  if (Path.empty())
    return false;

  // We know there's *some* valid leaf now, so march back down the tree picking
  // out the left-most element at each node.
  ++Path.back();
  Type *DeeperType =
      ExtractValueInst::getIndexedType(SubTypes.back(), Path.back());
  while (DeeperType->isAggregateType()) {
    if (!indexReallyValid(DeeperType, 0))
      return true;

    SubTypes.push_back(DeeperType);
    Path.push_back(0);

    DeeperType = ExtractValueInst::getIndexedType(DeeperType, 0);
  }

  return true;
}

/// Find the first non-empty, scalar-like type in Next and setup the iterator
/// components.
///
/// Assuming Next is an aggregate of some kind, this function will traverse the
/// tree from left to right (i.e. depth-first) looking for the first
/// non-aggregate type which will play a role in function return.
///
/// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would setup
/// Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the first
/// i32 in that type.
static bool firstRealType(Type *Next, SmallVectorImpl<Type *> &SubTypes,
                          SmallVectorImpl<unsigned> &Path) {
  // First initialise the iterator components to the first "leaf" node
  // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
  // despite nominally being an aggregate).
  while (Type *FirstInner = ExtractValueInst::getIndexedType(Next, 0)) {
    SubTypes.push_back(Next);
    Path.push_back(0);
    Next = FirstInner;
  }

  // If there's no Path now, Next was originally scalar already (or empty
  // leaf). We're done.
  if (Path.empty())
    return true;

  // Otherwise, use normal iteration to keep looking through the tree until we
  // find a non-aggregate type.
  while (ExtractValueInst::getIndexedType(SubTypes.back(), Path.back())
             ->isAggregateType()) {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;
  }

  return true;
}

/// Set the iterator data-structures to the next non-empty, non-aggregate
/// subtype.
static bool nextRealType(SmallVectorImpl<Type *> &SubTypes,
                         SmallVectorImpl<unsigned> &Path) {
  do {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;

    assert(!Path.empty() && "found a leaf but didn't set the path?");
  } while (ExtractValueInst::getIndexedType(SubTypes.back(), Path.back())
               ->isAggregateType());

  return true;
}
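
// Continuing the firstRealType example above: starting from Path == [1, 1] in
// {[0 x i64], {{}, i32, {}}, i32}, nextRealType steps over the trailing empty
// struct inside the middle member and stops at the outermost i32, leaving
// SubTypes == [the outer type] and Path == [2].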

/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool llvm::isInTailCallPosition(const CallBase &Call, const TargetMachine &TM) {
  const BasicBlock *ExitBB = Call.getParent();
  const Instruction *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret && ((!TM.Options.GuaranteedTailCallOpt &&
                Call.getCallingConv() != CallingConv::Tail) ||
               !isa<UnreachableInst>(Term)))
    return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  // Check for all calls including speculatable functions.
  for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
    if (&*BBI == &Call)
      break;
    // Debug info intrinsics do not get in the way of tail call optimization.
    if (isa<DbgInfoIntrinsic>(BBI))
      continue;
    // Pseudo probe intrinsics do not block tail call optimization either.
    if (isa<PseudoProbeInst>(BBI))
      continue;
    // A lifetime end, assume or noalias.decl intrinsic should not stop tail
    // call optimization.
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(BBI))
      if (II->getIntrinsicID() == Intrinsic::lifetime_end ||
          II->getIntrinsicID() == Intrinsic::assume ||
          II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl)
        continue;
    if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
        !isSafeToSpeculativelyExecute(&*BBI))
      return false;
  }

  const Function *F = ExitBB->getParent();
  return returnTypeIsEligibleForTailCall(
      F, &Call, Ret, *TM.getSubtargetImpl(*F)->getTargetLowering());
}
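
// For example (illustrative IR), the call below satisfies the block-local
// requirements checked above, because nothing that needs a chain sits between
// it and the return:
//
//   %res = tail call i32 @callee()
//   ret i32 %res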

bool llvm::attributesPermitTailCall(const Function *F, const Instruction *I,
                                    const ReturnInst *Ret,
                                    const TargetLoweringBase &TLI,
                                    bool *AllowDifferingSizes) {
  // ADS may be null, so don't write to it directly.
  bool DummyADS;
  bool &ADS = AllowDifferingSizes ? *AllowDifferingSizes : DummyADS;
  ADS = true;

  AttrBuilder CallerAttrs(F->getAttributes(), AttributeList::ReturnIndex);
  AttrBuilder CalleeAttrs(cast<CallInst>(I)->getAttributes(),
                          AttributeList::ReturnIndex);

  // The following attributes are completely benign as far as calling
  // convention goes, and they shouldn't affect whether the call is a tail
  // call.
  CallerAttrs.removeAttribute(Attribute::NoAlias);
  CalleeAttrs.removeAttribute(Attribute::NoAlias);
  CallerAttrs.removeAttribute(Attribute::NonNull);
  CalleeAttrs.removeAttribute(Attribute::NonNull);
  CallerAttrs.removeAttribute(Attribute::Dereferenceable);
  CalleeAttrs.removeAttribute(Attribute::Dereferenceable);
  CallerAttrs.removeAttribute(Attribute::DereferenceableOrNull);
  CalleeAttrs.removeAttribute(Attribute::DereferenceableOrNull);

  if (CallerAttrs.contains(Attribute::ZExt)) {
    if (!CalleeAttrs.contains(Attribute::ZExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::ZExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  } else if (CallerAttrs.contains(Attribute::SExt)) {
    if (!CalleeAttrs.contains(Attribute::SExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::SExt);
  }

  // Drop sext and zext return attributes if the result is not used.
  // This enables tail calls for code like:
  //
  // define void @caller() {
  // entry:
  //   %unused_result = tail call zeroext i1 @callee()
  //   br label %retlabel
  // retlabel:
  //   ret void
  // }
  if (I->use_empty()) {
    CalleeAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  }

  // If they're still different, there's some facet we don't understand
  // (currently only "inreg", but in future who knows). It may be OK but the
  // only safe option is to reject the tail call.
  return CallerAttrs == CalleeAttrs;
}

/// Check whether B is a bitcast of a pointer type to another pointer type,
/// which is equal to A.
static bool isPointerBitcastEqualTo(const Value *A, const Value *B) {
  assert(A && B && "Expected non-null inputs!");

  auto *BitCastIn = dyn_cast<BitCastInst>(B);

  if (!BitCastIn)
    return false;

  if (!A->getType()->isPointerTy() || !B->getType()->isPointerTy())
    return false;

  return A == BitCastIn->getOperand(0);
}

bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
                                           const Instruction *I,
                                           const ReturnInst *Ret,
                                           const TargetLoweringBase &TLI) {
  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Make sure the attributes attached to each return are compatible.
  bool AllowDifferingSizes;
  if (!attributesPermitTailCall(F, I, Ret, TLI, &AllowDifferingSizes))
    return false;

  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
  // An intrinsic like llvm.memcpy has no return value, but the expanded
  // libcall may or may not have one. On most platforms, it will be expanded
  // as memcpy in libc, which returns the first argument. On other platforms
  // like arm-none-eabi, memcpy may be expanded as a library call without a
  // return value, like __aeabi_memcpy.
  const CallInst *Call = cast<CallInst>(I);
  if (Function *F = Call->getCalledFunction()) {
    Intrinsic::ID IID = F->getIntrinsicID();
    if (((IID == Intrinsic::memcpy &&
          TLI.getLibcallName(RTLIB::MEMCPY) == StringRef("memcpy")) ||
         (IID == Intrinsic::memmove &&
          TLI.getLibcallName(RTLIB::MEMMOVE) == StringRef("memmove")) ||
         (IID == Intrinsic::memset &&
          TLI.getLibcallName(RTLIB::MEMSET) == StringRef("memset"))) &&
        (RetVal == Call->getArgOperand(0) ||
         isPointerBitcastEqualTo(RetVal, Call->getArgOperand(0))))
      return true;
  }

  SmallVector<unsigned, 4> RetPath, CallPath;
  SmallVector<Type *, 4> RetSubTypes, CallSubTypes;

  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);

  // If nothing's actually returned, it doesn't matter what the callee put
  // there; it's a valid tail call.
  if (RetEmpty)
    return true;

  // Iterate pairwise through each of the value types making up the tail call
  // and the corresponding return. For each one we want to know whether it's
  // essentially going directly from the tail call to the ret, via operations
  // that end up not generating any code.
  //
  // We allow a certain amount of covariance here. For example it's permitted
  // for the tail call to define more bits than the ret actually cares about
  // (e.g. via a truncate).
  do {
    if (CallEmpty) {
      // We've exhausted the values produced by the tail call instruction, the
      // rest are essentially undef. The type doesn't really matter, but we need
      // *something*.
      Type *SlotType =
          ExtractValueInst::getIndexedType(RetSubTypes.back(), RetPath.back());
      CallVal = UndefValue::get(SlotType);
    }

    // The manipulations performed when we're looking through an insertvalue or
    // an extractvalue would happen at the front of the RetPath list, so since
    // we have to copy it anyway it's more efficient to create a reversed copy.
    SmallVector<unsigned, 4> TmpRetPath(RetPath.rbegin(), RetPath.rend());
    SmallVector<unsigned, 4> TmpCallPath(CallPath.rbegin(), CallPath.rend());

    // Finally, we can check whether the value produced by the tail call at this
    // index is compatible with the value we return.
    if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
                              AllowDifferingSizes, TLI,
                              F->getParent()->getDataLayout()))
      return false;

    CallEmpty = !nextRealType(CallSubTypes, CallPath);
  } while (nextRealType(RetSubTypes, RetPath));

  return true;
}
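
// For example (illustrative IR): a return value that merely round-trips
// through an insertvalue/extractvalue pair remains eligible:
//
//   %pair = tail call { i32, i32 } @callee()
//   %fst = extractvalue { i32, i32 } %pair, 0
//   %agg = insertvalue { i32, i32 } undef, i32 %fst, 0
//   ret { i32, i32 } %agg
//
// Slot 0 traces straight back to the call and slot 1 of the returned value is
// undef, so no code is required between the call and the ret.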

static void collectEHScopeMembers(
    DenseMap<const MachineBasicBlock *, int> &EHScopeMembership, int EHScope,
    const MachineBasicBlock *MBB) {
  SmallVector<const MachineBasicBlock *, 16> Worklist = {MBB};
  while (!Worklist.empty()) {
    const MachineBasicBlock *Visiting = Worklist.pop_back_val();
    // Don't follow blocks which start new scopes.
    if (Visiting->isEHPad() && Visiting != MBB)
      continue;

    // Add this MBB to our scope.
    auto P = EHScopeMembership.insert(std::make_pair(Visiting, EHScope));

    // Don't revisit blocks.
    if (!P.second) {
      assert(P.first->second == EHScope && "MBB is part of two scopes!");
      continue;
    }

    // Returns are boundaries where scope transfer can occur, don't follow
    // successors.
    if (Visiting->isEHScopeReturnBlock())
      continue;

    append_range(Worklist, Visiting->successors());
  }
}

DenseMap<const MachineBasicBlock *, int>
llvm::getEHScopeMembership(const MachineFunction &MF) {
  DenseMap<const MachineBasicBlock *, int> EHScopeMembership;

  // We don't have anything to do if there aren't any EH pads.
  if (!MF.hasEHScopes())
    return EHScopeMembership;

  int EntryBBNumber = MF.front().getNumber();
  bool IsSEH = isAsynchronousEHPersonality(
      classifyEHPersonality(MF.getFunction().getPersonalityFn()));

  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SmallVector<const MachineBasicBlock *, 16> EHScopeBlocks;
  SmallVector<const MachineBasicBlock *, 16> UnreachableBlocks;
  SmallVector<const MachineBasicBlock *, 16> SEHCatchPads;
  SmallVector<std::pair<const MachineBasicBlock *, int>, 16> CatchRetSuccessors;
  for (const MachineBasicBlock &MBB : MF) {
    if (MBB.isEHScopeEntry()) {
      EHScopeBlocks.push_back(&MBB);
    } else if (IsSEH && MBB.isEHPad()) {
      SEHCatchPads.push_back(&MBB);
    } else if (MBB.pred_empty()) {
      UnreachableBlocks.push_back(&MBB);
    }

    MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();

    // CatchPads are not scopes for SEH, so do not consider CatchRet to
    // transfer control to another scope.
    if (MBBI == MBB.end() || MBBI->getOpcode() != TII->getCatchReturnOpcode())
      continue;

    // FIXME: SEH CatchPads are not necessarily in the parent function:
    // they could be inside a finally block.
    const MachineBasicBlock *Successor = MBBI->getOperand(0).getMBB();
    const MachineBasicBlock *SuccessorColor = MBBI->getOperand(1).getMBB();
    CatchRetSuccessors.push_back(
        {Successor, IsSEH ? EntryBBNumber : SuccessorColor->getNumber()});
  }

  // We don't have anything to do if there aren't any EH pads.
  if (EHScopeBlocks.empty())
    return EHScopeMembership;

  // Identify all the basic blocks reachable from the function entry.
  collectEHScopeMembers(EHScopeMembership, EntryBBNumber, &MF.front());
  // All blocks not part of a scope are in the parent function.
  for (const MachineBasicBlock *MBB : UnreachableBlocks)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Next, identify all the blocks inside the scopes.
  for (const MachineBasicBlock *MBB : EHScopeBlocks)
    collectEHScopeMembers(EHScopeMembership, MBB->getNumber(), MBB);
  // SEH CatchPads aren't really scopes, handle them separately.
  for (const MachineBasicBlock *MBB : SEHCatchPads)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Finally, identify all the targets of a catchret.
  for (std::pair<const MachineBasicBlock *, int> CatchRetPair :
       CatchRetSuccessors)
    collectEHScopeMembers(EHScopeMembership, CatchRetPair.second,
                          CatchRetPair.first);
  return EHScopeMembership;
}