//=-- ExprEngineCallAndReturn.cpp - Support for call/return -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This file defines ExprEngine's support for calls and returns.
//
//===----------------------------------------------------------------------===//

#include "PrettyStackTraceLocationContext.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/ConstructionContext.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace clang;
using namespace ento;

#define DEBUG_TYPE "ExprEngine"

STATISTIC(NumOfDynamicDispatchPathSplits,
  "The # of times we split the path due to imprecise dynamic dispatch info");

STATISTIC(NumInlinedCalls,
  "The # of times we inlined a call");

STATISTIC(NumReachedInlineCountMax,
  "The # of times we reached inline count maximum");

void ExprEngine::processCallEnter(NodeBuilderContext& BC, CallEnter CE,
                                  ExplodedNode *Pred) {
  // Get the entry block in the CFG of the callee.
  const StackFrameContext *calleeCtx = CE.getCalleeContext();
  PrettyStackTraceLocationContext CrashInfo(calleeCtx);
  const CFGBlock *Entry = CE.getEntry();

  // Validate the CFG.
  assert(Entry->empty());
  assert(Entry->succ_size() == 1);

  // Get the solitary successor.
  const CFGBlock *Succ = *(Entry->succ_begin());

  // Construct an edge representing the starting location in the callee.
  BlockEdge Loc(Entry, Succ, calleeCtx);

  ProgramStateRef state = Pred->getState();

  // Construct a new node, notify checkers that analysis of the function has
  // begun, and add the resultant nodes to the worklist.
  bool isNew;
  ExplodedNode *Node = G.getNode(Loc, state, false, &isNew);
  Node->addPredecessor(Pred, G);
  if (isNew) {
    ExplodedNodeSet DstBegin;
    processBeginOfFunction(BC, Node, DstBegin, Loc);
    Engine.enqueue(DstBegin);
  }
}

// Find the last statement on the path to the exploded node and the
// corresponding Block.
static std::pair<const Stmt*,
                 const CFGBlock*> getLastStmt(const ExplodedNode *Node) {
  const Stmt *S = nullptr;
  const CFGBlock *Blk = nullptr;
  const StackFrameContext *SF = Node->getStackFrame();

  // Back up through the ExplodedGraph until we reach a statement node in this
  // stack frame.
  while (Node) {
    const ProgramPoint &PP = Node->getLocation();

    if (PP.getStackFrame() == SF) {
      if (Optional<StmtPoint> SP = PP.getAs<StmtPoint>()) {
        S = SP->getStmt();
        break;
      } else if (Optional<CallExitEnd> CEE = PP.getAs<CallExitEnd>()) {
        S = CEE->getCalleeContext()->getCallSite();
        if (S)
          break;

        // If there is no statement, this is an implicitly-generated call.
        // We'll walk backwards over it and then continue the loop to find
        // an actual statement.
        Optional<CallEnter> CE;
        do {
          Node = Node->getFirstPred();
          CE = Node->getLocationAs<CallEnter>();
        } while (!CE || CE->getCalleeContext() != CEE->getCalleeContext());

        // Continue searching the graph.
      } else if (Optional<BlockEdge> BE = PP.getAs<BlockEdge>()) {
        Blk = BE->getSrc();
      }
    } else if (Optional<CallEnter> CE = PP.getAs<CallEnter>()) {
      // If we reached the CallEnter for this function, it has no statements.
      if (CE->getCalleeContext() == SF)
        break;
    }

    if (Node->pred_empty())
      return std::make_pair(nullptr, nullptr);

    Node = *Node->pred_begin();
  }

  return std::make_pair(S, Blk);
}

/// Adjusts a return value when the called function's return type does not
/// match the caller's expression type. This can happen when a dynamic call
/// is devirtualized, and the overriding method has a covariant (more specific)
/// return type than the parent's method. For C++ objects, this means we need
/// to add base casts.
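/// For example, if 'Derived::clone()' overrides 'Base::clone()' with a
/// covariant 'Derived *' return type, a devirtualized call through a
/// 'Base *' produces a 'Derived *' value that must be cast back up to
/// 'Base *' to match the type of the call expression.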
static SVal adjustReturnValue(SVal V, QualType ExpectedTy, QualType ActualTy,
                              StoreManager &StoreMgr) {
  // For now, the only adjustments we handle apply to locations.
  if (!isa<Loc>(V))
    return V;

  // If the types already match, don't do any unnecessary work.
  ExpectedTy = ExpectedTy.getCanonicalType();
  ActualTy = ActualTy.getCanonicalType();
  if (ExpectedTy == ActualTy)
    return V;

  // No adjustment is needed between Objective-C pointer types.
  if (ExpectedTy->isObjCObjectPointerType() &&
      ActualTy->isObjCObjectPointerType())
    return V;

  // C++ object pointers may need "derived-to-base" casts.
  const CXXRecordDecl *ExpectedClass = ExpectedTy->getPointeeCXXRecordDecl();
  const CXXRecordDecl *ActualClass = ActualTy->getPointeeCXXRecordDecl();
  if (ExpectedClass && ActualClass) {
    CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                       /*DetectVirtual=*/false);
    if (ActualClass->isDerivedFrom(ExpectedClass, Paths) &&
        !Paths.isAmbiguous(ActualTy->getCanonicalTypeUnqualified())) {
      return StoreMgr.evalDerivedToBase(V, Paths.front());
    }
  }

  // Unfortunately, Objective-C does not enforce that overridden methods have
  // covariant return types, so we can't assert that this never happens.
  // Be safe and return UnknownVal().
  return UnknownVal();
}

void ExprEngine::removeDeadOnEndOfFunction(NodeBuilderContext& BC,
                                           ExplodedNode *Pred,
                                           ExplodedNodeSet &Dst) {
  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = nullptr;
  const CFGBlock *Blk = nullptr;
  std::tie(LastSt, Blk) = getLastStmt(Pred);
  if (!Blk || !LastSt) {
    Dst.Add(Pred);
    return;
  }

  // Here, we destroy the current location context. We use the current
  // function's entire body as a diagnostic statement, with which the program
  // point will be associated. However, we only want to use LastStmt as a
  // reference for what to clean up if it's a ReturnStmt; otherwise, everything
  // is dead.
  SaveAndRestore<const NodeBuilderContext *> NodeContextRAII(currBldrCtx, &BC);
  const LocationContext *LCtx = Pred->getLocationContext();
  removeDead(Pred, Dst, dyn_cast<ReturnStmt>(LastSt), LCtx,
             LCtx->getAnalysisDeclContext()->getBody(),
             ProgramPoint::PostStmtPurgeDeadSymbolsKind);
}

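/// Returns true if the callee decl that was actually inlined differs from
/// the decl statically visible at the call site, e.g. when a dynamic call
/// was devirtualized to an override in a derived class.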
static bool wasDifferentDeclUsedForInlining(CallEventRef<> Call,
    const StackFrameContext *calleeCtx) {
  const Decl *RuntimeCallee = calleeCtx->getDecl();
  const Decl *StaticDecl = Call->getDecl();
  assert(RuntimeCallee);
  if (!StaticDecl)
    return true;
  return RuntimeCallee->getCanonicalDecl() != StaticDecl->getCanonicalDecl();
}

/// The call exit is simulated with a sequence of nodes, which occur between
/// CallExitBegin and CallExitEnd. The following operations occur between the
/// two program points:
/// 1. CallExitBegin (triggers the start of the call exit sequence)
/// 2. Bind the return value
/// 3. Run remove-dead-bindings to clean up dead symbols from the callee.
/// 4. CallExitEnd (switch to the caller context)
/// 5. PostStmt<CallExpr>
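///
/// For a call site like 'x = foo();', step 2 binds the value of the callee's
/// returned expression to the CallExpr 'foo()', and step 5 is where checkers
/// observe the completed call in the caller's context.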
void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
  // Step 1: CEBNode was generated before the call.
  PrettyStackTraceLocationContext CrashInfo(CEBNode->getLocationContext());
  const StackFrameContext *calleeCtx = CEBNode->getStackFrame();

  // The parent context might not be a stack frame, so make sure we
  // look up the first enclosing stack frame.
  const StackFrameContext *callerCtx =
    calleeCtx->getParent()->getStackFrame();

  const Stmt *CE = calleeCtx->getCallSite();
  ProgramStateRef state = CEBNode->getState();
  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = nullptr;
  const CFGBlock *Blk = nullptr;
  std::tie(LastSt, Blk) = getLastStmt(CEBNode);

  // Generate a CallEvent /before/ cleaning the state, so that we can get the
  // correct value for 'this' (if necessary).
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state);

  // Step 2: generate node with bound return value: CEBNode -> BindedRetNode.

  // If the callee returns an expression, bind its value to the CallExpr.
  if (CE) {
    if (const ReturnStmt *RS = dyn_cast_or_null<ReturnStmt>(LastSt)) {
      const LocationContext *LCtx = CEBNode->getLocationContext();
      SVal V = state->getSVal(RS, LCtx);

      // Ensure that the return type matches the type of the returned Expr.
      if (wasDifferentDeclUsedForInlining(Call, calleeCtx)) {
        QualType ReturnedTy =
          CallEvent::getDeclaredResultType(calleeCtx->getDecl());
        if (!ReturnedTy.isNull()) {
          if (const Expr *Ex = dyn_cast<Expr>(CE)) {
            V = adjustReturnValue(V, Ex->getType(), ReturnedTy,
                                  getStoreManager());
          }
        }
      }

      state = state->BindExpr(CE, callerCtx, V);
    }

    // Bind the constructed object value to the CXXConstructExpr.
    if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) {
      loc::MemRegionVal This =
        svalBuilder.getCXXThis(CCE->getConstructor()->getParent(), calleeCtx);
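      // First read the callee's 'this' pointer, then load the value of the
      // object it points to, so that the constructed object itself (rather
      // than the pointer) is bound to the CXXConstructExpr in the caller.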
      SVal ThisV = state->getSVal(This);
      ThisV = state->getSVal(ThisV.castAs<Loc>());
      state = state->BindExpr(CCE, callerCtx, ThisV);
    }

    if (const auto *CNE = dyn_cast<CXXNewExpr>(CE)) {
      // We are currently evaluating a CXXNewAllocator CFGElement. It takes a
      // while to reach the actual CXXNewExpr element from here, so keep the
      // region for later use.
      // Additionally cast the return value of the inlined operator new
      // (which is of type 'void *') to the correct object type.
      SVal AllocV = state->getSVal(CNE, callerCtx);
      AllocV = svalBuilder.evalCast(
          AllocV, CNE->getType(),
          getContext().getPointerType(getContext().VoidTy));

      state = addObjectUnderConstruction(state, CNE, calleeCtx->getParent(),
                                         AllocV);
    }
  }

  // Step 3: BindedRetNode -> CleanedNodes
  // If we can find a statement and a block in the inlined function, run remove
  // dead bindings before returning from the call. This is important to ensure
  // that we report issues such as leaks in the stack contexts in which
  // they occurred.
  ExplodedNodeSet CleanedNodes;
  if (LastSt && Blk && AMgr.options.AnalysisPurgeOpt != PurgeNone) {
    static SimpleProgramPointTag retValBind("ExprEngine", "Bind Return Value");
    PostStmt Loc(LastSt, calleeCtx, &retValBind);
    bool isNew;
    ExplodedNode *BindedRetNode = G.getNode(Loc, state, false, &isNew);
    BindedRetNode->addPredecessor(CEBNode, G);
    if (!isNew)
      return;

    NodeBuilderContext Ctx(getCoreEngine(), Blk, BindedRetNode);
    currBldrCtx = &Ctx;
    // Here, we call the symbol reaper with a null statement and the callee's
    // location context, telling it to clean up everything in the callee's
    // context (and its children). We use the callee's function body as a
    // diagnostic statement, with which the program point will be associated.
    removeDead(BindedRetNode, CleanedNodes, nullptr, calleeCtx,
               calleeCtx->getAnalysisDeclContext()->getBody(),
               ProgramPoint::PostStmtPurgeDeadSymbolsKind);
    currBldrCtx = nullptr;
  } else {
    CleanedNodes.Add(CEBNode);
  }

  for (ExplodedNodeSet::iterator I = CleanedNodes.begin(),
                                 E = CleanedNodes.end(); I != E; ++I) {

    // Step 4: Generate the CallExit and leave the callee's context.
    // CleanedNodes -> CEENode
    CallExitEnd Loc(calleeCtx, callerCtx);
    bool isNew;
    ProgramStateRef CEEState = (*I == CEBNode) ? state : (*I)->getState();

    ExplodedNode *CEENode = G.getNode(Loc, CEEState, false, &isNew);
    CEENode->addPredecessor(*I, G);
    if (!isNew)
      return;

    // Step 5: Perform the post-condition check of the CallExpr and enqueue the
    // result onto the work list.
    // CEENode -> Dst -> WorkList
    NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), CEENode);
    SaveAndRestore<const NodeBuilderContext*> NBCSave(currBldrCtx,
        &Ctx);
    SaveAndRestore<unsigned> CBISave(currStmtIdx, calleeCtx->getIndex());

    CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState);

    ExplodedNodeSet DstPostCall;
    if (llvm::isa_and_nonnull<CXXNewExpr>(CE)) {
      ExplodedNodeSet DstPostPostCallCallback;
      getCheckerManager().runCheckersForPostCall(DstPostPostCallCallback,
                                                 CEENode, *UpdatedCall, *this,
                                                 /*wasInlined=*/true);
      for (ExplodedNode *I : DstPostPostCallCallback) {
        getCheckerManager().runCheckersForNewAllocator(
            cast<CXXAllocatorCall>(*UpdatedCall), DstPostCall, I, *this,
            /*wasInlined=*/true);
      }
    } else {
      getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode,
                                                 *UpdatedCall, *this,
                                                 /*wasInlined=*/true);
    }
    ExplodedNodeSet Dst;
    if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
      getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, *Msg,
                                                        *this,
                                                        /*wasInlined=*/true);
    } else if (CE &&
               !(isa<CXXNewExpr>(CE) && // Called when visiting CXXNewExpr.
                 AMgr.getAnalyzerOptions().MayInlineCXXAllocator)) {
      getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE,
                                                 *this, /*wasInlined=*/true);
    } else {
      Dst.insert(DstPostCall);
    }

    // Enqueue the next element in the block.
    for (ExplodedNodeSet::iterator PSI = Dst.begin(), PSE = Dst.end();
                                   PSI != PSE; ++PSI) {
      Engine.getWorkList()->enqueue(*PSI, calleeCtx->getCallSiteBlock(),
                                    calleeCtx->getIndex()+1);
    }
  }
}

bool ExprEngine::isSmall(AnalysisDeclContext *ADC) const {
  // When there are no branches in the function, it means that there's no
  // exponential complexity introduced by inlining such a function.
  // Such functions also don't trigger various fundamental problems
  // with our inlining mechanism, such as the problem of
  // inlined defensive checks. Hence isLinear().
  const CFG *Cfg = ADC->getCFG();
  return Cfg->isLinear() || Cfg->size() <= AMgr.options.AlwaysInlineSize;
}

bool ExprEngine::isLarge(AnalysisDeclContext *ADC) const {
  const CFG *Cfg = ADC->getCFG();
  return Cfg->size() >= AMgr.options.MinCFGSizeTreatFunctionsAsLarge;
}

bool ExprEngine::isHuge(AnalysisDeclContext *ADC) const {
  const CFG *Cfg = ADC->getCFG();
  return Cfg->getNumBlockIDs() > AMgr.options.MaxInlinableSize;
}
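
// Note that these size classes are not mutually exclusive: for example, a
// function with a linear CFG counts as small regardless of its block count,
// so it may satisfy isSmall() and isLarge() at the same time.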

void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx,
                               bool &IsRecursive, unsigned &StackDepth) {
  IsRecursive = false;
  StackDepth = 0;

  while (LCtx) {
    if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LCtx)) {
      const Decl *DI = SFC->getDecl();

      // Mark recursive (and mutually recursive) functions and always count
      // them when measuring the stack depth.
      if (DI == D) {
        IsRecursive = true;
        ++StackDepth;
        LCtx = LCtx->getParent();
        continue;
      }

      // Do not count the small functions when determining the stack depth.
      AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(DI);
      if (!isSmall(CalleeADC))
        ++StackDepth;
    }
    LCtx = LCtx->getParent();
  }
}

// The GDM component containing the dynamic dispatch bifurcation info. When
// the exact type of the receiver is not known, we want to explore both paths -
// one on which we do inline it and one on which we don't. This is done to
// ensure we do not drop coverage.
// This is the map from the receiver region to a value specifying whether we
// consider this region's information precise along the given path.
namespace {
  enum DynamicDispatchMode {
    DynamicDispatchModeInlined = 1,
    DynamicDispatchModeConservative
  };
} // end anonymous namespace

REGISTER_MAP_WITH_PROGRAMSTATE(DynamicDispatchBifurcationMap,
                               const MemRegion *, unsigned)
REGISTER_TRAIT_WITH_PROGRAMSTATE(CTUDispatchBifurcation, bool)

void ExprEngine::ctuBifurcate(const CallEvent &Call, const Decl *D,
                              NodeBuilder &Bldr, ExplodedNode *Pred,
                              ProgramStateRef State) {
  ProgramStateRef ConservativeEvalState = nullptr;
  if (Call.isForeign() && !isSecondPhaseCTU()) {
    const auto IK = AMgr.options.getCTUPhase1Inlining();
    const bool DoInline = IK == CTUPhase1InliningKind::All ||
                          (IK == CTUPhase1InliningKind::Small &&
                           isSmall(AMgr.getAnalysisDeclContext(D)));
    if (DoInline) {
      inlineCall(Engine.getWorkList(), Call, D, Bldr, Pred, State);
      return;
    }
    const bool BState = State->get<CTUDispatchBifurcation>();
    if (!BState) { // This is the first time we see this foreign function.
      // Enqueue it to be analyzed in the second (ctu) phase.
      inlineCall(Engine.getCTUWorkList(), Call, D, Bldr, Pred, State);
      // Conservatively evaluate in the first phase.
      ConservativeEvalState = State->set<CTUDispatchBifurcation>(true);
      conservativeEvalCall(Call, Bldr, Pred, ConservativeEvalState);
    } else {
      conservativeEvalCall(Call, Bldr, Pred, State);
    }
    return;
  }
  inlineCall(Engine.getWorkList(), Call, D, Bldr, Pred, State);
}

void ExprEngine::inlineCall(WorkList *WList, const CallEvent &Call,
                            const Decl *D, NodeBuilder &Bldr,
                            ExplodedNode *Pred, ProgramStateRef State) {
  assert(D);

  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getStackFrame();
  const LocationContext *ParentOfCallee = CallerSFC;
  if (Call.getKind() == CE_Block &&
      !cast<BlockCall>(Call).isConversionFromLambda()) {
    const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
    assert(BR && "If we have the block definition we should have its region");
    AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
    ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
                                                         cast<BlockDecl>(D),
                                                         BR);
  }

  // This may be NULL, but that's fine.
  const Expr *CallE = Call.getOriginExpr();

  // Construct a new stack frame for the callee.
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
  const StackFrameContext *CalleeSFC =
      CalleeADC->getStackFrame(ParentOfCallee, CallE, currBldrCtx->getBlock(),
                               currBldrCtx->blockCount(), currStmtIdx);

  CallEnter Loc(CallE, CalleeSFC, CurLC);

  // Construct a new state which contains the mapping from actual to
  // formal arguments.
  State = State->enterStackFrame(Call, CalleeSFC);

  bool isNew;
  if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
    N->addPredecessor(Pred, G);
    if (isNew)
      WList->enqueue(N);
  }

  // If we decided to inline the call, the successor has been manually
  // added onto the work list so remove it from the node builder.
  Bldr.takeNodes(Pred);

  NumInlinedCalls++;
  Engine.FunctionSummaries->bumpNumTimesInlined(D);

  // Do not mark as visited in the 2nd run (CTUWList), so the function will
  // be visited as top-level; this way we won't lose reports in non-CTU mode,
  // considering the case when a function in a foreign TU calls back into the
  // main TU.
  // Note, during the 1st run, it doesn't matter if we mark the foreign
  // functions as visited (or not) because they can never appear as a top level
  // function in the main TU.
  if (!isSecondPhaseCTU())
    // Mark the decl as visited.
    if (VisitedCallees)
      VisitedCallees->insert(D);
}

static ProgramStateRef getInlineFailedState(ProgramStateRef State,
                                            const Stmt *CallE) {
  const void *ReplayState = State->get<ReplayWithoutInlining>();
  if (!ReplayState)
    return nullptr;

  assert(ReplayState == CallE && "Backtracked to the wrong call.");
  (void)CallE;

  return State->remove<ReplayWithoutInlining>();
}

void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
                               ExplodedNodeSet &dst) {
  // Perform the previsit of the CallExpr.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, CE, *this);

  // Get the call in its initial state. We use this as a template to perform
  // all the checks.
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> CallTemplate
    = CEMgr.getSimpleCall(CE, Pred->getState(), Pred->getLocationContext());

  // Evaluate the function call.  We try each of the checkers
  // to see if they can evaluate the function call.
  ExplodedNodeSet dstCallEvaluated;
  for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end();
       I != E; ++I) {
    evalCall(dstCallEvaluated, *I, *CallTemplate);
  }

  // Finally, perform the post-condition check of the CallExpr and store
  // the created nodes in 'Dst'.
  // Note that if the call was inlined, dstCallEvaluated will be empty.
  // The post-CallExpr check will occur in processCallExit.
  getCheckerManager().runCheckersForPostStmt(dst, dstCallEvaluated, CE,
                                             *this);
}

ProgramStateRef ExprEngine::finishArgumentConstruction(ProgramStateRef State,
                                                       const CallEvent &Call) {
  const Expr *E = Call.getOriginExpr();
  // FIXME: Constructors to placement arguments of operator new
  // are not supported yet.
  if (!E || isa<CXXNewExpr>(E))
    return State;

  const LocationContext *LC = Call.getLocationContext();
  for (unsigned CallI = 0, CallN = Call.getNumArgs(); CallI != CallN; ++CallI) {
    unsigned I = Call.getASTArgumentIndex(CallI);
    if (Optional<SVal> V =
            getObjectUnderConstruction(State, {E, I}, LC)) {
      SVal VV = *V;
      (void)VV;
      assert(cast<VarRegion>(VV.castAs<loc::MemRegionVal>().getRegion())
                 ->getStackFrame()->getParent()
                 ->getStackFrame() == LC->getStackFrame());
      State = finishObjectConstruction(State, {E, I}, LC);
    }
  }

  return State;
}

void ExprEngine::finishArgumentConstruction(ExplodedNodeSet &Dst,
                                            ExplodedNode *Pred,
                                            const CallEvent &Call) {
  ProgramStateRef State = Pred->getState();
  ProgramStateRef CleanedState = finishArgumentConstruction(State, Call);
  if (CleanedState == State) {
    Dst.insert(Pred);
    return;
  }

  const Expr *E = Call.getOriginExpr();
  const LocationContext *LC = Call.getLocationContext();
  NodeBuilder B(Pred, Dst, *currBldrCtx);
  static SimpleProgramPointTag Tag("ExprEngine",
                                   "Finish argument construction");
  PreStmt PP(E, LC, &Tag);
  B.generateNode(PP, CleanedState, Pred);
}

void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
                          const CallEvent &Call) {
  // WARNING: At this time, the state attached to 'Call' may be older than the
  // state in 'Pred'. This is a minor optimization since CheckerManager will
  // use an updated CallEvent instance when calling checkers, but if 'Call' is
  // ever used directly in this function all callers should be updated to pass
  // the most recent state. (It is probably not worth doing the work here since
  // for some callers this will not be necessary.)

  // Run any pre-call checks using the generic call interface.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred,
                                            Call, *this);

  // Actually evaluate the function call.  We try each of the checkers
  // to see if they can evaluate the function call, and get a callback at
  // defaultEvalCall if all of them fail.
  ExplodedNodeSet dstCallEvaluated;
  getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
                                             Call, *this, EvalCallOptions());

  // If there were other constructors called for object-type arguments
  // of this call, clean them up.
  ExplodedNodeSet dstArgumentCleanup;
  for (ExplodedNode *I : dstCallEvaluated)
    finishArgumentConstruction(dstArgumentCleanup, I, Call);

  ExplodedNodeSet dstPostCall;
  getCheckerManager().runCheckersForPostCall(dstPostCall, dstArgumentCleanup,
                                             Call, *this);

  // Handle symbols that escaped while invalidating the regions above.
  // Note that, for inlined calls, the nodes were put back into the worklist,
  // so we can assume that every node belongs to a conservative call at this
  // point.

  // Run the pointerEscape callback with the newly conjured symbols.
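  // For example, after conservatively evaluating 'foo(&x)', 'x' is bound to
  // a freshly conjured symbol, which is reported here as escaping through an
  // out-parameter.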
  SmallVector<std::pair<SVal, SVal>, 8> Escaped;
  for (ExplodedNode *I : dstPostCall) {
    NodeBuilder B(I, Dst, *currBldrCtx);
    ProgramStateRef State = I->getState();
    Escaped.clear();
    {
      unsigned Arg = -1;
      for (const ParmVarDecl *PVD : Call.parameters()) {
        ++Arg;
        QualType ParamTy = PVD->getType();
        if (ParamTy.isNull() ||
            (!ParamTy->isPointerType() && !ParamTy->isReferenceType()))
          continue;
        QualType Pointee = ParamTy->getPointeeType();
        if (Pointee.isConstQualified() || Pointee->isVoidType())
          continue;
        if (const MemRegion *MR = Call.getArgSVal(Arg).getAsRegion())
          Escaped.emplace_back(loc::MemRegionVal(MR), State->getSVal(MR, Pointee));
      }
    }

    State = processPointerEscapedOnBind(State, Escaped, I->getLocationContext(),
                                        PSK_EscapeOutParameters, &Call);

    if (State == I->getState())
      Dst.insert(I);
    else
      B.generateNode(I->getLocation(), State, I);
  }
}

ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
                                            const LocationContext *LCtx,
                                            ProgramStateRef State) {
  const Expr *E = Call.getOriginExpr();
  if (!E)
    return State;

  // Some method families have known return values.
  if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) {
    switch (Msg->getMethodFamily()) {
    default:
      break;
    case OMF_autorelease:
    case OMF_retain:
    case OMF_self: {
      // These methods return their receivers.
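      // For example, the result of '[obj retain]' is 'obj' itself.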
      return State->BindExpr(E, LCtx, Msg->getReceiverSVal());
    }
    }
  } else if (const CXXConstructorCall *C = dyn_cast<CXXConstructorCall>(&Call)){
    SVal ThisV = C->getCXXThisVal();
    ThisV = State->getSVal(ThisV.castAs<Loc>());
    return State->BindExpr(E, LCtx, ThisV);
  }

  SVal R;
  QualType ResultTy = Call.getResultType();
  unsigned Count = currBldrCtx->blockCount();
  if (auto RTC = getCurrentCFGElement().getAs<CFGCXXRecordTypedCall>()) {
    // Conjure a temporary if the function returns an object by value.
    SVal Target;
    assert(RTC->getStmt() == Call.getOriginExpr());
    EvalCallOptions CallOpts; // FIXME: We won't really need those.
    std::tie(State, Target) =
        handleConstructionContext(Call.getOriginExpr(), State, LCtx,
                                  RTC->getConstructionContext(), CallOpts);
    const MemRegion *TargetR = Target.getAsRegion();
    assert(TargetR);
    // Invalidate the region so that it doesn't look uninitialized. If this is
    // a field or element constructor, we do not want to invalidate
    // the whole structure. Pointer escape is meaningless because
    // the structure is a product of conservative evaluation
    // and therefore contains nothing interesting at this point.
    RegionAndSymbolInvalidationTraits ITraits;
    ITraits.setTrait(TargetR,
        RegionAndSymbolInvalidationTraits::TK_DoNotInvalidateSuperRegion);
    State = State->invalidateRegions(TargetR, E, Count, LCtx,
                                     /* CausesPointerEscape=*/false, nullptr,
                                     &Call, &ITraits);

    R = State->getSVal(Target.castAs<Loc>(), E->getType());
  } else {
    // Conjure a symbol if the return value is unknown.

    // See if we need to conjure a heap pointer instead of
    // a regular unknown pointer.
    const auto *CNE = dyn_cast<CXXNewExpr>(E);
    if (CNE && CNE->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
      R = svalBuilder.getConjuredHeapSymbolVal(E, LCtx, Count);
      const MemRegion *MR = R.getAsRegion()->StripCasts();

      // Store the extent of the allocated object(s).
      SVal ElementCount;
      if (const Expr *SizeExpr = CNE->getArraySize().value_or(nullptr)) {
        ElementCount = State->getSVal(SizeExpr, LCtx);
      } else {
        ElementCount = svalBuilder.makeIntVal(1, /*IsUnsigned=*/true);
      }

      SVal ElementSize = getElementExtent(CNE->getAllocatedType(), svalBuilder);

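      // The dynamic extent of the allocation is the element count times the
      // element size, e.g. 'n * sizeof(int)' for 'new int[n]'.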
      SVal Size =
          svalBuilder.evalBinOp(State, BO_Mul, ElementCount, ElementSize,
                                svalBuilder.getArrayIndexType());

      State = setDynamicExtent(State, MR, Size.castAs<DefinedOrUnknownSVal>(),
                               svalBuilder);
    } else {
      R = svalBuilder.conjureSymbolVal(nullptr, E, LCtx, ResultTy, Count);
    }
  }
  return State->BindExpr(E, LCtx, R);
}

// Conservatively evaluate a call by invalidating regions and binding
// a conjured return value.
void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
                                      ExplodedNode *Pred, ProgramStateRef State) {
  State = Call.invalidateRegions(currBldrCtx->blockCount(), State);
  State = bindReturnValue(Call, Pred->getLocationContext(), State);

  // And make the result node.
  Bldr.generateNode(Call.getProgramPoint(), State, Pred);
}

ExprEngine::CallInlinePolicy
ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
                              AnalyzerOptions &Opts,
                              const EvalCallOptions &CallOpts) {
  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getStackFrame();
  switch (Call.getKind()) {
  case CE_Function:
  case CE_Block:
    break;
  case CE_CXXMember:
  case CE_CXXMemberOperator:
    if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions))
      return CIP_DisallowedAlways;
    break;
  case CE_CXXConstructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors))
      return CIP_DisallowedAlways;

    const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);

    const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();

    auto CCE = getCurrentCFGElement().getAs<CFGConstructor>();
    const ConstructionContext *CC = CCE ? CCE->getConstructionContext()
                                        : nullptr;

    if (llvm::isa_and_nonnull<NewAllocatedObjectConstructionContext>(CC) &&
        !Opts.MayInlineCXXAllocator)
      return CIP_DisallowedOnce;

    // FIXME: We don't handle constructors or destructors for arrays properly.
    // Even once we do, we still need to be careful about implicitly-generated
    // initializers for array fields in default move/copy constructors.
    // We still allow construction into ElementRegion targets when they don't
    // represent array elements.
    if (CallOpts.IsArrayCtorOrDtor)
      return CIP_DisallowedOnce;

    // Inlining constructors requires including initializers in the CFG.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers");
    (void)ADC;

    // If the destructor is trivial, it's always safe to inline the constructor.
    if (Ctor.getDecl()->getParent()->hasTrivialDestructor())
      break;

    // For other types, only inline constructors if destructor inlining is
    // also enabled.
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return CIP_DisallowedAlways;

    if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete) {
      // If we don't handle temporary destructors, we shouldn't inline
      // their constructors.
      if (CallOpts.IsTemporaryCtorOrDtor &&
          !Opts.ShouldIncludeTemporaryDtorsInCFG)
        return CIP_DisallowedOnce;

      // If we did not find the correct this-region, it would be pointless
      // to inline the constructor. Instead we will simply invalidate
      // the fake temporary target.
      if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion)
        return CIP_DisallowedOnce;

      // If the temporary is lifetime-extended by binding it to a reference-type
      // field within an aggregate, automatic destructors don't work properly.
      if (CallOpts.IsTemporaryLifetimeExtendedViaAggregate)
        return CIP_DisallowedOnce;
    }

    break;
  }
  case CE_CXXInheritedConstructor: {
    // This doesn't really increase the cost of inlining ever, because
    // the stack frame of the inherited constructor is trivial.
    return CIP_Allowed;
  }
  case CE_CXXDestructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return CIP_DisallowedAlways;

    // Inlining destructors requires building the CFG correctly.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors");
    (void)ADC;

    // FIXME: We don't handle constructors or destructors for arrays properly.
    if (CallOpts.IsArrayCtorOrDtor)
      return CIP_DisallowedOnce;

    // Allow disabling temporary destructor inlining with a separate option.
    if (CallOpts.IsTemporaryCtorOrDtor &&
        !Opts.MayInlineCXXTemporaryDtors)
      return CIP_DisallowedOnce;

    // If we did not find the correct this-region, it would be pointless
    // to inline the destructor. Instead we will simply invalidate
    // the fake temporary target.
    if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion)
      return CIP_DisallowedOnce;
    break;
  }
  case CE_CXXDeallocator:
    LLVM_FALLTHROUGH;
  case CE_CXXAllocator:
    if (Opts.MayInlineCXXAllocator)
      break;
    // Do not inline allocators until we model deallocators.
    // This is unfortunate, but basically necessary for smart pointers and such.
    return CIP_DisallowedAlways;
  case CE_ObjCMessage:
    if (!Opts.MayInlineObjCMethod)
      return CIP_DisallowedAlways;
    if (!(Opts.getIPAMode() == IPAK_DynamicDispatch ||
          Opts.getIPAMode() == IPAK_DynamicDispatchBifurcate))
      return CIP_DisallowedAlways;
    break;
  }

  return CIP_Allowed;
}

/// Returns true if the given C++ class contains a member with the given name.
static bool hasMember(const ASTContext &Ctx, const CXXRecordDecl *RD,
                      StringRef Name) {
  const IdentifierInfo &II = Ctx.Idents.get(Name);
  return RD->hasMemberName(Ctx.DeclarationNames.getIdentifier(&II));
}

/// Returns true if the given C++ class is a container or iterator.
///
/// Our heuristic for this is whether it contains a method named 'begin()' or a
/// nested type named 'iterator' or 'iterator_category'.
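/// For example, 'std::vector' matches via both its 'begin' member and its
/// nested 'iterator' type.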
static bool isContainerClass(const ASTContext &Ctx, const CXXRecordDecl *RD) {
  return hasMember(Ctx, RD, "begin") ||
         hasMember(Ctx, RD, "iterator") ||
         hasMember(Ctx, RD, "iterator_category");
}

/// Returns true if the given function refers to a method of a C++ container
/// or iterator.
///
/// We generally do a poor job modeling most containers right now, and might
/// prefer not to inline their methods.
static bool isContainerMethod(const ASTContext &Ctx,
                              const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    return isContainerClass(Ctx, MD->getParent());
  return false;
}

/// Returns true if the given function is the destructor of a class named
/// "shared_ptr".
static bool isCXXSharedPtrDtor(const FunctionDecl *FD) {
  const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(FD);
  if (!Dtor)
    return false;

  const CXXRecordDecl *RD = Dtor->getParent();
  if (const IdentifierInfo *II = RD->getDeclName().getAsIdentifierInfo())
    if (II->isStr("shared_ptr"))
      return true;

  return false;
}

/// Returns true if the function in \p CalleeADC may be inlined in general.
///
/// This checks static properties of the function, such as its signature and
/// CFG, to determine whether the analyzer should ever consider inlining it,
/// in any context.
bool ExprEngine::mayInlineDecl(AnalysisDeclContext *CalleeADC) const {
  AnalyzerOptions &Opts = AMgr.getAnalyzerOptions();
  // FIXME: Do not inline variadic calls.
  if (CallEvent::isVariadic(CalleeADC->getDecl()))
    return false;

  // Check certain C++-related inlining policies.
  ASTContext &Ctx = CalleeADC->getASTContext();
  if (Ctx.getLangOpts().CPlusPlus) {
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeADC->getDecl())) {
      // Conditionally control the inlining of template functions.
      if (!Opts.MayInlineTemplateFunctions)
        if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
          return false;

      // Conditionally control the inlining of C++ standard library functions.
      if (!Opts.MayInlineCXXStandardLibrary)
        if (Ctx.getSourceManager().isInSystemHeader(FD->getLocation()))
          if (AnalysisDeclContext::isInStdNamespace(FD))
            return false;

      // Conditionally control the inlining of methods on objects that look
      // like C++ containers.
      if (!Opts.MayInlineCXXContainerMethods)
        if (!AMgr.isInCodeFile(FD->getLocation()))
          if (isContainerMethod(Ctx, FD))
            return false;

      // Conditionally control the inlining of the destructor of C++
      // shared_ptr. We don't currently do a good job modeling shared_ptr
      // because we can't see the reference count, so treating it as opaque
      // is probably the best idea.
      if (!Opts.MayInlineCXXSharedPtrDtor)
        if (isCXXSharedPtrDtor(FD))
          return false;
    }
  }

  // It is possible that the CFG cannot be constructed.
  // Be safe, and check if the CalleeCFG is valid.
  const CFG *CalleeCFG = CalleeADC->getCFG();
  if (!CalleeCFG)
    return false;

  // Do not inline huge functions.
  if (isHuge(CalleeADC))
    return false;

  // It is possible that the live variables analysis cannot be
  // run.  If so, bail out.
  if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
    return false;

  return true;
}

bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
                                  const ExplodedNode *Pred,
                                  const EvalCallOptions &CallOpts) {
  if (!D)
    return false;

  AnalysisManager &AMgr = getAnalysisManager();
  AnalyzerOptions &Opts = AMgr.options;
  AnalysisDeclContextManager &ADCMgr = AMgr.getAnalysisDeclContextManager();
  AnalysisDeclContext *CalleeADC = ADCMgr.getContext(D);

  // The auto-synthesized bodies are essential to inline as they are
  // usually small and commonly used. Note: we should do this check early on to
  // ensure we always inline these calls.
  if (CalleeADC->isBodyAutosynthesized())
    return true;

  if (!AMgr.shouldInlineCall())
    return false;

  // Check if this function has been marked as non-inlinable.
  Optional<bool> MayInline = Engine.FunctionSummaries->mayInline(D);
  if (MayInline) {
    if (!MayInline.getValue())
      return false;

  } else {
    // We haven't actually checked the static properties of this function yet.
    // Do that now, and record our decision in the function summaries.
    if (mayInlineDecl(CalleeADC)) {
      Engine.FunctionSummaries->markMayInline(D);
    } else {
      Engine.FunctionSummaries->markShouldNotInline(D);
      return false;
    }
  }

  // Check if we should inline a call based on its kind.
  // FIXME: this checks both static and dynamic properties of the call, which
  // means we're redoing a bit of work that could be cached in the function
  // summary.
  CallInlinePolicy CIP = mayInlineCallKind(Call, Pred, Opts, CallOpts);
  if (CIP != CIP_Allowed) {
    if (CIP == CIP_DisallowedAlways) {
      assert(!MayInline || *MayInline);
      Engine.FunctionSummaries->markShouldNotInline(D);
    }
    return false;
  }

  // Do not inline if recursive or if we've reached the maximum stack depth.
  bool IsRecursive = false;
  unsigned StackDepth = 0;
  examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth);
  if ((StackDepth >= Opts.InlineMaxStackDepth) &&
      (!isSmall(CalleeADC) || IsRecursive))
    return false;

  // Do not inline large functions too many times.
  if ((Engine.FunctionSummaries->getNumTimesInlined(D) >
       Opts.MaxTimesInlineLarge) &&
      isLarge(CalleeADC)) {
    NumReachedInlineCountMax++;
    return false;
  }

  if (HowToInline == Inline_Minimal && (!isSmall(CalleeADC) || IsRecursive))
    return false;

  return true;
}

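/// Returns true if the call is an assignment operator call that can be
/// modeled as a trivial copy, e.g. the implicitly-defined copy 'operator='
/// of a POD struct; such calls are handled by performTrivialCopy() rather
/// than being inlined.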
static bool isTrivialObjectAssignment(const CallEvent &Call) {
  const CXXInstanceCall *ICall = dyn_cast<CXXInstanceCall>(&Call);
  if (!ICall)
    return false;

  const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(ICall->getDecl());
  if (!MD)
    return false;
  if (!(MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()))
    return false;

  return MD->isTrivial();
}

void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
                                 const CallEvent &CallTemplate,
                                 const EvalCallOptions &CallOpts) {
  // Make sure we have the most recent state attached to the call.
  ProgramStateRef State = Pred->getState();
  CallEventRef<> Call = CallTemplate.cloneWithState(State);

  // Special-case trivial assignment operators.
  if (isTrivialObjectAssignment(*Call)) {
    performTrivialCopy(Bldr, Pred, *Call);
    return;
  }

  // Try to inline the call.
  // The origin expression here is just used as a kind of checksum;
  // this should still be safe even for CallEvents that don't come from exprs.
  const Expr *E = Call->getOriginExpr();

  ProgramStateRef InlinedFailedState = getInlineFailedState(State, E);
  if (InlinedFailedState) {
    // If we already tried once and failed, make sure we don't retry later.
    State = InlinedFailedState;
  } else {
    RuntimeDefinition RD = Call->getRuntimeDefinition();
    Call->setForeign(RD.isForeign());
    const Decl *D = RD.getDecl();
    if (shouldInlineCall(*Call, D, Pred, CallOpts)) {
      if (RD.mayHaveOtherDefinitions()) {
        AnalyzerOptions &Options = getAnalysisManager().options;

        // Explore with and without inlining the call.
        if (Options.getIPAMode() == IPAK_DynamicDispatchBifurcate) {
          BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred);
          return;
        }

        // Don't inline if we're not in any dynamic dispatch mode.
        if (Options.getIPAMode() != IPAK_DynamicDispatch) {
          conservativeEvalCall(*Call, Bldr, Pred, State);
          return;
        }
      }
      ctuBifurcate(*Call, D, Bldr, Pred, State);
      return;
    }
  }

  // If we can't inline it, handle the return value and invalidate the regions.
  conservativeEvalCall(*Call, Bldr, Pred, State);
}

void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
                               const CallEvent &Call, const Decl *D,
                               NodeBuilder &Bldr, ExplodedNode *Pred) {
  assert(BifurReg);
  BifurReg = BifurReg->StripCasts();

  // Check if we've performed the split already - note, we only want
  // to split the path once per memory region.
  ProgramStateRef State = Pred->getState();
  const unsigned *BState =
                        State->get<DynamicDispatchBifurcationMap>(BifurReg);
  if (BState) {
    // If we are on the "inline" path, keep inlining if possible.
    if (*BState == DynamicDispatchModeInlined)
      ctuBifurcate(Call, D, Bldr, Pred, State);
    // Otherwise, we are on the path where we assume we don't have enough
    // info about the receiver to inline, so conjure the return value and
    // invalidate the regions.
    else
      conservativeEvalCall(Call, Bldr, Pred, State);
    return;
  }

  // If we got here, this is the first time we process a message to this
  // region, so split the path.
  ProgramStateRef IState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                                DynamicDispatchModeInlined);
  ctuBifurcate(Call, D, Bldr, Pred, IState);

  ProgramStateRef NoIState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                                DynamicDispatchModeConservative);
  conservativeEvalCall(Call, Bldr, Pred, NoIState);

  NumOfDynamicDispatchPathSplits++;
}

void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
                                 ExplodedNodeSet &Dst) {
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, RS, *this);

  StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx);

  if (RS->getRetValue()) {
    for (ExplodedNodeSet::iterator it = dstPreVisit.begin(),
                                   ei = dstPreVisit.end(); it != ei; ++it) {
      B.generateNode(RS, *it, (*it)->getState());
    }
  }
}