//===-- ExprEngineCallAndReturn.cpp - Support for call/return --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This file defines ExprEngine's support for calls and returns.
//
//===----------------------------------------------------------------------===//

#include "PrettyStackTraceLocationContext.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/ConstructionContext.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace clang;
using namespace ento;

#define DEBUG_TYPE "ExprEngine"

STATISTIC(NumOfDynamicDispatchPathSplits,
  "The # of times we split the path due to imprecise dynamic dispatch info");

STATISTIC(NumInlinedCalls,
  "The # of times we inlined a call");

STATISTIC(NumReachedInlineCountMax,
  "The # of times we reached inline count maximum");

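/// Enter an inlined callee: build the edge from the callee's empty entry
/// block to its sole successor, notify checkers that analysis of the
/// function has begun, and enqueue the resulting nodes on the worklist.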
void ExprEngine::processCallEnter(NodeBuilderContext& BC, CallEnter CE,
                                  ExplodedNode *Pred) {
  // Get the entry block in the CFG of the callee.
  const StackFrameContext *calleeCtx = CE.getCalleeContext();
  PrettyStackTraceLocationContext CrashInfo(calleeCtx);
  const CFGBlock *Entry = CE.getEntry();

  // Validate the CFG.
  assert(Entry->empty());
  assert(Entry->succ_size() == 1);

  // Get the solitary successor.
  const CFGBlock *Succ = *(Entry->succ_begin());

  // Construct an edge representing the starting location in the callee.
  BlockEdge Loc(Entry, Succ, calleeCtx);

  ProgramStateRef state = Pred->getState();

  // Construct a new node, notify checkers that analysis of the function has
  // begun, and add the resultant nodes to the worklist.
  bool isNew;
  ExplodedNode *Node = G.getNode(Loc, state, false, &isNew);
  Node->addPredecessor(Pred, G);
  if (isNew) {
    ExplodedNodeSet DstBegin;
    processBeginOfFunction(BC, Node, DstBegin, Loc);
    Engine.enqueue(DstBegin);
  }
}

// Find the last statement on the path to the exploded node and the
// corresponding Block.
static std::pair<const Stmt*,
                 const CFGBlock*> getLastStmt(const ExplodedNode *Node) {
  const Stmt *S = nullptr;
  const CFGBlock *Blk = nullptr;
  const StackFrameContext *SF = Node->getStackFrame();

  // Back up through the ExplodedGraph until we reach a statement node in this
  // stack frame.
  while (Node) {
    const ProgramPoint &PP = Node->getLocation();

    if (PP.getStackFrame() == SF) {
      if (Optional<StmtPoint> SP = PP.getAs<StmtPoint>()) {
        S = SP->getStmt();
        break;
      } else if (Optional<CallExitEnd> CEE = PP.getAs<CallExitEnd>()) {
        S = CEE->getCalleeContext()->getCallSite();
        if (S)
          break;

        // If there is no statement, this is an implicitly-generated call.
        // We'll walk backwards over it and then continue the loop to find
        // an actual statement.
        Optional<CallEnter> CE;
        do {
          Node = Node->getFirstPred();
          CE = Node->getLocationAs<CallEnter>();
        } while (!CE || CE->getCalleeContext() != CEE->getCalleeContext());

        // Continue searching the graph.
      } else if (Optional<BlockEdge> BE = PP.getAs<BlockEdge>()) {
        Blk = BE->getSrc();
      }
    } else if (Optional<CallEnter> CE = PP.getAs<CallEnter>()) {
      // If we reached the CallEnter for this function, it has no statements.
      if (CE->getCalleeContext() == SF)
        break;
    }

    if (Node->pred_empty())
      return std::make_pair(nullptr, nullptr);

    Node = *Node->pred_begin();
  }

  return std::make_pair(S, Blk);
}

/// Adjusts a return value when the called function's return type does not
/// match the caller's expression type. This can happen when a dynamic call
/// is devirtualized, and the overriding method has a covariant (more specific)
/// return type than the parent's method. For C++ objects, this means we need
/// to add base casts.
static SVal adjustReturnValue(SVal V, QualType ExpectedTy, QualType ActualTy,
                              StoreManager &StoreMgr) {
  // For now, the only adjustments we handle apply only to locations.
  if (!V.getAs<Loc>())
    return V;

  // If the types already match, don't do any unnecessary work.
  ExpectedTy = ExpectedTy.getCanonicalType();
  ActualTy = ActualTy.getCanonicalType();
  if (ExpectedTy == ActualTy)
    return V;

  // No adjustment is needed between Objective-C pointer types.
  if (ExpectedTy->isObjCObjectPointerType() &&
      ActualTy->isObjCObjectPointerType())
    return V;

  // C++ object pointers may need "derived-to-base" casts.
  const CXXRecordDecl *ExpectedClass = ExpectedTy->getPointeeCXXRecordDecl();
  const CXXRecordDecl *ActualClass = ActualTy->getPointeeCXXRecordDecl();
  if (ExpectedClass && ActualClass) {
    CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                       /*DetectVirtual=*/false);
    if (ActualClass->isDerivedFrom(ExpectedClass, Paths) &&
        !Paths.isAmbiguous(ActualTy->getCanonicalTypeUnqualified())) {
      return StoreMgr.evalDerivedToBase(V, Paths.front());
    }
  }

  // Unfortunately, Objective-C does not enforce that overridden methods have
  // covariant return types, so we can't assert that that never happens.
  // Be safe and return UnknownVal().
  return UnknownVal();
}

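/// Remove dead symbols and bindings when execution reaches the end of a
/// function. The last statement on the path is used as the cleanup reference
/// only if it is a ReturnStmt; otherwise everything in the frame is dead.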
void ExprEngine::removeDeadOnEndOfFunction(NodeBuilderContext& BC,
                                           ExplodedNode *Pred,
                                           ExplodedNodeSet &Dst) {
  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = nullptr;
  const CFGBlock *Blk = nullptr;
  std::tie(LastSt, Blk) = getLastStmt(Pred);
  if (!Blk || !LastSt) {
    Dst.Add(Pred);
    return;
  }

  // Here, we destroy the current location context. We use the current
  // function's entire body as a diagnostic statement, with which the program
  // point will be associated. However, we only want to use LastStmt as a
  // reference for what to clean up if it's a ReturnStmt; otherwise, everything
  // is dead.
  SaveAndRestore<const NodeBuilderContext *> NodeContextRAII(currBldrCtx, &BC);
  const LocationContext *LCtx = Pred->getLocationContext();
  removeDead(Pred, Dst, dyn_cast<ReturnStmt>(LastSt), LCtx,
             LCtx->getAnalysisDeclContext()->getBody(),
             ProgramPoint::PostStmtPurgeDeadSymbolsKind);
}

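/// Returns true if the callee definition that was actually inlined differs
/// from the declaration seen at the call site, as happens after
/// devirtualization of a dynamic call.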
static bool wasDifferentDeclUsedForInlining(CallEventRef<> Call,
    const StackFrameContext *calleeCtx) {
  const Decl *RuntimeCallee = calleeCtx->getDecl();
  const Decl *StaticDecl = Call->getDecl();
  assert(RuntimeCallee);
  if (!StaticDecl)
    return true;
  return RuntimeCallee->getCanonicalDecl() != StaticDecl->getCanonicalDecl();
}

/// The call exit is simulated with a sequence of nodes, which occur between
/// CallExitBegin and CallExitEnd. The following operations occur between the
/// two program points:
/// 1. CallExitBegin (triggers the start of call exit sequence)
/// 2. Bind the return value
/// 3. Run remove-dead-bindings to clean up the dead symbols from the callee.
/// 4. CallExitEnd (switch to the caller context)
/// 5. PostStmt<CallExpr>
void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
  // Step 1: CEBNode was generated before the call.
  PrettyStackTraceLocationContext CrashInfo(CEBNode->getLocationContext());
  const StackFrameContext *calleeCtx = CEBNode->getStackFrame();

  // The parent context might not be a stack frame, so make sure we
  // look up the first enclosing stack frame.
  const StackFrameContext *callerCtx =
    calleeCtx->getParent()->getStackFrame();

  const Stmt *CE = calleeCtx->getCallSite();
  ProgramStateRef state = CEBNode->getState();
  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = nullptr;
  const CFGBlock *Blk = nullptr;
  std::tie(LastSt, Blk) = getLastStmt(CEBNode);

  // Generate a CallEvent /before/ cleaning the state, so that we can get the
  // correct value for 'this' (if necessary).
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state);

  // Step 2: generate node with bound return value: CEBNode -> BindedRetNode.

  // If the callee returns an expression, bind its value to CallExpr.
  if (CE) {
    if (const ReturnStmt *RS = dyn_cast_or_null<ReturnStmt>(LastSt)) {
      const LocationContext *LCtx = CEBNode->getLocationContext();
      SVal V = state->getSVal(RS, LCtx);

      // Ensure that the return type matches the type of the returned Expr.
      if (wasDifferentDeclUsedForInlining(Call, calleeCtx)) {
        QualType ReturnedTy =
          CallEvent::getDeclaredResultType(calleeCtx->getDecl());
        if (!ReturnedTy.isNull()) {
          if (const Expr *Ex = dyn_cast<Expr>(CE)) {
            V = adjustReturnValue(V, Ex->getType(), ReturnedTy,
                                  getStoreManager());
          }
        }
      }

      state = state->BindExpr(CE, callerCtx, V);
    }

    // Bind the constructed object value to CXXConstructExpr.
    if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) {
      loc::MemRegionVal This =
        svalBuilder.getCXXThis(CCE->getConstructor()->getParent(), calleeCtx);
      SVal ThisV = state->getSVal(This);
      ThisV = state->getSVal(ThisV.castAs<Loc>());
      state = state->BindExpr(CCE, callerCtx, ThisV);
    }

    if (const auto *CNE = dyn_cast<CXXNewExpr>(CE)) {
      // We are currently evaluating a CXXNewAllocator CFGElement. It takes a
      // while to reach the actual CXXNewExpr element from here, so keep the
      // region for later use.
      // Additionally cast the return value of the inlined operator new
      // (which is of type 'void *') to the correct object type.
      SVal AllocV = state->getSVal(CNE, callerCtx);
      AllocV = svalBuilder.evalCast(
          AllocV, CNE->getType(),
          getContext().getPointerType(getContext().VoidTy));

      state = addObjectUnderConstruction(state, CNE, calleeCtx->getParent(),
                                         AllocV);
    }
  }

  // Step 3: BindedRetNode -> CleanedNodes
  // If we can find a statement and a block in the inlined function, run remove
  // dead bindings before returning from the call. This is important to ensure
  // that we report issues such as leaks in the stack contexts in which
  // they occurred.
  ExplodedNodeSet CleanedNodes;
  if (LastSt && Blk && AMgr.options.AnalysisPurgeOpt != PurgeNone) {
    static SimpleProgramPointTag retValBind("ExprEngine", "Bind Return Value");
    PostStmt Loc(LastSt, calleeCtx, &retValBind);
    bool isNew;
    ExplodedNode *BindedRetNode = G.getNode(Loc, state, false, &isNew);
    BindedRetNode->addPredecessor(CEBNode, G);
    if (!isNew)
      return;

    NodeBuilderContext Ctx(getCoreEngine(), Blk, BindedRetNode);
    currBldrCtx = &Ctx;
    // Here, we call the Symbol Reaper with a null statement and the callee's
    // location context, telling it to clean up everything in the callee's
    // context (and its children). We use the callee's function body as a
    // diagnostic statement, with which the program point will be associated.
    removeDead(BindedRetNode, CleanedNodes, nullptr, calleeCtx,
               calleeCtx->getAnalysisDeclContext()->getBody(),
               ProgramPoint::PostStmtPurgeDeadSymbolsKind);
    currBldrCtx = nullptr;
  } else {
    CleanedNodes.Add(CEBNode);
  }

  for (ExplodedNodeSet::iterator I = CleanedNodes.begin(),
                                 E = CleanedNodes.end(); I != E; ++I) {

    // Step 4: Generate the CallExit and leave the callee's context.
    // CleanedNodes -> CEENode
    CallExitEnd Loc(calleeCtx, callerCtx);
    bool isNew;
    ProgramStateRef CEEState = (*I == CEBNode) ? state : (*I)->getState();

    ExplodedNode *CEENode = G.getNode(Loc, CEEState, false, &isNew);
    CEENode->addPredecessor(*I, G);
    if (!isNew)
      return;

    // Step 5: Perform the post-condition check of the CallExpr and enqueue the
    // result onto the work list.
    // CEENode -> Dst -> WorkList
    NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), CEENode);
    SaveAndRestore<const NodeBuilderContext*> NBCSave(currBldrCtx,
        &Ctx);
    SaveAndRestore<unsigned> CBISave(currStmtIdx, calleeCtx->getIndex());

    CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState);

    ExplodedNodeSet DstPostCall;
    if (llvm::isa_and_nonnull<CXXNewExpr>(CE)) {
      ExplodedNodeSet DstPostPostCallCallback;
      getCheckerManager().runCheckersForPostCall(DstPostPostCallCallback,
                                                 CEENode, *UpdatedCall, *this,
                                                 /*wasInlined=*/true);
      for (ExplodedNode *I : DstPostPostCallCallback) {
        getCheckerManager().runCheckersForNewAllocator(
            cast<CXXAllocatorCall>(*UpdatedCall), DstPostCall, I, *this,
            /*wasInlined=*/true);
      }
    } else {
      getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode,
                                                 *UpdatedCall, *this,
                                                 /*wasInlined=*/true);
    }
    ExplodedNodeSet Dst;
    if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
      getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, *Msg,
                                                        *this,
                                                        /*wasInlined=*/true);
    } else if (CE &&
               !(isa<CXXNewExpr>(CE) && // Called when visiting CXXNewExpr.
                 AMgr.getAnalyzerOptions().MayInlineCXXAllocator)) {
      getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE,
                                                 *this, /*wasInlined=*/true);
    } else {
      Dst.insert(DstPostCall);
    }

    // Enqueue the next element in the block.
    for (ExplodedNodeSet::iterator PSI = Dst.begin(), PSE = Dst.end();
                                   PSI != PSE; ++PSI) {
      Engine.getWorkList()->enqueue(*PSI, calleeCtx->getCallSiteBlock(),
                                    calleeCtx->getIndex()+1);
    }
  }
}

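/// Returns true if the given function is considered cheap to inline: its
/// CFG is branchless (linear) or no larger than AlwaysInlineSize.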
bool ExprEngine::isSmall(AnalysisDeclContext *ADC) const {
  // When there are no branches in the function, inlining it cannot introduce
  // any exponential path complexity. Such functions also don't trigger
  // various fundamental problems with our inlining mechanism, such as the
  // problem of inlined defensive checks. Hence isLinear().
  const CFG *Cfg = ADC->getCFG();
  return Cfg->isLinear() || Cfg->size() <= AMgr.options.AlwaysInlineSize;
}

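/// Returns true if the function's CFG size reaches
/// MinCFGSizeTreatFunctionsAsLarge; such functions are inlined at most
/// MaxTimesInlineLarge times (see shouldInlineCall).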
bool ExprEngine::isLarge(AnalysisDeclContext *ADC) const {
  const CFG *Cfg = ADC->getCFG();
  return Cfg->size() >= AMgr.options.MinCFGSizeTreatFunctionsAsLarge;
}

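/// Returns true if the function's CFG has more blocks than MaxInlinableSize;
/// such functions are never inlined (see mayInlineDecl).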
bool ExprEngine::isHuge(AnalysisDeclContext *ADC) const {
  const CFG *Cfg = ADC->getCFG();
  return Cfg->getNumBlockIDs() > AMgr.options.MaxInlinableSize;
}

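/// Walk up the location-context chain, setting \p IsRecursive if \p D already
/// appears on the stack and computing \p StackDepth. Frames of small
/// functions are not counted unless they belong to the recursion.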
void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx,
                               bool &IsRecursive, unsigned &StackDepth) {
  IsRecursive = false;
  StackDepth = 0;

  while (LCtx) {
    if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LCtx)) {
      const Decl *DI = SFC->getDecl();

      // Mark recursive (and mutually recursive) functions and always count
      // them when measuring the stack depth.
      if (DI == D) {
        IsRecursive = true;
        ++StackDepth;
        LCtx = LCtx->getParent();
        continue;
      }

      // Do not count the small functions when determining the stack depth.
      AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(DI);
      if (!isSmall(CalleeADC))
        ++StackDepth;
    }
    LCtx = LCtx->getParent();
  }
}

// The GDM component containing the dynamic dispatch bifurcation info. When
// the exact type of the receiver is not known, we want to explore both paths:
// one on which the call is inlined and one on which it is not. This is done
// to ensure we do not drop coverage.
// This is a map from the receiver region to a flag specifying whether we
// consider this region's type information precise along the given path.
namespace {
  enum DynamicDispatchMode {
    DynamicDispatchModeInlined = 1,
    DynamicDispatchModeConservative
  };
} // end anonymous namespace

REGISTER_MAP_WITH_PROGRAMSTATE(DynamicDispatchBifurcationMap,
                               const MemRegion *, unsigned)

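/// Inline the given call: construct the callee's stack frame, map actual
/// arguments to formal parameters, and enqueue a CallEnter node on the
/// worklist. The predecessor is taken out of \p Bldr because its successor
/// has already been added manually. Always returns true.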
bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
                            NodeBuilder &Bldr, ExplodedNode *Pred,
                            ProgramStateRef State) {
  assert(D);

  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getStackFrame();
  const LocationContext *ParentOfCallee = CallerSFC;
  if (Call.getKind() == CE_Block &&
      !cast<BlockCall>(Call).isConversionFromLambda()) {
    const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
    assert(BR && "If we have the block definition we should have its region");
    AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
    ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
                                                         cast<BlockDecl>(D),
                                                         BR);
  }

  // This may be NULL, but that's fine.
  const Expr *CallE = Call.getOriginExpr();

  // Construct a new stack frame for the callee.
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
  const StackFrameContext *CalleeSFC =
      CalleeADC->getStackFrame(ParentOfCallee, CallE, currBldrCtx->getBlock(),
                               currBldrCtx->blockCount(), currStmtIdx);

  CallEnter Loc(CallE, CalleeSFC, CurLC);

  // Construct a new state which contains the mapping from actual to
  // formal arguments.
  State = State->enterStackFrame(Call, CalleeSFC);

  bool isNew;
  if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
    N->addPredecessor(Pred, G);
    if (isNew)
      Engine.getWorkList()->enqueue(N);
  }

  // If we decided to inline the call, the successor has been manually
  // added onto the work list so remove it from the node builder.
  Bldr.takeNodes(Pred);

  NumInlinedCalls++;
  Engine.FunctionSummaries->bumpNumTimesInlined(D);

  // Mark the decl as visited.
  if (VisitedCallees)
    VisitedCallees->insert(D);

  return true;
}

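/// If we are replaying this call without inlining (because a previous attempt
/// to inline it failed), return the state with the replay marker removed;
/// otherwise return null.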
static ProgramStateRef getInlineFailedState(ProgramStateRef State,
                                            const Stmt *CallE) {
  const void *ReplayState = State->get<ReplayWithoutInlining>();
  if (!ReplayState)
    return nullptr;

  assert(ReplayState == CallE && "Backtracked to the wrong call.");
  (void)CallE;

  return State->remove<ReplayWithoutInlining>();
}

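/// Visit a CallExpr: run the pre-statement checkers, evaluate the call on
/// each resulting node, and run the post-statement checkers on calls that
/// were not inlined.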
void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
                               ExplodedNodeSet &dst) {
  // Perform the previsit of the CallExpr.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, CE, *this);

  // Get the call in its initial state. We use this as a template to perform
  // all the checks.
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> CallTemplate
    = CEMgr.getSimpleCall(CE, Pred->getState(), Pred->getLocationContext());

  // Evaluate the function call.  We try each of the checkers
  // to see if they can evaluate the function call.
  ExplodedNodeSet dstCallEvaluated;
  for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end();
       I != E; ++I) {
    evalCall(dstCallEvaluated, *I, *CallTemplate);
  }

  // Finally, perform the post-condition check of the CallExpr and store
  // the created nodes in 'Dst'.
  // Note that if the call was inlined, dstCallEvaluated will be empty.
  // The post-CallExpr check will occur in processCallExit.
  getCheckerManager().runCheckersForPostStmt(dst, dstCallEvaluated, CE,
                                             *this);
}

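/// Mark as finished any argument constructors that were tracked as objects
/// under construction for this call, returning the updated state.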
ProgramStateRef ExprEngine::finishArgumentConstruction(ProgramStateRef State,
                                                       const CallEvent &Call) {
  const Expr *E = Call.getOriginExpr();
  // FIXME: Constructors to placement arguments of operator new
  // are not supported yet.
  if (!E || isa<CXXNewExpr>(E))
    return State;

  const LocationContext *LC = Call.getLocationContext();
  for (unsigned CallI = 0, CallN = Call.getNumArgs(); CallI != CallN; ++CallI) {
    unsigned I = Call.getASTArgumentIndex(CallI);
    if (Optional<SVal> V =
            getObjectUnderConstruction(State, {E, I}, LC)) {
      SVal VV = *V;
      (void)VV;
      assert(cast<VarRegion>(VV.castAs<loc::MemRegionVal>().getRegion())
                 ->getStackFrame()->getParent()
                 ->getStackFrame() == LC->getStackFrame());
      State = finishObjectConstruction(State, {E, I}, LC);
    }
  }

  return State;
}

void ExprEngine::finishArgumentConstruction(ExplodedNodeSet &Dst,
                                            ExplodedNode *Pred,
                                            const CallEvent &Call) {
  ProgramStateRef State = Pred->getState();
  ProgramStateRef CleanedState = finishArgumentConstruction(State, Call);
  if (CleanedState == State) {
    Dst.insert(Pred);
    return;
  }

  const Expr *E = Call.getOriginExpr();
  const LocationContext *LC = Call.getLocationContext();
  NodeBuilder B(Pred, Dst, *currBldrCtx);
  static SimpleProgramPointTag Tag("ExprEngine",
                                   "Finish argument construction");
  PreStmt PP(E, LC, &Tag);
  B.generateNode(PP, CleanedState, Pred);
}

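/// Evaluate a call: run pre-call checkers, give eval::Call checkers a chance
/// to model the call (falling back to defaultEvalCall), finish argument
/// construction, run post-call checkers, and notify checkers about values
/// that may have escaped through non-const out-parameters.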
void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
                          const CallEvent &Call) {
  // WARNING: At this time, the state attached to 'Call' may be older than the
  // state in 'Pred'. This is a minor optimization since CheckerManager will
  // use an updated CallEvent instance when calling checkers, but if 'Call' is
  // ever used directly in this function all callers should be updated to pass
  // the most recent state. (It is probably not worth doing the work here since
  // for some callers this will not be necessary.)

  // Run any pre-call checks using the generic call interface.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred,
                                            Call, *this);

  // Actually evaluate the function call.  We try each of the checkers
  // to see if they can evaluate the function call, and get a callback at
  // defaultEvalCall if all of them fail.
  ExplodedNodeSet dstCallEvaluated;
  getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
                                             Call, *this, EvalCallOptions());

  // If there were other constructors called for object-type arguments
  // of this call, clean them up.
  ExplodedNodeSet dstArgumentCleanup;
  for (ExplodedNode *I : dstCallEvaluated)
    finishArgumentConstruction(dstArgumentCleanup, I, Call);

  ExplodedNodeSet dstPostCall;
  getCheckerManager().runCheckersForPostCall(dstPostCall, dstArgumentCleanup,
                                             Call, *this);

  // Symbols conjured while invalidating the regions above may have escaped.
  // Note that for inlined calls the nodes were put back onto the worklist,
  // so we can assume that every node here belongs to a conservatively
  // evaluated call at this point.

  // Run the pointerEscape callback with the newly conjured symbols.
  SmallVector<std::pair<SVal, SVal>, 8> Escaped;
  for (ExplodedNode *I : dstPostCall) {
    NodeBuilder B(I, Dst, *currBldrCtx);
    ProgramStateRef State = I->getState();
    Escaped.clear();
    {
      unsigned Arg = -1;
      for (const ParmVarDecl *PVD : Call.parameters()) {
        ++Arg;
        QualType ParamTy = PVD->getType();
        if (ParamTy.isNull() ||
            (!ParamTy->isPointerType() && !ParamTy->isReferenceType()))
          continue;
        QualType Pointee = ParamTy->getPointeeType();
        if (Pointee.isConstQualified() || Pointee->isVoidType())
          continue;
        if (const MemRegion *MR = Call.getArgSVal(Arg).getAsRegion())
          Escaped.emplace_back(loc::MemRegionVal(MR), State->getSVal(MR, Pointee));
      }
    }

    State = processPointerEscapedOnBind(State, Escaped, I->getLocationContext(),
                                        PSK_EscapeOutParameters, &Call);

    if (State == I->getState())
      Dst.insert(I);
    else
      B.generateNode(I->getLocation(), State, I);
  }
}

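/// Bind the return value of a conservatively evaluated call: known method
/// families return their receiver, constructors return the constructed
/// object, and everything else gets a conjured symbol (or a conjured heap
/// pointer for a replaceable operator new).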
ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
                                            const LocationContext *LCtx,
                                            ProgramStateRef State) {
  const Expr *E = Call.getOriginExpr();
  if (!E)
    return State;

  // Some method families have known return values.
  if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) {
    switch (Msg->getMethodFamily()) {
    default:
      break;
    case OMF_autorelease:
    case OMF_retain:
    case OMF_self: {
      // These methods return their receivers.
      return State->BindExpr(E, LCtx, Msg->getReceiverSVal());
    }
    }
  } else if (const CXXConstructorCall *C = dyn_cast<CXXConstructorCall>(&Call)) {
    SVal ThisV = C->getCXXThisVal();
    ThisV = State->getSVal(ThisV.castAs<Loc>());
    return State->BindExpr(E, LCtx, ThisV);
  }

  SVal R;
  QualType ResultTy = Call.getResultType();
  unsigned Count = currBldrCtx->blockCount();
  if (auto RTC = getCurrentCFGElement().getAs<CFGCXXRecordTypedCall>()) {
    // Conjure a temporary if the function returns an object by value.
    SVal Target;
    assert(RTC->getStmt() == Call.getOriginExpr());
    EvalCallOptions CallOpts; // FIXME: We won't really need those.
    std::tie(State, Target) =
        handleConstructionContext(Call.getOriginExpr(), State, LCtx,
                                  RTC->getConstructionContext(), CallOpts);
    const MemRegion *TargetR = Target.getAsRegion();
    assert(TargetR);
    // Invalidate the region so that it doesn't look uninitialized. If this is
    // a field or element constructor, we do not want to invalidate
    // the whole structure. Pointer escape is meaningless because
    // the structure is a product of conservative evaluation
    // and therefore contains nothing interesting at this point.
    RegionAndSymbolInvalidationTraits ITraits;
    ITraits.setTrait(TargetR,
        RegionAndSymbolInvalidationTraits::TK_DoNotInvalidateSuperRegion);
    State = State->invalidateRegions(TargetR, E, Count, LCtx,
                                     /* CausesPointerEscape=*/false, nullptr,
                                     &Call, &ITraits);

    R = State->getSVal(Target.castAs<Loc>(), E->getType());
  } else {
    // Conjure a symbol if the return value is unknown.

    // See if we need to conjure a heap pointer instead of
    // a regular unknown pointer.
    const auto *CNE = dyn_cast<CXXNewExpr>(E);
    if (CNE && CNE->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
      R = svalBuilder.getConjuredHeapSymbolVal(E, LCtx, Count);
      const MemRegion *MR = R.getAsRegion()->StripCasts();

      // Store the extent of the allocated object(s).
      SVal ElementCount;
      if (const Expr *SizeExpr = CNE->getArraySize().getValueOr(nullptr)) {
        ElementCount = State->getSVal(SizeExpr, LCtx);
      } else {
        ElementCount = svalBuilder.makeIntVal(1, /*IsUnsigned=*/true);
      }

      SVal ElementSize = getElementExtent(CNE->getAllocatedType(), svalBuilder);

      SVal Size =
          svalBuilder.evalBinOp(State, BO_Mul, ElementCount, ElementSize,
                                svalBuilder.getArrayIndexType());

      State = setDynamicExtent(State, MR, Size.castAs<DefinedOrUnknownSVal>(),
                               svalBuilder);
    } else {
      R = svalBuilder.conjureSymbolVal(nullptr, E, LCtx, ResultTy, Count);
    }
  }
  return State->BindExpr(E, LCtx, R);
}

// Conservatively evaluate a call by invalidating regions and binding
// a conjured return value.
void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
                                      ExplodedNode *Pred, ProgramStateRef State) {
  State = Call.invalidateRegions(currBldrCtx->blockCount(), State);
  State = bindReturnValue(Call, Pred->getLocationContext(), State);

  // And make the result node.
  Bldr.generateNode(Call.getProgramPoint(), State, Pred);
}

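/// Decide whether a call of this kind may be inlined at all, based on the
/// analyzer options and the per-call-site EvalCallOptions.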
ExprEngine::CallInlinePolicy
ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
                              AnalyzerOptions &Opts,
                              const EvalCallOptions &CallOpts) {
  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getStackFrame();
  switch (Call.getKind()) {
  case CE_Function:
  case CE_Block:
    break;
  case CE_CXXMember:
  case CE_CXXMemberOperator:
    if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions))
      return CIP_DisallowedAlways;
    break;
  case CE_CXXConstructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors))
      return CIP_DisallowedAlways;

    const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);

    const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();

    auto CCE = getCurrentCFGElement().getAs<CFGConstructor>();
    const ConstructionContext *CC = CCE ? CCE->getConstructionContext()
                                        : nullptr;

    if (llvm::isa_and_nonnull<NewAllocatedObjectConstructionContext>(CC) &&
        !Opts.MayInlineCXXAllocator)
      return CIP_DisallowedOnce;

    // FIXME: We don't handle constructors or destructors for arrays properly.
    // Even once we do, we still need to be careful about implicitly-generated
    // initializers for array fields in default move/copy constructors.
    // We still allow construction into ElementRegion targets when they don't
    // represent array elements.
    if (CallOpts.IsArrayCtorOrDtor)
      return CIP_DisallowedOnce;

    // Inlining constructors requires including initializers in the CFG.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers");
    (void)ADC;

    // If the destructor is trivial, it's always safe to inline the constructor.
    if (Ctor.getDecl()->getParent()->hasTrivialDestructor())
      break;

    // For other types, only inline constructors if destructor inlining is
    // also enabled.
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return CIP_DisallowedAlways;

    if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete) {
      // If we don't handle temporary destructors, we shouldn't inline
      // their constructors.
      if (CallOpts.IsTemporaryCtorOrDtor &&
          !Opts.ShouldIncludeTemporaryDtorsInCFG)
        return CIP_DisallowedOnce;

      // If we did not find the correct this-region, it would be pointless
      // to inline the constructor. Instead we will simply invalidate
      // the fake temporary target.
      if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion)
        return CIP_DisallowedOnce;

      // If the temporary is lifetime-extended by binding it to a reference-type
      // field within an aggregate, automatic destructors don't work properly.
      if (CallOpts.IsTemporaryLifetimeExtendedViaAggregate)
        return CIP_DisallowedOnce;
    }

    break;
  }
  case CE_CXXInheritedConstructor: {
    // This doesn't really increase the cost of inlining ever, because
    // the stack frame of the inherited constructor is trivial.
    return CIP_Allowed;
  }
  case CE_CXXDestructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return CIP_DisallowedAlways;

    // Inlining destructors requires building the CFG correctly.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors");
    (void)ADC;

    // FIXME: We don't handle constructors or destructors for arrays properly.
    if (CallOpts.IsArrayCtorOrDtor)
      return CIP_DisallowedOnce;

    // Allow disabling temporary destructor inlining with a separate option.
    if (CallOpts.IsTemporaryCtorOrDtor &&
        !Opts.MayInlineCXXTemporaryDtors)
      return CIP_DisallowedOnce;

    // If we did not find the correct this-region, it would be pointless
    // to inline the destructor. Instead we will simply invalidate
    // the fake temporary target.
    if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion)
      return CIP_DisallowedOnce;
    break;
  }
  case CE_CXXDeallocator:
    LLVM_FALLTHROUGH;
  case CE_CXXAllocator:
    if (Opts.MayInlineCXXAllocator)
      break;
    // Do not inline allocators until we model deallocators.
    // This is unfortunate, but basically necessary for smart pointers and such.
    return CIP_DisallowedAlways;
  case CE_ObjCMessage:
    if (!Opts.MayInlineObjCMethod)
      return CIP_DisallowedAlways;
    if (!(Opts.getIPAMode() == IPAK_DynamicDispatch ||
          Opts.getIPAMode() == IPAK_DynamicDispatchBifurcate))
      return CIP_DisallowedAlways;
    break;
  }

  return CIP_Allowed;
}

/// Returns true if the given C++ class contains a member with the given name.
static bool hasMember(const ASTContext &Ctx, const CXXRecordDecl *RD,
                      StringRef Name) {
  const IdentifierInfo &II = Ctx.Idents.get(Name);
  return RD->hasMemberName(Ctx.DeclarationNames.getIdentifier(&II));
}

/// Returns true if the given C++ class is a container or iterator.
///
/// Our heuristic for this is whether it contains a method named 'begin()' or a
/// nested type named 'iterator' or 'iterator_category'.
static bool isContainerClass(const ASTContext &Ctx, const CXXRecordDecl *RD) {
  return hasMember(Ctx, RD, "begin") ||
         hasMember(Ctx, RD, "iterator") ||
         hasMember(Ctx, RD, "iterator_category");
}

/// Returns true if the given function refers to a method of a C++ container
/// or iterator.
///
/// We generally do a poor job modeling most containers right now, and might
/// prefer not to inline their methods.
static bool isContainerMethod(const ASTContext &Ctx,
                              const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    return isContainerClass(Ctx, MD->getParent());
  return false;
}

/// Returns true if the given function is the destructor of a class named
/// "shared_ptr".
static bool isCXXSharedPtrDtor(const FunctionDecl *FD) {
  const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(FD);
  if (!Dtor)
    return false;

  const CXXRecordDecl *RD = Dtor->getParent();
  if (const IdentifierInfo *II = RD->getDeclName().getAsIdentifierInfo())
    if (II->isStr("shared_ptr"))
        return true;

  return false;
}

/// Returns true if the function in \p CalleeADC may be inlined in general.
///
/// This checks static properties of the function, such as its signature and
/// CFG, to determine whether the analyzer should ever consider inlining it,
/// in any context.
bool ExprEngine::mayInlineDecl(AnalysisDeclContext *CalleeADC) const {
  AnalyzerOptions &Opts = AMgr.getAnalyzerOptions();
  // FIXME: Do not inline variadic calls.
  if (CallEvent::isVariadic(CalleeADC->getDecl()))
    return false;

  // Check certain C++-related inlining policies.
  ASTContext &Ctx = CalleeADC->getASTContext();
  if (Ctx.getLangOpts().CPlusPlus) {
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeADC->getDecl())) {
      // Conditionally control the inlining of template functions.
      if (!Opts.MayInlineTemplateFunctions)
        if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
          return false;

      // Conditionally control the inlining of C++ standard library functions.
      if (!Opts.MayInlineCXXStandardLibrary)
        if (Ctx.getSourceManager().isInSystemHeader(FD->getLocation()))
          if (AnalysisDeclContext::isInStdNamespace(FD))
            return false;

      // Conditionally control the inlining of methods on objects that look
      // like C++ containers.
      if (!Opts.MayInlineCXXContainerMethods)
        if (!AMgr.isInCodeFile(FD->getLocation()))
          if (isContainerMethod(Ctx, FD))
            return false;

      // Conditionally control the inlining of the destructor of C++ shared_ptr.
      // We don't currently do a good job modeling shared_ptr because we can't
      // see the reference count, so treating it as opaque is probably the best
      // idea.
      if (!Opts.MayInlineCXXSharedPtrDtor)
        if (isCXXSharedPtrDtor(FD))
          return false;
    }
  }

  // It is possible that the CFG cannot be constructed.
  // Be safe, and check if the CalleeCFG is valid.
  const CFG *CalleeCFG = CalleeADC->getCFG();
  if (!CalleeCFG)
    return false;

  // Do not inline huge functions.
  if (isHuge(CalleeADC))
    return false;

  // It is possible that the live variables analysis cannot be
  // run.  If so, bail out.
  if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
    return false;

  return true;
}

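/// Decide whether this particular call should be inlined, combining the
/// cached per-function verdict with per-call-site checks: call kind, stack
/// depth, recursion, and how many times the function was already inlined.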
bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
                                  const ExplodedNode *Pred,
                                  const EvalCallOptions &CallOpts) {
  if (!D)
    return false;

  AnalysisManager &AMgr = getAnalysisManager();
  AnalyzerOptions &Opts = AMgr.options;
  AnalysisDeclContextManager &ADCMgr = AMgr.getAnalysisDeclContextManager();
  AnalysisDeclContext *CalleeADC = ADCMgr.getContext(D);

  // The auto-synthesized bodies are essential to inline as they are
  // usually small and commonly used. Note: we should do this check early on to
  // ensure we always inline these calls.
  if (CalleeADC->isBodyAutosynthesized())
    return true;

  if (!AMgr.shouldInlineCall())
    return false;

  // Check if this function has been marked as non-inlinable.
  Optional<bool> MayInline = Engine.FunctionSummaries->mayInline(D);
  if (MayInline.hasValue()) {
    if (!MayInline.getValue())
      return false;

  } else {
    // We haven't actually checked the static properties of this function yet.
    // Do that now, and record our decision in the function summaries.
    if (mayInlineDecl(CalleeADC)) {
      Engine.FunctionSummaries->markMayInline(D);
    } else {
      Engine.FunctionSummaries->markShouldNotInline(D);
      return false;
    }
  }

  // Check if we should inline a call based on its kind.
  // FIXME: this checks both static and dynamic properties of the call, which
  // means we're redoing a bit of work that could be cached in the function
  // summary.
  CallInlinePolicy CIP = mayInlineCallKind(Call, Pred, Opts, CallOpts);
  if (CIP != CIP_Allowed) {
    if (CIP == CIP_DisallowedAlways) {
      assert(!MayInline.hasValue() || MayInline.getValue());
      Engine.FunctionSummaries->markShouldNotInline(D);
    }
    return false;
  }

  // Do not inline if recursive or we've reached max stack frame count.
  bool IsRecursive = false;
  unsigned StackDepth = 0;
  examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth);
  if ((StackDepth >= Opts.InlineMaxStackDepth) &&
      (!isSmall(CalleeADC) || IsRecursive))
    return false;

  // Do not inline large functions too many times.
  if ((Engine.FunctionSummaries->getNumTimesInlined(D) >
       Opts.MaxTimesInlineLarge) &&
      isLarge(CalleeADC)) {
    NumReachedInlineCountMax++;
    return false;
  }

  if (HowToInline == Inline_Minimal && (!isSmall(CalleeADC) || IsRecursive))
    return false;

  return true;
}

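/// Returns true if the call is a trivial copy or move assignment operator,
/// which the engine models with performTrivialCopy instead of inlining.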
static bool isTrivialObjectAssignment(const CallEvent &Call) {
  const CXXInstanceCall *ICall = dyn_cast<CXXInstanceCall>(&Call);
  if (!ICall)
    return false;

  const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(ICall->getDecl());
  if (!MD)
    return false;
  if (!(MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()))
    return false;

  return MD->isTrivial();
}

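/// Evaluate a call for which no checker provided a model: inline it if
/// allowed, bifurcate the path on imprecise dynamic dispatch, or fall back
/// to conservative evaluation.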
void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
                                 const CallEvent &CallTemplate,
                                 const EvalCallOptions &CallOpts) {
  // Make sure we have the most recent state attached to the call.
  ProgramStateRef State = Pred->getState();
  CallEventRef<> Call = CallTemplate.cloneWithState(State);

  // Special-case trivial assignment operators.
  if (isTrivialObjectAssignment(*Call)) {
    performTrivialCopy(Bldr, Pred, *Call);
    return;
  }

  // Try to inline the call.
  // The origin expression here is just used as a kind of checksum;
  // this should still be safe even for CallEvents that don't come from exprs.
  const Expr *E = Call->getOriginExpr();

  ProgramStateRef InlinedFailedState = getInlineFailedState(State, E);
  if (InlinedFailedState) {
    // If we already tried once and failed, make sure we don't retry later.
    State = InlinedFailedState;
  } else {
    RuntimeDefinition RD = Call->getRuntimeDefinition();
    const Decl *D = RD.getDecl();
    if (shouldInlineCall(*Call, D, Pred, CallOpts)) {
      if (RD.mayHaveOtherDefinitions()) {
        AnalyzerOptions &Options = getAnalysisManager().options;

        // Explore with and without inlining the call.
        if (Options.getIPAMode() == IPAK_DynamicDispatchBifurcate) {
          BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred);
          return;
        }

        // Don't inline if we're not in any dynamic dispatch mode.
        if (Options.getIPAMode() != IPAK_DynamicDispatch) {
          conservativeEvalCall(*Call, Bldr, Pred, State);
          return;
        }
      }

      // We are not bifurcating and we do have a Decl, so just inline.
      if (inlineCall(*Call, D, Bldr, Pred, State))
        return;
    }
  }

  // If we can't inline it, handle the return value and invalidate the regions.
  conservativeEvalCall(*Call, Bldr, Pred, State);
}

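/// Split the path for a dynamically dispatched call whose receiver type is
/// imprecise: one branch assumes the inferred definition and inlines it, the
/// other evaluates the call conservatively. The choice is recorded per
/// receiver region so that each region is split at most once.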
void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
                               const CallEvent &Call, const Decl *D,
                               NodeBuilder &Bldr, ExplodedNode *Pred) {
  assert(BifurReg);
  BifurReg = BifurReg->StripCasts();

  // Check if we've performed the split already - note, we only want
  // to split the path once per memory region.
  ProgramStateRef State = Pred->getState();
  const unsigned *BState =
                        State->get<DynamicDispatchBifurcationMap>(BifurReg);
  if (BState) {
    // If we are on the "inline path", keep inlining if possible.
    if (*BState == DynamicDispatchModeInlined)
      if (inlineCall(Call, D, Bldr, Pred, State))
        return;
    // If inlining failed, or we are on the path where we assume we
    // don't have enough info about the receiver to inline, conjure the
    // return value and invalidate the regions.
    conservativeEvalCall(Call, Bldr, Pred, State);
    return;
  }

  // If we got here, this is the first time we process a message to this
  // region, so split the path.
  ProgramStateRef IState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                               DynamicDispatchModeInlined);
  inlineCall(Call, D, Bldr, Pred, IState);

  ProgramStateRef NoIState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                               DynamicDispatchModeConservative);
  conservativeEvalCall(Call, Bldr, Pred, NoIState);

  NumOfDynamicDispatchPathSplits++;
}

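/// Visit a ReturnStmt: run the pre-statement checkers and, if the statement
/// returns a value, generate a node for each resulting predecessor.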
void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
                                 ExplodedNodeSet &Dst) {
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, RS, *this);

  StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx);

  if (RS->getRetValue()) {
    for (ExplodedNodeSet::iterator it = dstPreVisit.begin(),
                                  ei = dstPreVisit.end(); it != ei; ++it) {
      B.generateNode(RS, *it, (*it)->getState());
    }
  }
}