1 //=-- ExprEngineCallAndReturn.cpp - Support for call/return -----*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 //  This file defines ExprEngine's support for calls and returns.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "PrettyStackTraceLocationContext.h"
14 #include "clang/AST/CXXInheritance.h"
15 #include "clang/AST/Decl.h"
16 #include "clang/AST/DeclCXX.h"
17 #include "clang/Analysis/Analyses/LiveVariables.h"
18 #include "clang/Analysis/ConstructionContext.h"
19 #include "clang/StaticAnalyzer/Core/CheckerManager.h"
20 #include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
21 #include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
22 #include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
23 #include "llvm/ADT/SmallSet.h"
24 #include "llvm/ADT/Statistic.h"
25 #include "llvm/Support/Casting.h"
26 #include "llvm/Support/Compiler.h"
27 #include "llvm/Support/SaveAndRestore.h"
28 #include <optional>
29 
30 using namespace clang;
31 using namespace ento;
32 
33 #define DEBUG_TYPE "ExprEngine"
34 
35 STATISTIC(NumOfDynamicDispatchPathSplits,
36   "The # of times we split the path due to imprecise dynamic dispatch info");
37 
38 STATISTIC(NumInlinedCalls,
39   "The # of times we inlined a call");
40 
41 STATISTIC(NumReachedInlineCountMax,
42   "The # of times we reached inline count maximum");
43 
44 void ExprEngine::processCallEnter(NodeBuilderContext& BC, CallEnter CE,
45                                   ExplodedNode *Pred) {
46   // Get the entry block in the CFG of the callee.
47   const StackFrameContext *calleeCtx = CE.getCalleeContext();
48   PrettyStackTraceLocationContext CrashInfo(calleeCtx);
49   const CFGBlock *Entry = CE.getEntry();
50 
51   // Validate the CFG.
52   assert(Entry->empty());
53   assert(Entry->succ_size() == 1);
54 
55   // Get the solitary successor.
56   const CFGBlock *Succ = *(Entry->succ_begin());
57 
58   // Construct an edge representing the starting location in the callee.
59   BlockEdge Loc(Entry, Succ, calleeCtx);
60 
61   ProgramStateRef state = Pred->getState();
62 
63   // Construct a new node, notify checkers that analysis of the function has
64   // begun, and add the resultant nodes to the worklist.
65   bool isNew;
66   ExplodedNode *Node = G.getNode(Loc, state, false, &isNew);
67   Node->addPredecessor(Pred, G);
68   if (isNew) {
69     ExplodedNodeSet DstBegin;
70     processBeginOfFunction(BC, Node, DstBegin, Loc);
71     Engine.enqueue(DstBegin);
72   }
73 }
74 
75 // Find the last statement on the path to the exploded node and the
76 // corresponding Block.
77 static std::pair<const Stmt*,
78                  const CFGBlock*> getLastStmt(const ExplodedNode *Node) {
79   const Stmt *S = nullptr;
80   const CFGBlock *Blk = nullptr;
81   const StackFrameContext *SF = Node->getStackFrame();
82 
83   // Back up through the ExplodedGraph until we reach a statement node in this
84   // stack frame.
85   while (Node) {
86     const ProgramPoint &PP = Node->getLocation();
87 
88     if (PP.getStackFrame() == SF) {
89       if (std::optional<StmtPoint> SP = PP.getAs<StmtPoint>()) {
90         S = SP->getStmt();
91         break;
92       } else if (std::optional<CallExitEnd> CEE = PP.getAs<CallExitEnd>()) {
93         S = CEE->getCalleeContext()->getCallSite();
94         if (S)
95           break;
96 
97         // If there is no statement, this is an implicitly-generated call.
98         // We'll walk backwards over it and then continue the loop to find
99         // an actual statement.
100         std::optional<CallEnter> CE;
101         do {
102           Node = Node->getFirstPred();
103           CE = Node->getLocationAs<CallEnter>();
104         } while (!CE || CE->getCalleeContext() != CEE->getCalleeContext());
105 
106         // Continue searching the graph.
107       } else if (std::optional<BlockEdge> BE = PP.getAs<BlockEdge>()) {
108         Blk = BE->getSrc();
109       }
110     } else if (std::optional<CallEnter> CE = PP.getAs<CallEnter>()) {
111       // If we reached the CallEnter for this function, it has no statements.
112       if (CE->getCalleeContext() == SF)
113         break;
114     }
115 
116     if (Node->pred_empty())
117       return std::make_pair(nullptr, nullptr);
118 
119     Node = *Node->pred_begin();
120   }
121 
122   return std::make_pair(S, Blk);
123 }
124 
125 /// Adjusts a return value when the called function's return type does not
126 /// match the caller's expression type. This can happen when a dynamic call
127 /// is devirtualized, and the overriding method has a covariant (more specific)
128 /// return type than the parent's method. For C++ objects, this means we need
129 /// to add base casts.
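/// For example, if a call through a `Base *` is devirtualized to an override
/// declared as returning `Derived *`, the value returned by the callee has to
/// be cast from the derived class back to the base class that the caller's
/// expression expects.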
130 static SVal adjustReturnValue(SVal V, QualType ExpectedTy, QualType ActualTy,
131                               StoreManager &StoreMgr) {
132   // For now, the only adjustments we handle apply to locations.
133   if (!isa<Loc>(V))
134     return V;
135 
136   // If the types already match, don't do any unnecessary work.
137   ExpectedTy = ExpectedTy.getCanonicalType();
138   ActualTy = ActualTy.getCanonicalType();
139   if (ExpectedTy == ActualTy)
140     return V;
141 
142   // No adjustment is needed between Objective-C pointer types.
143   if (ExpectedTy->isObjCObjectPointerType() &&
144       ActualTy->isObjCObjectPointerType())
145     return V;
146 
147   // C++ object pointers may need "derived-to-base" casts.
148   const CXXRecordDecl *ExpectedClass = ExpectedTy->getPointeeCXXRecordDecl();
149   const CXXRecordDecl *ActualClass = ActualTy->getPointeeCXXRecordDecl();
150   if (ExpectedClass && ActualClass) {
151     CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
152                        /*DetectVirtual=*/false);
153     if (ActualClass->isDerivedFrom(ExpectedClass, Paths) &&
154         !Paths.isAmbiguous(ActualTy->getCanonicalTypeUnqualified())) {
155       return StoreMgr.evalDerivedToBase(V, Paths.front());
156     }
157   }
158 
159   // Unfortunately, Objective-C does not enforce that overridden methods have
160   // covariant return types, so we can't assert that that never happens.
161   // Be safe and return UnknownVal().
162   return UnknownVal();
163 }
164 
165 void ExprEngine::removeDeadOnEndOfFunction(NodeBuilderContext& BC,
166                                            ExplodedNode *Pred,
167                                            ExplodedNodeSet &Dst) {
168   // Find the last statement in the function and the corresponding basic block.
169   const Stmt *LastSt = nullptr;
170   const CFGBlock *Blk = nullptr;
171   std::tie(LastSt, Blk) = getLastStmt(Pred);
172   if (!Blk || !LastSt) {
173     Dst.Add(Pred);
174     return;
175   }
176 
177   // Here, we destroy the current location context. We use the current
178   // function's entire body as a diagnostic statement, with which the program
179   // point will be associated. However, we only want to use LastStmt as a
180   // reference for what to clean up if it's a ReturnStmt; otherwise, everything
181   // is dead.
182   SaveAndRestore<const NodeBuilderContext *> NodeContextRAII(currBldrCtx, &BC);
183   const LocationContext *LCtx = Pred->getLocationContext();
184   removeDead(Pred, Dst, dyn_cast<ReturnStmt>(LastSt), LCtx,
185              LCtx->getAnalysisDeclContext()->getBody(),
186              ProgramPoint::PostStmtPurgeDeadSymbolsKind);
187 }
188 
189 static bool wasDifferentDeclUsedForInlining(CallEventRef<> Call,
190     const StackFrameContext *calleeCtx) {
191   const Decl *RuntimeCallee = calleeCtx->getDecl();
192   const Decl *StaticDecl = Call->getDecl();
193   assert(RuntimeCallee);
194   if (!StaticDecl)
195     return true;
196   return RuntimeCallee->getCanonicalDecl() != StaticDecl->getCanonicalDecl();
197 }
198 
199 // Returns the number of elements in the array currently being destructed.
200 // If the element count cannot be determined, 0 will be returned.
201 static unsigned getElementCountOfArrayBeingDestructed(
202     const CallEvent &Call, const ProgramStateRef State, SValBuilder &SVB) {
203   assert(isa<CXXDestructorCall>(Call) &&
204          "The call event is not a destructor call!");
205 
206   const auto &DtorCall = cast<CXXDestructorCall>(Call);
207 
208   auto ThisVal = DtorCall.getCXXThisVal();
209 
210   if (auto ThisElementRegion = dyn_cast<ElementRegion>(ThisVal.getAsRegion())) {
211     auto ArrayRegion = ThisElementRegion->getAsArrayOffset().getRegion();
212     auto ElementType = ThisElementRegion->getElementType();
213 
214     auto ElementCount =
215         getDynamicElementCount(State, ArrayRegion, SVB, ElementType);
216 
217     if (!ElementCount.isConstant())
218       return 0;
219 
220     return ElementCount.getAsInteger()->getLimitedValue();
221   }
222 
223   return 0;
224 }
225 
226 ProgramStateRef ExprEngine::removeStateTraitsUsedForArrayEvaluation(
227     ProgramStateRef State, const CXXConstructExpr *E,
228     const LocationContext *LCtx) {
229 
230   assert(LCtx && "Location context must be provided!");
231 
232   if (E) {
233     if (getPendingInitLoop(State, E, LCtx))
234       State = removePendingInitLoop(State, E, LCtx);
235 
236     if (getIndexOfElementToConstruct(State, E, LCtx))
237       State = removeIndexOfElementToConstruct(State, E, LCtx);
238   }
239 
240   if (getPendingArrayDestruction(State, LCtx))
241     State = removePendingArrayDestruction(State, LCtx);
242 
243   return State;
244 }
245 
246 /// The call exit is simulated with a sequence of nodes, which occur between
247 /// CallExitBegin and CallExitEnd. The following operations occur between the
248 /// two program points:
249 /// 1. CallExitBegin (triggers the start of the call exit sequence)
250 /// 2. Bind the return value
251 /// 3. Run remove dead bindings to clean up the dead symbols from the callee.
252 /// 4. CallExitEnd (switch to the caller context)
253 /// 5. PostStmt<CallExpr>
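/// For example, when returning from an inlined call `x = foo();`, the graph
/// contains, in order: a CallExitBegin node, a node that binds the returned
/// value, the purge-dead-symbols node, a CallExitEnd node back in the caller's
/// context, and finally the PostStmt<CallExpr> node for the call itself.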
254 void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
255   // Step 1: CEBNode was generated before the call.
256   PrettyStackTraceLocationContext CrashInfo(CEBNode->getLocationContext());
257   const StackFrameContext *calleeCtx = CEBNode->getStackFrame();
258 
259   // The parent context might not be a stack frame, so make sure we
260   // look up the first enclosing stack frame.
261   const StackFrameContext *callerCtx =
262     calleeCtx->getParent()->getStackFrame();
263 
264   const Stmt *CE = calleeCtx->getCallSite();
265   ProgramStateRef state = CEBNode->getState();
266   // Find the last statement in the function and the corresponding basic block.
267   const Stmt *LastSt = nullptr;
268   const CFGBlock *Blk = nullptr;
269   std::tie(LastSt, Blk) = getLastStmt(CEBNode);
270 
271   // Generate a CallEvent /before/ cleaning the state, so that we can get the
272   // correct value for 'this' (if necessary).
273   CallEventManager &CEMgr = getStateManager().getCallEventManager();
274   CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state);
275 
276   // Step 2: generate node with bound return value: CEBNode -> BindedRetNode.
277 
278   // If this variable is set to 'true', the analyzer will evaluate the call
279   // statement we are about to exit again, instead of continuing execution
280   // from the statement after the call. This is useful for non-POD type array
281   // construction, where the CXXConstructExpr is referenced only once in the
282   // CFG but we want to evaluate it once for each element of the array.
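  // For example, for a stack array `S arr[3];` the CFG references the
  // constructor's CXXConstructExpr only once, so the call site is re-enqueued
  // (see the enqueue index below) until all three elements have been
  // constructed.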
283   bool ShouldRepeatCall = false;
284 
285   if (const auto *DtorDecl =
286           dyn_cast_or_null<CXXDestructorDecl>(Call->getDecl())) {
287     if (auto Idx = getPendingArrayDestruction(state, callerCtx)) {
288       ShouldRepeatCall = *Idx > 0;
289 
290       auto ThisVal = svalBuilder.getCXXThis(DtorDecl->getParent(), calleeCtx);
291       state = state->killBinding(ThisVal);
292     }
293   }
294 
295   // If the callee returns an expression, bind its value to CallExpr.
296   if (CE) {
297     if (const ReturnStmt *RS = dyn_cast_or_null<ReturnStmt>(LastSt)) {
298       const LocationContext *LCtx = CEBNode->getLocationContext();
299       SVal V = state->getSVal(RS, LCtx);
300 
301       // Ensure that the return type matches the type of the returned Expr.
302       if (wasDifferentDeclUsedForInlining(Call, calleeCtx)) {
303         QualType ReturnedTy =
304           CallEvent::getDeclaredResultType(calleeCtx->getDecl());
305         if (!ReturnedTy.isNull()) {
306           if (const Expr *Ex = dyn_cast<Expr>(CE)) {
307             V = adjustReturnValue(V, Ex->getType(), ReturnedTy,
308                                   getStoreManager());
309           }
310         }
311       }
312 
313       state = state->BindExpr(CE, callerCtx, V);
314     }
315 
316     // Bind the constructed object value to CXXConstructExpr.
317     if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) {
318       loc::MemRegionVal This =
319         svalBuilder.getCXXThis(CCE->getConstructor()->getParent(), calleeCtx);
320       SVal ThisV = state->getSVal(This);
321       ThisV = state->getSVal(ThisV.castAs<Loc>());
322       state = state->BindExpr(CCE, callerCtx, ThisV);
323 
324       ShouldRepeatCall = shouldRepeatCtorCall(state, CCE, callerCtx);
325     }
326 
327     if (const auto *CNE = dyn_cast<CXXNewExpr>(CE)) {
328       // We are currently evaluating a CXXNewAllocator CFGElement. It takes a
329       // while to reach the actual CXXNewExpr element from here, so keep the
330       // region for later use.
331       // Additionally cast the return value of the inlined operator new
332       // (which is of type 'void *') to the correct object type.
333       SVal AllocV = state->getSVal(CNE, callerCtx);
334       AllocV = svalBuilder.evalCast(
335           AllocV, CNE->getType(),
336           getContext().getPointerType(getContext().VoidTy));
337 
338       state = addObjectUnderConstruction(state, CNE, calleeCtx->getParent(),
339                                          AllocV);
340     }
341   }
342 
343   if (!ShouldRepeatCall) {
344     state = removeStateTraitsUsedForArrayEvaluation(
345         state, dyn_cast_or_null<CXXConstructExpr>(CE), callerCtx);
346   }
347 
348   // Step 3: BindedRetNode -> CleanedNodes
349   // If we can find a statement and a block in the inlined function, run remove
350   // dead bindings before returning from the call. This is important to ensure
351   // that we report issues such as leaks in the stack contexts in which
352   // they occurred.
353   ExplodedNodeSet CleanedNodes;
354   if (LastSt && Blk && AMgr.options.AnalysisPurgeOpt != PurgeNone) {
355     static SimpleProgramPointTag retValBind("ExprEngine", "Bind Return Value");
356     PostStmt Loc(LastSt, calleeCtx, &retValBind);
357     bool isNew;
358     ExplodedNode *BindedRetNode = G.getNode(Loc, state, false, &isNew);
359     BindedRetNode->addPredecessor(CEBNode, G);
360     if (!isNew)
361       return;
362 
363     NodeBuilderContext Ctx(getCoreEngine(), Blk, BindedRetNode);
364     currBldrCtx = &Ctx;
365     // Here, we call the Symbol Reaper with a null statement and the callee's
366     // location context, telling it to clean up everything in the callee's
367     // context (and its children). We use the callee's function body as a
368     // diagnostic statement, with which the program point will be associated.
369     removeDead(BindedRetNode, CleanedNodes, nullptr, calleeCtx,
370                calleeCtx->getAnalysisDeclContext()->getBody(),
371                ProgramPoint::PostStmtPurgeDeadSymbolsKind);
372     currBldrCtx = nullptr;
373   } else {
374     CleanedNodes.Add(CEBNode);
375   }
376 
377   for (ExplodedNode *N : CleanedNodes) {
378     // Step 4: Generate the CallExit and leave the callee's context.
379     // CleanedNodes -> CEENode
380     CallExitEnd Loc(calleeCtx, callerCtx);
381     bool isNew;
382     ProgramStateRef CEEState = (N == CEBNode) ? state : N->getState();
383 
384     ExplodedNode *CEENode = G.getNode(Loc, CEEState, false, &isNew);
385     CEENode->addPredecessor(N, G);
386     if (!isNew)
387       return;
388 
389     // Step 5: Perform the post-condition check of the CallExpr and enqueue the
390     // result onto the work list.
391     // CEENode -> Dst -> WorkList
392     NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), CEENode);
393     SaveAndRestore<const NodeBuilderContext *> NBCSave(currBldrCtx, &Ctx);
394     SaveAndRestore CBISave(currStmtIdx, calleeCtx->getIndex());
395 
396     CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState);
397 
398     ExplodedNodeSet DstPostCall;
399     if (llvm::isa_and_nonnull<CXXNewExpr>(CE)) {
400       ExplodedNodeSet DstPostPostCallCallback;
401       getCheckerManager().runCheckersForPostCall(DstPostPostCallCallback,
402                                                  CEENode, *UpdatedCall, *this,
403                                                  /*wasInlined=*/true);
404       for (ExplodedNode *I : DstPostPostCallCallback) {
405         getCheckerManager().runCheckersForNewAllocator(
406             cast<CXXAllocatorCall>(*UpdatedCall), DstPostCall, I, *this,
407             /*wasInlined=*/true);
408       }
409     } else {
410       getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode,
411                                                  *UpdatedCall, *this,
412                                                  /*wasInlined=*/true);
413     }
414     ExplodedNodeSet Dst;
415     if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
416       getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, *Msg,
417                                                         *this,
418                                                         /*wasInlined=*/true);
419     } else if (CE &&
420                !(isa<CXXNewExpr>(CE) && // Called when visiting CXXNewExpr.
421                  AMgr.getAnalyzerOptions().MayInlineCXXAllocator)) {
422       getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE,
423                                                  *this, /*wasInlined=*/true);
424     } else {
425       Dst.insert(DstPostCall);
426     }
427 
428     // Enqueue the next element in the block.
429     for (ExplodedNodeSet::iterator PSI = Dst.begin(), PSE = Dst.end();
430          PSI != PSE; ++PSI) {
431       unsigned Idx = calleeCtx->getIndex() + (ShouldRepeatCall ? 0 : 1);
432 
433       Engine.getWorkList()->enqueue(*PSI, calleeCtx->getCallSiteBlock(), Idx);
434     }
435   }
436 }
437 
438 bool ExprEngine::isSmall(AnalysisDeclContext *ADC) const {
439   // When there are no branches in the function, it means that there's no
440   // exponential complexity introduced by inlining such a function.
441   // Such functions also don't trigger various fundamental problems
442   // with our inlining mechanism, such as the problem of
443   // inlined defensive checks. Hence the isLinear() check.
444   const CFG *Cfg = ADC->getCFG();
445   return Cfg->isLinear() || Cfg->size() <= AMgr.options.AlwaysInlineSize;
446 }
447 
448 bool ExprEngine::isLarge(AnalysisDeclContext *ADC) const {
449   const CFG *Cfg = ADC->getCFG();
450   return Cfg->size() >= AMgr.options.MinCFGSizeTreatFunctionsAsLarge;
451 }
452 
453 bool ExprEngine::isHuge(AnalysisDeclContext *ADC) const {
454   const CFG *Cfg = ADC->getCFG();
455   return Cfg->getNumBlockIDs() > AMgr.options.MaxInlinableSize;
456 }
457 
458 void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx,
459                                bool &IsRecursive, unsigned &StackDepth) {
460   IsRecursive = false;
461   StackDepth = 0;
462 
463   while (LCtx) {
464     if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LCtx)) {
465       const Decl *DI = SFC->getDecl();
466 
467       // Mark recursive (and mutually recursive) functions and always count
468       // them when measuring the stack depth.
469       if (DI == D) {
470         IsRecursive = true;
471         ++StackDepth;
472         LCtx = LCtx->getParent();
473         continue;
474       }
475 
476       // Do not count the small functions when determining the stack depth.
477       AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(DI);
478       if (!isSmall(CalleeADC))
479         ++StackDepth;
480     }
481     LCtx = LCtx->getParent();
482   }
483 }
484 
485 // The GDM component containing the dynamic dispatch bifurcation info. When
486 // the exact type of the receiver is not known, we want to explore both paths:
487 // one on which we inline the call and one on which we don't. This is done to
488 // ensure we do not drop coverage.
489 // This is a map from the receiver region to a value specifying whether we
490 // consider this region's type information precise along the given path.
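// For example, for a message sent to a receiver whose dynamic type is only a
// best guess, one path inlines the guessed definition (the "inlined" mode) and
// the other evaluates the call conservatively; the map records which of the
// two assumptions holds for that receiver region on the current path.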
491 namespace {
492   enum DynamicDispatchMode {
493     DynamicDispatchModeInlined = 1,
494     DynamicDispatchModeConservative
495   };
496 } // end anonymous namespace
497 
498 REGISTER_MAP_WITH_PROGRAMSTATE(DynamicDispatchBifurcationMap,
499                                const MemRegion *, unsigned)
500 REGISTER_TRAIT_WITH_PROGRAMSTATE(CTUDispatchBifurcation, bool)
501 
502 void ExprEngine::ctuBifurcate(const CallEvent &Call, const Decl *D,
503                               NodeBuilder &Bldr, ExplodedNode *Pred,
504                               ProgramStateRef State) {
505   ProgramStateRef ConservativeEvalState = nullptr;
506   if (Call.isForeign() && !isSecondPhaseCTU()) {
507     const auto IK = AMgr.options.getCTUPhase1Inlining();
508     const bool DoInline = IK == CTUPhase1InliningKind::All ||
509                           (IK == CTUPhase1InliningKind::Small &&
510                            isSmall(AMgr.getAnalysisDeclContext(D)));
511     if (DoInline) {
512       inlineCall(Engine.getWorkList(), Call, D, Bldr, Pred, State);
513       return;
514     }
515     const bool BState = State->get<CTUDispatchBifurcation>();
516     if (!BState) { // This is the first time we see this foreign function.
517       // Enqueue it to be analyzed in the second (ctu) phase.
518       inlineCall(Engine.getCTUWorkList(), Call, D, Bldr, Pred, State);
519       // Conservatively evaluate in the first phase.
520       ConservativeEvalState = State->set<CTUDispatchBifurcation>(true);
521       conservativeEvalCall(Call, Bldr, Pred, ConservativeEvalState);
522     } else {
523       conservativeEvalCall(Call, Bldr, Pred, State);
524     }
525     return;
526   }
527   inlineCall(Engine.getWorkList(), Call, D, Bldr, Pred, State);
528 }
529 
530 void ExprEngine::inlineCall(WorkList *WList, const CallEvent &Call,
531                             const Decl *D, NodeBuilder &Bldr,
532                             ExplodedNode *Pred, ProgramStateRef State) {
533   assert(D);
534 
535   const LocationContext *CurLC = Pred->getLocationContext();
536   const StackFrameContext *CallerSFC = CurLC->getStackFrame();
537   const LocationContext *ParentOfCallee = CallerSFC;
538   if (Call.getKind() == CE_Block &&
539       !cast<BlockCall>(Call).isConversionFromLambda()) {
540     const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
541     assert(BR && "If we have the block definition we should have its region");
542     AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
543     ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
544                                                          cast<BlockDecl>(D),
545                                                          BR);
546   }
547 
548   // This may be NULL, but that's fine.
549   const Expr *CallE = Call.getOriginExpr();
550 
551   // Construct a new stack frame for the callee.
552   AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
553   const StackFrameContext *CalleeSFC =
554       CalleeADC->getStackFrame(ParentOfCallee, CallE, currBldrCtx->getBlock(),
555                                currBldrCtx->blockCount(), currStmtIdx);
556 
557   CallEnter Loc(CallE, CalleeSFC, CurLC);
558 
559   // Construct a new state which contains the mapping from actual to
560   // formal arguments.
561   State = State->enterStackFrame(Call, CalleeSFC);
562 
563   bool isNew;
564   if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
565     N->addPredecessor(Pred, G);
566     if (isNew)
567       WList->enqueue(N);
568   }
569 
570   // If we decided to inline the call, the successor has been manually
571   // added onto the work list, so remove it from the node builder.
572   Bldr.takeNodes(Pred);
573 
574   NumInlinedCalls++;
575   Engine.FunctionSummaries->bumpNumTimesInlined(D);
576 
577   // Do not mark the decl as visited in the 2nd run (CTUWList), so the function
578   // will still be visited as a top-level function; this way we won't lose
579   // reports in non-CTU mode, e.g. when a function in a foreign TU calls back
580   // into the main TU.
581   // Note that during the 1st run it doesn't matter whether we mark the foreign
582   // functions as visited or not, because they can never appear as a top-level
583   // function in the main TU.
584   if (!isSecondPhaseCTU())
585     // Mark the decl as visited.
586     if (VisitedCallees)
587       VisitedCallees->insert(D);
588 }
589 
590 static ProgramStateRef getInlineFailedState(ProgramStateRef State,
591                                             const Stmt *CallE) {
592   const void *ReplayState = State->get<ReplayWithoutInlining>();
593   if (!ReplayState)
594     return nullptr;
595 
596   assert(ReplayState == CallE && "Backtracked to the wrong call.");
597   (void)CallE;
598 
599   return State->remove<ReplayWithoutInlining>();
600 }
601 
602 void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
603                                ExplodedNodeSet &dst) {
604   // Perform the previsit of the CallExpr.
605   ExplodedNodeSet dstPreVisit;
606   getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, CE, *this);
607 
608   // Get the call in its initial state. We use this as a template to perform
609   // all the checks.
610   CallEventManager &CEMgr = getStateManager().getCallEventManager();
611   CallEventRef<> CallTemplate = CEMgr.getSimpleCall(
612       CE, Pred->getState(), Pred->getLocationContext(), getCFGElementRef());
613 
614   // Evaluate the function call.  We try each of the checkers
615   // to see if they can evaluate the function call.
616   ExplodedNodeSet dstCallEvaluated;
617   for (ExplodedNode *N : dstPreVisit) {
618     evalCall(dstCallEvaluated, N, *CallTemplate);
619   }
620 
621   // Finally, perform the post-condition check of the CallExpr and store
622   // the created nodes in 'Dst'.
623   // Note that if the call was inlined, dstCallEvaluated will be empty.
624   // The post-CallExpr check will occur in processCallExit.
625   getCheckerManager().runCheckersForPostStmt(dst, dstCallEvaluated, CE,
626                                              *this);
627 }
628 
629 ProgramStateRef ExprEngine::finishArgumentConstruction(ProgramStateRef State,
630                                                        const CallEvent &Call) {
631   const Expr *E = Call.getOriginExpr();
632   // FIXME: Constructors to placement arguments of operator new
633   // are not supported yet.
634   if (!E || isa<CXXNewExpr>(E))
635     return State;
636 
637   const LocationContext *LC = Call.getLocationContext();
638   for (unsigned CallI = 0, CallN = Call.getNumArgs(); CallI != CallN; ++CallI) {
639     unsigned I = Call.getASTArgumentIndex(CallI);
640     if (std::optional<SVal> V = getObjectUnderConstruction(State, {E, I}, LC)) {
641       SVal VV = *V;
642       (void)VV;
643       assert(cast<VarRegion>(VV.castAs<loc::MemRegionVal>().getRegion())
644                  ->getStackFrame()->getParent()
645                  ->getStackFrame() == LC->getStackFrame());
646       State = finishObjectConstruction(State, {E, I}, LC);
647     }
648   }
649 
650   return State;
651 }
652 
653 void ExprEngine::finishArgumentConstruction(ExplodedNodeSet &Dst,
654                                             ExplodedNode *Pred,
655                                             const CallEvent &Call) {
656   ProgramStateRef State = Pred->getState();
657   ProgramStateRef CleanedState = finishArgumentConstruction(State, Call);
658   if (CleanedState == State) {
659     Dst.insert(Pred);
660     return;
661   }
662 
663   const Expr *E = Call.getOriginExpr();
664   const LocationContext *LC = Call.getLocationContext();
665   NodeBuilder B(Pred, Dst, *currBldrCtx);
666   static SimpleProgramPointTag Tag("ExprEngine",
667                                    "Finish argument construction");
668   PreStmt PP(E, LC, &Tag);
669   B.generateNode(PP, CleanedState, Pred);
670 }
671 
672 void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
673                           const CallEvent &Call) {
674   // WARNING: At this time, the state attached to 'Call' may be older than the
675   // state in 'Pred'. This is a minor optimization since CheckerManager will
676   // use an updated CallEvent instance when calling checkers, but if 'Call' is
677   // ever used directly in this function all callers should be updated to pass
678   // the most recent state. (It is probably not worth doing the work here since
679   // for some callers this will not be necessary.)
680 
681   // Run any pre-call checks using the generic call interface.
682   ExplodedNodeSet dstPreVisit;
683   getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred,
684                                             Call, *this);
685 
686   // Actually evaluate the function call.  We try each of the checkers
687   // to see if they can evaluate the function call, and get a callback at
688   // defaultEvalCall if all of them fail.
689   ExplodedNodeSet dstCallEvaluated;
690   getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
691                                              Call, *this, EvalCallOptions());
692 
693   // If there were other constructors called for object-type arguments
694   // of this call, clean them up.
695   ExplodedNodeSet dstArgumentCleanup;
696   for (ExplodedNode *I : dstCallEvaluated)
697     finishArgumentConstruction(dstArgumentCleanup, I, Call);
698 
699   ExplodedNodeSet dstPostCall;
700   getCheckerManager().runCheckersForPostCall(dstPostCall, dstArgumentCleanup,
701                                              Call, *this);
702 
703   // Handle symbols that escaped while the regions above were invalidated.
704   // Note that for inlined calls the nodes were put back into the worklist,
705   // so we can assume that every node here belongs to a conservatively
706   // evaluated call at this point.
707 
708   // Run pointerEscape callback with the newly conjured symbols.
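  // For example, after conservatively evaluating a call that takes a non-const
  // `int *` argument, the freshly conjured value of the pointee is reported
  // below as escaping through an out parameter.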
709   SmallVector<std::pair<SVal, SVal>, 8> Escaped;
710   for (ExplodedNode *I : dstPostCall) {
711     NodeBuilder B(I, Dst, *currBldrCtx);
712     ProgramStateRef State = I->getState();
713     Escaped.clear();
714     {
715       unsigned Arg = -1;
716       for (const ParmVarDecl *PVD : Call.parameters()) {
717         ++Arg;
718         QualType ParamTy = PVD->getType();
719         if (ParamTy.isNull() ||
720             (!ParamTy->isPointerType() && !ParamTy->isReferenceType()))
721           continue;
722         QualType Pointee = ParamTy->getPointeeType();
723         if (Pointee.isConstQualified() || Pointee->isVoidType())
724           continue;
725         if (const MemRegion *MR = Call.getArgSVal(Arg).getAsRegion())
726           Escaped.emplace_back(loc::MemRegionVal(MR), State->getSVal(MR, Pointee));
727       }
728     }
729 
730     State = processPointerEscapedOnBind(State, Escaped, I->getLocationContext(),
731                                         PSK_EscapeOutParameters, &Call);
732 
733     if (State == I->getState())
734       Dst.insert(I);
735     else
736       B.generateNode(I->getLocation(), State, I);
737   }
738 }
739 
740 ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
741                                             const LocationContext *LCtx,
742                                             ProgramStateRef State) {
743   const Expr *E = Call.getOriginExpr();
744   if (!E)
745     return State;
746 
747   // Some method families have known return values.
748   if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) {
749     switch (Msg->getMethodFamily()) {
750     default:
751       break;
752     case OMF_autorelease:
753     case OMF_retain:
754     case OMF_self: {
755       // These methods return their receivers.
756       return State->BindExpr(E, LCtx, Msg->getReceiverSVal());
757     }
758     }
759   } else if (const CXXConstructorCall *C = dyn_cast<CXXConstructorCall>(&Call)){
760     SVal ThisV = C->getCXXThisVal();
761     ThisV = State->getSVal(ThisV.castAs<Loc>());
762     return State->BindExpr(E, LCtx, ThisV);
763   }
764 
765   SVal R;
766   QualType ResultTy = Call.getResultType();
767   unsigned Count = currBldrCtx->blockCount();
768   if (auto RTC = getCurrentCFGElement().getAs<CFGCXXRecordTypedCall>()) {
769     // Conjure a temporary if the function returns an object by value.
770     SVal Target;
771     assert(RTC->getStmt() == Call.getOriginExpr());
772     EvalCallOptions CallOpts; // FIXME: We won't really need those.
773     std::tie(State, Target) = handleConstructionContext(
774         Call.getOriginExpr(), State, currBldrCtx, LCtx,
775         RTC->getConstructionContext(), CallOpts);
776     const MemRegion *TargetR = Target.getAsRegion();
777     assert(TargetR);
778     // Invalidate the region so that it doesn't look uninitialized. If this is
779     // a field or element constructor, we do not want to invalidate
780     // the whole structure. Pointer escape is meaningless because
781     // the structure is a product of conservative evaluation
782     // and therefore contains nothing interesting at this point.
783     RegionAndSymbolInvalidationTraits ITraits;
784     ITraits.setTrait(TargetR,
785         RegionAndSymbolInvalidationTraits::TK_DoNotInvalidateSuperRegion);
786     State = State->invalidateRegions(TargetR, E, Count, LCtx,
787                                      /* CausesPointerEscape=*/false, nullptr,
788                                      &Call, &ITraits);
789 
790     R = State->getSVal(Target.castAs<Loc>(), E->getType());
791   } else {
792     // Conjure a symbol if the return value is unknown.
793 
794     // See if we need to conjure a heap pointer instead of
795     // a regular unknown pointer.
796     const auto *CNE = dyn_cast<CXXNewExpr>(E);
797     if (CNE && CNE->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
798       R = svalBuilder.getConjuredHeapSymbolVal(E, LCtx, Count);
799       const MemRegion *MR = R.getAsRegion()->StripCasts();
800 
801       // Store the extent of the allocated object(s).
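      // For `new T[n]` the extent below becomes n * sizeof(T); for a scalar
      // `new T` the element count defaults to 1.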
802       SVal ElementCount;
803       if (const Expr *SizeExpr = CNE->getArraySize().value_or(nullptr)) {
804         ElementCount = State->getSVal(SizeExpr, LCtx);
805       } else {
806         ElementCount = svalBuilder.makeIntVal(1, /*IsUnsigned=*/true);
807       }
808 
809       SVal ElementSize = getElementExtent(CNE->getAllocatedType(), svalBuilder);
810 
811       SVal Size =
812           svalBuilder.evalBinOp(State, BO_Mul, ElementCount, ElementSize,
813                                 svalBuilder.getArrayIndexType());
814 
815       // FIXME: This line is to prevent a crash. For more details please check
816       // issue #56264.
817       if (Size.isUndef())
818         Size = UnknownVal();
819 
820       State = setDynamicExtent(State, MR, Size.castAs<DefinedOrUnknownSVal>(),
821                                svalBuilder);
822     } else {
823       R = svalBuilder.conjureSymbolVal(nullptr, E, LCtx, ResultTy, Count);
824     }
825   }
826   return State->BindExpr(E, LCtx, R);
827 }
828 
829 // Conservatively evaluate call by invalidating regions and binding
830 // a conjured return value.
831 void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
832                                       ExplodedNode *Pred, ProgramStateRef State) {
833   State = Call.invalidateRegions(currBldrCtx->blockCount(), State);
834   State = bindReturnValue(Call, Pred->getLocationContext(), State);
835 
836   // And make the result node.
837   static SimpleProgramPointTag PT("ExprEngine", "Conservative eval call");
838   Bldr.generateNode(Call.getProgramPoint(false, &PT), State, Pred);
839 }
840 
841 ExprEngine::CallInlinePolicy
842 ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
843                               AnalyzerOptions &Opts,
844                               const EvalCallOptions &CallOpts) {
845   const LocationContext *CurLC = Pred->getLocationContext();
846   const StackFrameContext *CallerSFC = CurLC->getStackFrame();
847   switch (Call.getKind()) {
848   case CE_Function:
849   case CE_Block:
850     break;
851   case CE_CXXMember:
852   case CE_CXXMemberOperator:
853     if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions))
854       return CIP_DisallowedAlways;
855     break;
856   case CE_CXXConstructor: {
857     if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors))
858       return CIP_DisallowedAlways;
859 
860     const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);
861 
862     const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();
863 
864     auto CCE = getCurrentCFGElement().getAs<CFGConstructor>();
865     const ConstructionContext *CC = CCE ? CCE->getConstructionContext()
866                                         : nullptr;
867 
868     if (llvm::isa_and_nonnull<NewAllocatedObjectConstructionContext>(CC) &&
869         !Opts.MayInlineCXXAllocator)
870       return CIP_DisallowedOnce;
871 
872     if (CallOpts.IsArrayCtorOrDtor) {
873       if (!shouldInlineArrayConstruction(Pred->getState(), CtorExpr, CurLC))
874         return CIP_DisallowedOnce;
875     }
876 
877     // Inlining constructors requires including initializers in the CFG.
878     const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
879     assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers");
880     (void)ADC;
881 
882     // If the destructor is trivial, it's always safe to inline the constructor.
883     if (Ctor.getDecl()->getParent()->hasTrivialDestructor())
884       break;
885 
886     // For other types, only inline constructors if destructor inlining is
887     // also enabled.
888     if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
889       return CIP_DisallowedAlways;
890 
891     if (CtorExpr->getConstructionKind() == CXXConstructionKind::Complete) {
892       // If we don't handle temporary destructors, we shouldn't inline
893       // their constructors.
894       if (CallOpts.IsTemporaryCtorOrDtor &&
895           !Opts.ShouldIncludeTemporaryDtorsInCFG)
896         return CIP_DisallowedOnce;
897 
898       // If we did not find the correct this-region, it would be pointless
899       // to inline the constructor. Instead we will simply invalidate
900       // the fake temporary target.
901       if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion)
902         return CIP_DisallowedOnce;
903 
904       // If the temporary is lifetime-extended by binding it to a reference-type
905       // field within an aggregate, automatic destructors don't work properly.
906       if (CallOpts.IsTemporaryLifetimeExtendedViaAggregate)
907         return CIP_DisallowedOnce;
908     }
909 
910     break;
911   }
912   case CE_CXXInheritedConstructor: {
913     // This never really increases the cost of inlining, because
914     // the stack frame of the inherited constructor is trivial.
915     return CIP_Allowed;
916   }
917   case CE_CXXDestructor: {
918     if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
919       return CIP_DisallowedAlways;
920 
921     // Inlining destructors requires building the CFG correctly.
922     const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
923     assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors");
924     (void)ADC;
925 
926     if (CallOpts.IsArrayCtorOrDtor) {
927       if (!shouldInlineArrayDestruction(getElementCountOfArrayBeingDestructed(
928               Call, Pred->getState(), svalBuilder))) {
929         return CIP_DisallowedOnce;
930       }
931     }
932 
933     // Allow disabling temporary destructor inlining with a separate option.
934     if (CallOpts.IsTemporaryCtorOrDtor &&
935         !Opts.MayInlineCXXTemporaryDtors)
936       return CIP_DisallowedOnce;
937 
938     // If we did not find the correct this-region, it would be pointless
939     // to inline the destructor. Instead we will simply invalidate
940     // the fake temporary target.
941     if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion)
942       return CIP_DisallowedOnce;
943     break;
944   }
945   case CE_CXXDeallocator:
946     [[fallthrough]];
947   case CE_CXXAllocator:
948     if (Opts.MayInlineCXXAllocator)
949       break;
950     // Do not inline allocators until we model deallocators.
951     // This is unfortunate, but basically necessary for smart pointers and such.
952     return CIP_DisallowedAlways;
953   case CE_ObjCMessage:
954     if (!Opts.MayInlineObjCMethod)
955       return CIP_DisallowedAlways;
956     if (!(Opts.getIPAMode() == IPAK_DynamicDispatch ||
957           Opts.getIPAMode() == IPAK_DynamicDispatchBifurcate))
958       return CIP_DisallowedAlways;
959     break;
960   }
961 
962   return CIP_Allowed;
963 }
964 
965 /// Returns true if the given C++ class contains a member with the given name.
966 static bool hasMember(const ASTContext &Ctx, const CXXRecordDecl *RD,
967                       StringRef Name) {
968   const IdentifierInfo &II = Ctx.Idents.get(Name);
969   return RD->hasMemberName(Ctx.DeclarationNames.getIdentifier(&II));
970 }
971 
972 /// Returns true if the given C++ class is a container or iterator.
973 ///
974 /// Our heuristic for this is whether it contains a method named 'begin()' or a
975 /// nested type named 'iterator' or 'iterator_category'.
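/// For example, std::vector defines begin() and a nested iterator type, so it
/// is classified as a container class by this heuristic.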
976 static bool isContainerClass(const ASTContext &Ctx, const CXXRecordDecl *RD) {
977   return hasMember(Ctx, RD, "begin") ||
978          hasMember(Ctx, RD, "iterator") ||
979          hasMember(Ctx, RD, "iterator_category");
980 }
981 
982 /// Returns true if the given function refers to a method of a C++ container
983 /// or iterator.
984 ///
985 /// We generally do a poor job modeling most containers right now, and might
986 /// prefer not to inline their methods.
987 static bool isContainerMethod(const ASTContext &Ctx,
988                               const FunctionDecl *FD) {
989   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
990     return isContainerClass(Ctx, MD->getParent());
991   return false;
992 }
993 
994 /// Returns true if the given function is the destructor of a class named
995 /// "shared_ptr".
996 static bool isCXXSharedPtrDtor(const FunctionDecl *FD) {
997   const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(FD);
998   if (!Dtor)
999     return false;
1000 
1001   const CXXRecordDecl *RD = Dtor->getParent();
1002   if (const IdentifierInfo *II = RD->getDeclName().getAsIdentifierInfo())
1003     if (II->isStr("shared_ptr"))
1004         return true;
1005 
1006   return false;
1007 }
1008 
1009 /// Returns true if the function in \p CalleeADC may be inlined in general.
1010 ///
1011 /// This checks static properties of the function, such as its signature and
1012 /// CFG, to determine whether the analyzer should ever consider inlining it,
1013 /// in any context.
1014 bool ExprEngine::mayInlineDecl(AnalysisDeclContext *CalleeADC) const {
1015   AnalyzerOptions &Opts = AMgr.getAnalyzerOptions();
1016   // FIXME: Do not inline variadic calls.
1017   if (CallEvent::isVariadic(CalleeADC->getDecl()))
1018     return false;
1019 
1020   // Check certain C++-related inlining policies.
1021   ASTContext &Ctx = CalleeADC->getASTContext();
1022   if (Ctx.getLangOpts().CPlusPlus) {
1023     if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeADC->getDecl())) {
1024       // Conditionally control the inlining of template functions.
1025       if (!Opts.MayInlineTemplateFunctions)
1026         if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
1027           return false;
1028 
1029       // Conditionally control the inlining of C++ standard library functions.
1030       if (!Opts.MayInlineCXXStandardLibrary)
1031         if (Ctx.getSourceManager().isInSystemHeader(FD->getLocation()))
1032           if (AnalysisDeclContext::isInStdNamespace(FD))
1033             return false;
1034 
1035       // Conditionally control the inlining of methods on objects that look
1036       // like C++ containers.
1037       if (!Opts.MayInlineCXXContainerMethods)
1038         if (!AMgr.isInCodeFile(FD->getLocation()))
1039           if (isContainerMethod(Ctx, FD))
1040             return false;
1041 
1042       // Conditionally control the inlining of the destructor of C++ shared_ptr.
1043       // We don't currently do a good job modeling shared_ptr because we can't
1044       // see the reference count, so treating it as opaque is probably the
1045       // best idea.
1046       if (!Opts.MayInlineCXXSharedPtrDtor)
1047         if (isCXXSharedPtrDtor(FD))
1048           return false;
1049     }
1050   }
1051 
1052   // It is possible that the CFG cannot be constructed.
1053   // Be safe, and check if the CalleeCFG is valid.
1054   const CFG *CalleeCFG = CalleeADC->getCFG();
1055   if (!CalleeCFG)
1056     return false;
1057 
1058   // Do not inline huge functions.
1059   if (isHuge(CalleeADC))
1060     return false;
1061 
1062   // It is possible that the live variables analysis cannot be
1063   // run.  If so, bail out.
1064   if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
1065     return false;
1066 
1067   return true;
1068 }
1069 
1070 bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
1071                                   const ExplodedNode *Pred,
1072                                   const EvalCallOptions &CallOpts) {
1073   if (!D)
1074     return false;
1075 
1076   AnalysisManager &AMgr = getAnalysisManager();
1077   AnalyzerOptions &Opts = AMgr.options;
1078   AnalysisDeclContextManager &ADCMgr = AMgr.getAnalysisDeclContextManager();
1079   AnalysisDeclContext *CalleeADC = ADCMgr.getContext(D);
1080 
1081   // The auto-synthesized bodies are essential to inline as they are
1082   // usually small and commonly used. Note: we should do this check early on to
1083   // ensure we always inline these calls.
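  // (These bodies are synthesized by the analyzer's BodyFarm, for example for
  // Objective-C property accessors and a few well-known library functions.)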
1084   if (CalleeADC->isBodyAutosynthesized())
1085     return true;
1086 
1087   if (!AMgr.shouldInlineCall())
1088     return false;
1089 
1090   // Check if this function has been marked as non-inlinable.
1091   std::optional<bool> MayInline = Engine.FunctionSummaries->mayInline(D);
1092   if (MayInline) {
1093     if (!*MayInline)
1094       return false;
1095 
1096   } else {
1097     // We haven't actually checked the static properties of this function yet.
1098     // Do that now, and record our decision in the function summaries.
1099     if (mayInlineDecl(CalleeADC)) {
1100       Engine.FunctionSummaries->markMayInline(D);
1101     } else {
1102       Engine.FunctionSummaries->markShouldNotInline(D);
1103       return false;
1104     }
1105   }
1106 
1107   // Check if we should inline a call based on its kind.
1108   // FIXME: this checks both static and dynamic properties of the call, which
1109   // means we're redoing a bit of work that could be cached in the function
1110   // summary.
1111   CallInlinePolicy CIP = mayInlineCallKind(Call, Pred, Opts, CallOpts);
1112   if (CIP != CIP_Allowed) {
1113     if (CIP == CIP_DisallowedAlways) {
1114       assert(!MayInline || *MayInline);
1115       Engine.FunctionSummaries->markShouldNotInline(D);
1116     }
1117     return false;
1118   }
1119 
1120   // Do not inline if recursive or we've reached max stack frame count.
1121   bool IsRecursive = false;
1122   unsigned StackDepth = 0;
1123   examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth);
1124   if ((StackDepth >= Opts.InlineMaxStackDepth) &&
1125       (!isSmall(CalleeADC) || IsRecursive))
1126     return false;
1127 
1128   // Do not inline large functions too many times.
1129   if ((Engine.FunctionSummaries->getNumTimesInlined(D) >
1130        Opts.MaxTimesInlineLarge) &&
1131       isLarge(CalleeADC)) {
1132     NumReachedInlineCountMax++;
1133     return false;
1134   }
1135 
1136   if (HowToInline == Inline_Minimal && (!isSmall(CalleeADC) || IsRecursive))
1137     return false;
1138 
1139   return true;
1140 }
1141 
1142 bool ExprEngine::shouldInlineArrayConstruction(const ProgramStateRef State,
1143                                                const CXXConstructExpr *CE,
1144                                                const LocationContext *LCtx) {
1145   if (!CE)
1146     return false;
1147 
1148   // FIXME: Handle other array types.
1149   if (const auto *CAT = dyn_cast<ConstantArrayType>(CE->getType())) {
1150     unsigned ArrSize = getContext().getConstantArrayElementCount(CAT);
1151 
1152     // This might seem counter-intuitive at first glance, but the functions are
1153     // closely related. Reasoning about destructors depends only on the type
1154     // of the expression that initialized the memory region, which is the
1155     // CXXConstructExpr. So to avoid code repetition, the work is delegated
1156     // to the function that reasons about destructor inlining. Also note that
1157     // if the constructors of the array elements are inlined, the destructors
1158     // can also be inlined, and if the destructors can be inlined, it's safe
1159     // to inline the constructors.
1160     return shouldInlineArrayDestruction(ArrSize);
1161   }
1162 
1163   // Check if we're inside an ArrayInitLoopExpr, and it's sufficiently small.
1164   if (auto Size = getPendingInitLoop(State, CE, LCtx))
1165     return shouldInlineArrayDestruction(*Size);
1166 
1167   return false;
1168 }
1169 
1170 bool ExprEngine::shouldInlineArrayDestruction(uint64_t Size) {
1171 
1172   uint64_t maxAllowedSize = AMgr.options.maxBlockVisitOnPath;
1173 
1174   // Declaring a 0-element array is also possible, hence the Size > 0 check.
1175   return Size <= maxAllowedSize && Size > 0;
1176 }
1177 
1178 bool ExprEngine::shouldRepeatCtorCall(ProgramStateRef State,
1179                                       const CXXConstructExpr *E,
1180                                       const LocationContext *LCtx) {
1181 
1182   if (!E)
1183     return false;
1184 
1185   auto Ty = E->getType();
1186 
1187   // FIXME: Handle non-constant array types.
1188   if (const auto *CAT = dyn_cast<ConstantArrayType>(Ty)) {
1189     unsigned Size = getContext().getConstantArrayElementCount(CAT);
1190     return Size > getIndexOfElementToConstruct(State, E, LCtx);
1191   }
1192 
1193   if (auto Size = getPendingInitLoop(State, E, LCtx))
1194     return Size > getIndexOfElementToConstruct(State, E, LCtx);
1195 
1196   return false;
1197 }
1198 
1199 static bool isTrivialObjectAssignment(const CallEvent &Call) {
1200   const CXXInstanceCall *ICall = dyn_cast<CXXInstanceCall>(&Call);
1201   if (!ICall)
1202     return false;
1203 
1204   const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(ICall->getDecl());
1205   if (!MD)
1206     return false;
1207   if (!(MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()))
1208     return false;
1209 
1210   return MD->isTrivial();
1211 }
1212 
1213 void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
1214                                  const CallEvent &CallTemplate,
1215                                  const EvalCallOptions &CallOpts) {
1216   // Make sure we have the most recent state attached to the call.
1217   ProgramStateRef State = Pred->getState();
1218   CallEventRef<> Call = CallTemplate.cloneWithState(State);
1219 
1220   // Special-case trivial assignment operators.
1221   if (isTrivialObjectAssignment(*Call)) {
1222     performTrivialCopy(Bldr, Pred, *Call);
1223     return;
1224   }
1225 
1226   // Try to inline the call.
1227   // The origin expression here is just used as a kind of checksum;
1228   // this should still be safe even for CallEvents that don't come from exprs.
1229   const Expr *E = Call->getOriginExpr();
1230 
1231   ProgramStateRef InlinedFailedState = getInlineFailedState(State, E);
1232   if (InlinedFailedState) {
1233     // If we already tried once and failed, make sure we don't retry later.
1234     State = InlinedFailedState;
1235   } else {
1236     RuntimeDefinition RD = Call->getRuntimeDefinition();
1237     Call->setForeign(RD.isForeign());
1238     const Decl *D = RD.getDecl();
1239     if (shouldInlineCall(*Call, D, Pred, CallOpts)) {
1240       if (RD.mayHaveOtherDefinitions()) {
1241         AnalyzerOptions &Options = getAnalysisManager().options;
1242 
1243         // Explore with and without inlining the call.
1244         if (Options.getIPAMode() == IPAK_DynamicDispatchBifurcate) {
1245           BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred);
1246           return;
1247         }
1248 
1249         // Don't inline if we're not in any dynamic dispatch mode.
1250         if (Options.getIPAMode() != IPAK_DynamicDispatch) {
1251           conservativeEvalCall(*Call, Bldr, Pred, State);
1252           return;
1253         }
1254       }
1255       ctuBifurcate(*Call, D, Bldr, Pred, State);
1256       return;
1257     }
1258   }
1259 
1260   // If we can't inline it, clean up the state traits that are used only when
1261   // the function is inlined.
1262   State = removeStateTraitsUsedForArrayEvaluation(
1263       State, dyn_cast_or_null<CXXConstructExpr>(E), Call->getLocationContext());
1264 
1265   // Also handle the return value and invalidate the regions.
1266   conservativeEvalCall(*Call, Bldr, Pred, State);
1267 }
1268 
1269 void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
1270                                const CallEvent &Call, const Decl *D,
1271                                NodeBuilder &Bldr, ExplodedNode *Pred) {
1272   assert(BifurReg);
1273   BifurReg = BifurReg->StripCasts();
1274 
1275   // Check if we've performed the split already - note, we only want
1276   // to split the path once per memory region.
1277   ProgramStateRef State = Pred->getState();
1278   const unsigned *BState =
1279                         State->get<DynamicDispatchBifurcationMap>(BifurReg);
1280   if (BState) {
1281     // If we are on the "inline path", keep inlining if possible.
1282     if (*BState == DynamicDispatchModeInlined)
1283       ctuBifurcate(Call, D, Bldr, Pred, State);
1284     // If inlining failed, or we are on the path where we assume we
1285     // don't have enough info about the receiver to inline, conjure the
1286     // return value and invalidate the regions.
1287     conservativeEvalCall(Call, Bldr, Pred, State);
1288     return;
1289   }
1290 
1291   // If we got here, this is the first time we process a message to this
1292   // region, so split the path.
1293   ProgramStateRef IState =
1294       State->set<DynamicDispatchBifurcationMap>(BifurReg,
1295                                                DynamicDispatchModeInlined);
1296   ctuBifurcate(Call, D, Bldr, Pred, IState);
1297 
1298   ProgramStateRef NoIState =
1299       State->set<DynamicDispatchBifurcationMap>(BifurReg,
1300                                                DynamicDispatchModeConservative);
1301   conservativeEvalCall(Call, Bldr, Pred, NoIState);
1302 
1303   NumOfDynamicDispatchPathSplits++;
1304 }
1305 
1306 void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
1307                                  ExplodedNodeSet &Dst) {
1308   ExplodedNodeSet dstPreVisit;
1309   getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, RS, *this);
1310 
1311   StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx);
1312 
1313   if (RS->getRetValue()) {
1314     for (ExplodedNodeSet::iterator it = dstPreVisit.begin(),
1315                                   ei = dstPreVisit.end(); it != ei; ++it) {
1316       B.generateNode(RS, *it, (*it)->getState());
1317     }
1318   }
1319 }
1320