1 //=-- ExprEngineCallAndReturn.cpp - Support for call/return -----*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines ExprEngine's support for calls and returns.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "PrettyStackTraceLocationContext.h"
14 #include "clang/AST/CXXInheritance.h"
15 #include "clang/AST/Decl.h"
16 #include "clang/AST/DeclCXX.h"
17 #include "clang/Analysis/Analyses/LiveVariables.h"
18 #include "clang/Analysis/ConstructionContext.h"
19 #include "clang/StaticAnalyzer/Core/CheckerManager.h"
20 #include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
21 #include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
22 #include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
23 #include "llvm/ADT/SmallSet.h"
24 #include "llvm/ADT/Statistic.h"
25 #include "llvm/Support/Casting.h"
26 #include "llvm/Support/Compiler.h"
27 #include "llvm/Support/SaveAndRestore.h"
28 #include <optional>
29
30 using namespace clang;
31 using namespace ento;
32
33 #define DEBUG_TYPE "ExprEngine"
34
35 STATISTIC(NumOfDynamicDispatchPathSplits,
36 "The # of times we split the path due to imprecise dynamic dispatch info");
37
38 STATISTIC(NumInlinedCalls,
39 "The # of times we inlined a call");
40
41 STATISTIC(NumReachedInlineCountMax,
42 "The # of times we reached inline count maximum");
43
44 void ExprEngine::processCallEnter(NodeBuilderContext& BC, CallEnter CE,
45 ExplodedNode *Pred) {
46 // Get the entry block in the CFG of the callee.
47 const StackFrameContext *calleeCtx = CE.getCalleeContext();
48 PrettyStackTraceLocationContext CrashInfo(calleeCtx);
49 const CFGBlock *Entry = CE.getEntry();
50
51 // Validate the CFG.
52 assert(Entry->empty());
53 assert(Entry->succ_size() == 1);
54
55 // Get the solitary successor.
56 const CFGBlock *Succ = *(Entry->succ_begin());
57
58 // Construct an edge representing the starting location in the callee.
59 BlockEdge Loc(Entry, Succ, calleeCtx);
60
61 ProgramStateRef state = Pred->getState();
62
63 // Construct a new node, notify checkers that analysis of the function has
64 // begun, and add the resultant nodes to the worklist.
65 bool isNew;
66 ExplodedNode *Node = G.getNode(Loc, state, false, &isNew);
67 Node->addPredecessor(Pred, G);
68 if (isNew) {
69 ExplodedNodeSet DstBegin;
70 processBeginOfFunction(BC, Node, DstBegin, Loc);
71 Engine.enqueue(DstBegin);
72 }
73 }
74
75 // Find the last statement on the path to the exploded node and the
76 // corresponding Block.
77 static std::pair<const Stmt*,
78                  const CFGBlock*> getLastStmt(const ExplodedNode *Node) {
79 const Stmt *S = nullptr;
80 const CFGBlock *Blk = nullptr;
81 const StackFrameContext *SF = Node->getStackFrame();
82
83 // Back up through the ExplodedGraph until we reach a statement node in this
84 // stack frame.
85 while (Node) {
86 const ProgramPoint &PP = Node->getLocation();
87
88 if (PP.getStackFrame() == SF) {
89 if (std::optional<StmtPoint> SP = PP.getAs<StmtPoint>()) {
90 S = SP->getStmt();
91 break;
92 } else if (std::optional<CallExitEnd> CEE = PP.getAs<CallExitEnd>()) {
93 S = CEE->getCalleeContext()->getCallSite();
94 if (S)
95 break;
96
97 // If there is no statement, this is an implicitly-generated call.
98 // We'll walk backwards over it and then continue the loop to find
99 // an actual statement.
100 std::optional<CallEnter> CE;
101 do {
102 Node = Node->getFirstPred();
103 CE = Node->getLocationAs<CallEnter>();
104 } while (!CE || CE->getCalleeContext() != CEE->getCalleeContext());
105
106 // Continue searching the graph.
107 } else if (std::optional<BlockEdge> BE = PP.getAs<BlockEdge>()) {
108 Blk = BE->getSrc();
109 }
110 } else if (std::optional<CallEnter> CE = PP.getAs<CallEnter>()) {
111 // If we reached the CallEnter for this function, it has no statements.
112 if (CE->getCalleeContext() == SF)
113 break;
114 }
115
116 if (Node->pred_empty())
117 return std::make_pair(nullptr, nullptr);
118
119 Node = *Node->pred_begin();
120 }
121
122 return std::make_pair(S, Blk);
123 }
124
125 /// Adjusts a return value when the called function's return type does not
126 /// match the caller's expression type. This can happen when a dynamic call
127 /// is devirtualized, and the overriding method has a covariant (more specific)
128 /// return type than the parent's method. For C++ objects, this means we need
129 /// to add base casts.
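/// For example (illustrative only; the types below are hypothetical):
/// \code
///   struct Base { virtual Base *clone() const; };
///   struct Derived : Base { Derived *clone() const override; }; // covariant
///
///   Base *b = ...;
///   Base *copy = b->clone(); // If devirtualized to Derived::clone, the
///                            // returned Derived* is adjusted back to Base*.
/// \endcode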
130 static SVal adjustReturnValue(SVal V, QualType ExpectedTy, QualType ActualTy,
131 StoreManager &StoreMgr) {
132 // For now, the only adjustments we handle apply only to locations.
133 if (!isa<Loc>(V))
134 return V;
135
136 // If the types already match, don't do any unnecessary work.
137 ExpectedTy = ExpectedTy.getCanonicalType();
138 ActualTy = ActualTy.getCanonicalType();
139 if (ExpectedTy == ActualTy)
140 return V;
141
142 // No adjustment is needed between Objective-C pointer types.
143 if (ExpectedTy->isObjCObjectPointerType() &&
144 ActualTy->isObjCObjectPointerType())
145 return V;
146
147 // C++ object pointers may need "derived-to-base" casts.
148 const CXXRecordDecl *ExpectedClass = ExpectedTy->getPointeeCXXRecordDecl();
149 const CXXRecordDecl *ActualClass = ActualTy->getPointeeCXXRecordDecl();
150 if (ExpectedClass && ActualClass) {
151 CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
152 /*DetectVirtual=*/false);
153 if (ActualClass->isDerivedFrom(ExpectedClass, Paths) &&
154 !Paths.isAmbiguous(ActualTy->getCanonicalTypeUnqualified())) {
155 return StoreMgr.evalDerivedToBase(V, Paths.front());
156 }
157 }
158
159 // Unfortunately, Objective-C does not enforce that overridden methods have
160 // covariant return types, so we can't assert that that never happens.
161 // Be safe and return UnknownVal().
162 return UnknownVal();
163 }
164
165 void ExprEngine::removeDeadOnEndOfFunction(NodeBuilderContext& BC,
166 ExplodedNode *Pred,
167 ExplodedNodeSet &Dst) {
168 // Find the last statement in the function and the corresponding basic block.
169 const Stmt *LastSt = nullptr;
170 const CFGBlock *Blk = nullptr;
171 std::tie(LastSt, Blk) = getLastStmt(Pred);
172 if (!Blk || !LastSt) {
173 Dst.Add(Pred);
174 return;
175 }
176
177 // Here, we destroy the current location context. We use the current
178 // function's entire body as a diagnostic statement, with which the program
179 // point will be associated. However, we only want to use LastStmt as a
180 // reference for what to clean up if it's a ReturnStmt; otherwise, everything
181 // is dead.
182 SaveAndRestore<const NodeBuilderContext *> NodeContextRAII(currBldrCtx, &BC);
183 const LocationContext *LCtx = Pred->getLocationContext();
184 removeDead(Pred, Dst, dyn_cast<ReturnStmt>(LastSt), LCtx,
185 LCtx->getAnalysisDeclContext()->getBody(),
186 ProgramPoint::PostStmtPurgeDeadSymbolsKind);
187 }
188
189 static bool wasDifferentDeclUsedForInlining(CallEventRef<> Call,
190 const StackFrameContext *calleeCtx) {
191 const Decl *RuntimeCallee = calleeCtx->getDecl();
192 const Decl *StaticDecl = Call->getDecl();
193 assert(RuntimeCallee);
194 if (!StaticDecl)
195 return true;
196 return RuntimeCallee->getCanonicalDecl() != StaticDecl->getCanonicalDecl();
197 }
198
199 // Returns the number of elements in the array currently being destructed.
200 // If the element count is not found, 0 will be returned.
201 static unsigned getElementCountOfArrayBeingDestructed(
202 const CallEvent &Call, const ProgramStateRef State, SValBuilder &SVB) {
203 assert(isa<CXXDestructorCall>(Call) &&
204 "The call event is not a destructor call!");
205
206 const auto &DtorCall = cast<CXXDestructorCall>(Call);
207
208 auto ThisVal = DtorCall.getCXXThisVal();
209
210 if (auto ThisElementRegion = dyn_cast<ElementRegion>(ThisVal.getAsRegion())) {
211 auto ArrayRegion = ThisElementRegion->getAsArrayOffset().getRegion();
212 auto ElementType = ThisElementRegion->getElementType();
213
214 auto ElementCount =
215 getDynamicElementCount(State, ArrayRegion, SVB, ElementType);
216
217 if (!ElementCount.isConstant())
218 return 0;
219
220 return ElementCount.getAsInteger()->getLimitedValue();
221 }
222
223 return 0;
224 }
225
226 ProgramStateRef ExprEngine::removeStateTraitsUsedForArrayEvaluation(
227 ProgramStateRef State, const CXXConstructExpr *E,
228 const LocationContext *LCtx) {
229
230 assert(LCtx && "Location context must be provided!");
231
232 if (E) {
233 if (getPendingInitLoop(State, E, LCtx))
234 State = removePendingInitLoop(State, E, LCtx);
235
236 if (getIndexOfElementToConstruct(State, E, LCtx))
237 State = removeIndexOfElementToConstruct(State, E, LCtx);
238 }
239
240 if (getPendingArrayDestruction(State, LCtx))
241 State = removePendingArrayDestruction(State, LCtx);
242
243 return State;
244 }
245
246 /// The call exit is simulated with a sequence of nodes, which occur between
247 /// CallExitBegin and CallExitEnd. The following operations occur between the
248 /// two program points:
249 /// 1. CallExitBegin (triggers the start of call exit sequence)
250 /// 2. Bind the return value
251 /// 3. Run Remove dead bindings to clean up the dead symbols from the callee.
252 /// 4. CallExitEnd (switch to the caller context)
253 /// 5. PostStmt<CallExpr>
254 void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
255 // Step 1 CEBNode was generated before the call.
256 PrettyStackTraceLocationContext CrashInfo(CEBNode->getLocationContext());
257 const StackFrameContext *calleeCtx = CEBNode->getStackFrame();
258
259 // The parent context might not be a stack frame, so make sure we
260 // look up the first enclosing stack frame.
261 const StackFrameContext *callerCtx =
262 calleeCtx->getParent()->getStackFrame();
263
264 const Stmt *CE = calleeCtx->getCallSite();
265 ProgramStateRef state = CEBNode->getState();
266 // Find the last statement in the function and the corresponding basic block.
267 const Stmt *LastSt = nullptr;
268 const CFGBlock *Blk = nullptr;
269 std::tie(LastSt, Blk) = getLastStmt(CEBNode);
270
271 // Generate a CallEvent /before/ cleaning the state, so that we can get the
272 // correct value for 'this' (if necessary).
273 CallEventManager &CEMgr = getStateManager().getCallEventManager();
274 CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state);
275
276 // Step 2: generate node with bound return value: CEBNode -> BindedRetNode.
277
278   // If this variable is set to 'true', the analyzer will evaluate the call
279   // statement we are about to exit again, instead of continuing execution
280   // from the statement after the call. This is useful for non-POD type array
281   // construction, where the CXXConstructExpr is referenced only once in the
282   // CFG, but we want to evaluate it as many times as the array has elements.
283 bool ShouldRepeatCall = false;
284
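  // When exiting the destructor of an array element, consult the pending
  // array destruction index to decide whether this destructor call has to be
  // repeated for the next element, and drop the binding of the callee's
  // 'this' region, which referred to the element just destroyed.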
285 if (const auto *DtorDecl =
286 dyn_cast_or_null<CXXDestructorDecl>(Call->getDecl())) {
287 if (auto Idx = getPendingArrayDestruction(state, callerCtx)) {
288 ShouldRepeatCall = *Idx > 0;
289
290 auto ThisVal = svalBuilder.getCXXThis(DtorDecl->getParent(), calleeCtx);
291 state = state->killBinding(ThisVal);
292 }
293 }
294
295 // If the callee returns an expression, bind its value to CallExpr.
296 if (CE) {
297 if (const ReturnStmt *RS = dyn_cast_or_null<ReturnStmt>(LastSt)) {
298 const LocationContext *LCtx = CEBNode->getLocationContext();
299 SVal V = state->getSVal(RS, LCtx);
300
301 // Ensure that the return type matches the type of the returned Expr.
302 if (wasDifferentDeclUsedForInlining(Call, calleeCtx)) {
303 QualType ReturnedTy =
304 CallEvent::getDeclaredResultType(calleeCtx->getDecl());
305 if (!ReturnedTy.isNull()) {
306 if (const Expr *Ex = dyn_cast<Expr>(CE)) {
307 V = adjustReturnValue(V, Ex->getType(), ReturnedTy,
308 getStoreManager());
309 }
310 }
311 }
312
313 state = state->BindExpr(CE, callerCtx, V);
314 }
315
316 // Bind the constructed object value to CXXConstructExpr.
317 if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) {
318 loc::MemRegionVal This =
319 svalBuilder.getCXXThis(CCE->getConstructor()->getParent(), calleeCtx);
320 SVal ThisV = state->getSVal(This);
321 ThisV = state->getSVal(ThisV.castAs<Loc>());
322 state = state->BindExpr(CCE, callerCtx, ThisV);
323
324 ShouldRepeatCall = shouldRepeatCtorCall(state, CCE, callerCtx);
325 }
326
327 if (const auto *CNE = dyn_cast<CXXNewExpr>(CE)) {
328 // We are currently evaluating a CXXNewAllocator CFGElement. It takes a
329 // while to reach the actual CXXNewExpr element from here, so keep the
330 // region for later use.
331 // Additionally cast the return value of the inlined operator new
332 // (which is of type 'void *') to the correct object type.
333 SVal AllocV = state->getSVal(CNE, callerCtx);
334 AllocV = svalBuilder.evalCast(
335 AllocV, CNE->getType(),
336 getContext().getPointerType(getContext().VoidTy));
337
338 state = addObjectUnderConstruction(state, CNE, calleeCtx->getParent(),
339 AllocV);
340 }
341 }
342
343 if (!ShouldRepeatCall) {
344 state = removeStateTraitsUsedForArrayEvaluation(
345 state, dyn_cast_or_null<CXXConstructExpr>(CE), callerCtx);
346 }
347
348 // Step 3: BindedRetNode -> CleanedNodes
349 // If we can find a statement and a block in the inlined function, run remove
350 // dead bindings before returning from the call. This is important to ensure
351 // that we report the issues such as leaks in the stack contexts in which
352 // they occurred.
353 ExplodedNodeSet CleanedNodes;
354 if (LastSt && Blk && AMgr.options.AnalysisPurgeOpt != PurgeNone) {
355 static SimpleProgramPointTag retValBind("ExprEngine", "Bind Return Value");
356 PostStmt Loc(LastSt, calleeCtx, &retValBind);
357 bool isNew;
358 ExplodedNode *BindedRetNode = G.getNode(Loc, state, false, &isNew);
359 BindedRetNode->addPredecessor(CEBNode, G);
360 if (!isNew)
361 return;
362
363 NodeBuilderContext Ctx(getCoreEngine(), Blk, BindedRetNode);
364 currBldrCtx = &Ctx;
365     // Here, we call the Symbol Reaper with a null statement and the callee's
366     // location context, telling it to clean up everything in the callee's
367     // context (and its children). We use the callee's function body as a
368     // diagnostic statement, with which the program point will be associated.
369 removeDead(BindedRetNode, CleanedNodes, nullptr, calleeCtx,
370 calleeCtx->getAnalysisDeclContext()->getBody(),
371 ProgramPoint::PostStmtPurgeDeadSymbolsKind);
372 currBldrCtx = nullptr;
373 } else {
374 CleanedNodes.Add(CEBNode);
375 }
376
377 for (ExplodedNodeSet::iterator I = CleanedNodes.begin(),
378 E = CleanedNodes.end(); I != E; ++I) {
379
380 // Step 4: Generate the CallExit and leave the callee's context.
381 // CleanedNodes -> CEENode
382 CallExitEnd Loc(calleeCtx, callerCtx);
383 bool isNew;
384 ProgramStateRef CEEState = (*I == CEBNode) ? state : (*I)->getState();
385
386 ExplodedNode *CEENode = G.getNode(Loc, CEEState, false, &isNew);
387 CEENode->addPredecessor(*I, G);
388 if (!isNew)
389 return;
390
391 // Step 5: Perform the post-condition check of the CallExpr and enqueue the
392 // result onto the work list.
393 // CEENode -> Dst -> WorkList
394 NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), CEENode);
395 SaveAndRestore<const NodeBuilderContext *> NBCSave(currBldrCtx, &Ctx);
396 SaveAndRestore CBISave(currStmtIdx, calleeCtx->getIndex());
397
398 CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState);
399
400 ExplodedNodeSet DstPostCall;
401 if (llvm::isa_and_nonnull<CXXNewExpr>(CE)) {
402 ExplodedNodeSet DstPostPostCallCallback;
403 getCheckerManager().runCheckersForPostCall(DstPostPostCallCallback,
404 CEENode, *UpdatedCall, *this,
405 /*wasInlined=*/true);
406 for (ExplodedNode *I : DstPostPostCallCallback) {
407 getCheckerManager().runCheckersForNewAllocator(
408 cast<CXXAllocatorCall>(*UpdatedCall), DstPostCall, I, *this,
409 /*wasInlined=*/true);
410 }
411 } else {
412 getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode,
413 *UpdatedCall, *this,
414 /*wasInlined=*/true);
415 }
416 ExplodedNodeSet Dst;
417 if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
418 getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, *Msg,
419 *this,
420 /*wasInlined=*/true);
421 } else if (CE &&
422 !(isa<CXXNewExpr>(CE) && // Called when visiting CXXNewExpr.
423 AMgr.getAnalyzerOptions().MayInlineCXXAllocator)) {
424 getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE,
425 *this, /*wasInlined=*/true);
426 } else {
427 Dst.insert(DstPostCall);
428 }
429
430 // Enqueue the next element in the block.
431 for (ExplodedNodeSet::iterator PSI = Dst.begin(), PSE = Dst.end();
432 PSI != PSE; ++PSI) {
433 unsigned Idx = calleeCtx->getIndex() + (ShouldRepeatCall ? 0 : 1);
434
435 Engine.getWorkList()->enqueue(*PSI, calleeCtx->getCallSiteBlock(), Idx);
436 }
437 }
438 }
439
440 bool ExprEngine::isSmall(AnalysisDeclContext *ADC) const {
441   // When there are no branches in the function, it means that there's no
442   // exponential complexity introduced by inlining such a function.
443 // Such functions also don't trigger various fundamental problems
444 // with our inlining mechanism, such as the problem of
445 // inlined defensive checks. Hence isLinear().
446 const CFG *Cfg = ADC->getCFG();
447 return Cfg->isLinear() || Cfg->size() <= AMgr.options.AlwaysInlineSize;
448 }
449
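// A function is considered "large" if its CFG has at least
// MinCFGSizeTreatFunctionsAsLarge blocks; such functions are inlined at most
// MaxTimesInlineLarge times (see shouldInlineCall below).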
450 bool ExprEngine::isLarge(AnalysisDeclContext *ADC) const {
451 const CFG *Cfg = ADC->getCFG();
452 return Cfg->size() >= AMgr.options.MinCFGSizeTreatFunctionsAsLarge;
453 }
454
455 bool ExprEngine::isHuge(AnalysisDeclContext *ADC) const {
456 const CFG *Cfg = ADC->getCFG();
457 return Cfg->getNumBlockIDs() > AMgr.options.MaxInlinableSize;
458 }
459
460 void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx,
461 bool &IsRecursive, unsigned &StackDepth) {
462 IsRecursive = false;
463 StackDepth = 0;
464
465 while (LCtx) {
466 if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LCtx)) {
467 const Decl *DI = SFC->getDecl();
468
469 // Mark recursive (and mutually recursive) functions and always count
470 // them when measuring the stack depth.
471 if (DI == D) {
472 IsRecursive = true;
473 ++StackDepth;
474 LCtx = LCtx->getParent();
475 continue;
476 }
477
478 // Do not count the small functions when determining the stack depth.
479 AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(DI);
480 if (!isSmall(CalleeADC))
481 ++StackDepth;
482 }
483 LCtx = LCtx->getParent();
484 }
485 }
486
487 // The GDM component containing the dynamic dispatch bifurcation info. When
488 // the exact type of the receiver is not known, we want to explore both paths -
489 // one on which we do inline it and the other one on which we don't. This is
490 // done to ensure we do not drop coverage.
491 // This is the map from the receiver region to a bool, specifying whether we
492 // consider this region's information precise or not along the given path.
493 namespace {
494 enum DynamicDispatchMode {
495 DynamicDispatchModeInlined = 1,
496 DynamicDispatchModeConservative
497 };
498 } // end anonymous namespace
499
500 REGISTER_MAP_WITH_PROGRAMSTATE(DynamicDispatchBifurcationMap,
501 const MemRegion *, unsigned)
502 REGISTER_TRAIT_WITH_PROGRAMSTATE(CTUDispatchBifurcation, bool)
503
504 void ExprEngine::ctuBifurcate(const CallEvent &Call, const Decl *D,
505 NodeBuilder &Bldr, ExplodedNode *Pred,
506 ProgramStateRef State) {
507 ProgramStateRef ConservativeEvalState = nullptr;
508 if (Call.isForeign() && !isSecondPhaseCTU()) {
509 const auto IK = AMgr.options.getCTUPhase1Inlining();
510 const bool DoInline = IK == CTUPhase1InliningKind::All ||
511 (IK == CTUPhase1InliningKind::Small &&
512 isSmall(AMgr.getAnalysisDeclContext(D)));
513 if (DoInline) {
514 inlineCall(Engine.getWorkList(), Call, D, Bldr, Pred, State);
515 return;
516 }
517 const bool BState = State->get<CTUDispatchBifurcation>();
518 if (!BState) { // This is the first time we see this foreign function.
519 // Enqueue it to be analyzed in the second (ctu) phase.
520 inlineCall(Engine.getCTUWorkList(), Call, D, Bldr, Pred, State);
521 // Conservatively evaluate in the first phase.
522 ConservativeEvalState = State->set<CTUDispatchBifurcation>(true);
523 conservativeEvalCall(Call, Bldr, Pred, ConservativeEvalState);
524 } else {
525 conservativeEvalCall(Call, Bldr, Pred, State);
526 }
527 return;
528 }
529 inlineCall(Engine.getWorkList(), Call, D, Bldr, Pred, State);
530 }
531
532 void ExprEngine::inlineCall(WorkList *WList, const CallEvent &Call,
533 const Decl *D, NodeBuilder &Bldr,
534 ExplodedNode *Pred, ProgramStateRef State) {
535 assert(D);
536
537 const LocationContext *CurLC = Pred->getLocationContext();
538 const StackFrameContext *CallerSFC = CurLC->getStackFrame();
539 const LocationContext *ParentOfCallee = CallerSFC;
540 if (Call.getKind() == CE_Block &&
541 !cast<BlockCall>(Call).isConversionFromLambda()) {
542 const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
543 assert(BR && "If we have the block definition we should have its region");
544 AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
545 ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
546 cast<BlockDecl>(D),
547 BR);
548 }
549
550 // This may be NULL, but that's fine.
551 const Expr *CallE = Call.getOriginExpr();
552
553 // Construct a new stack frame for the callee.
554 AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
555 const StackFrameContext *CalleeSFC =
556 CalleeADC->getStackFrame(ParentOfCallee, CallE, currBldrCtx->getBlock(),
557 currBldrCtx->blockCount(), currStmtIdx);
558
559 CallEnter Loc(CallE, CalleeSFC, CurLC);
560
561 // Construct a new state which contains the mapping from actual to
562 // formal arguments.
563 State = State->enterStackFrame(Call, CalleeSFC);
564
565 bool isNew;
566 if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
567 N->addPredecessor(Pred, G);
568 if (isNew)
569 WList->enqueue(N);
570 }
571
572 // If we decided to inline the call, the successor has been manually
573 // added onto the work list so remove it from the node builder.
574 Bldr.takeNodes(Pred);
575
576 NumInlinedCalls++;
577 Engine.FunctionSummaries->bumpNumTimesInlined(D);
578
579   // Do not mark as visited in the 2nd run (CTUWList), so the function will
580   // be visited as top-level; this way we won't lose reports in non-CTU
581   // mode. Consider the case when a function in a foreign TU calls back
582   // into the main TU.
583 // Note, during the 1st run, it doesn't matter if we mark the foreign
584 // functions as visited (or not) because they can never appear as a top level
585 // function in the main TU.
586 if (!isSecondPhaseCTU())
587 // Mark the decl as visited.
588 if (VisitedCallees)
589 VisitedCallees->insert(D);
590 }
591
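// Returns the state with the ReplayWithoutInlining trait removed if the given
// call site was marked for replay without inlining (i.e. a previous attempt
// to inline it was abandoned); returns null otherwise.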
592 static ProgramStateRef getInlineFailedState(ProgramStateRef State,
593 const Stmt *CallE) {
594 const void *ReplayState = State->get<ReplayWithoutInlining>();
595 if (!ReplayState)
596 return nullptr;
597
598 assert(ReplayState == CallE && "Backtracked to the wrong call.");
599 (void)CallE;
600
601 return State->remove<ReplayWithoutInlining>();
602 }
603
604 void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
605 ExplodedNodeSet &dst) {
606 // Perform the previsit of the CallExpr.
607 ExplodedNodeSet dstPreVisit;
608 getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, CE, *this);
609
610 // Get the call in its initial state. We use this as a template to perform
611 // all the checks.
612 CallEventManager &CEMgr = getStateManager().getCallEventManager();
613 CallEventRef<> CallTemplate
614 = CEMgr.getSimpleCall(CE, Pred->getState(), Pred->getLocationContext());
615
616 // Evaluate the function call. We try each of the checkers
617   // to see if they can evaluate the function call.
618 ExplodedNodeSet dstCallEvaluated;
619 for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end();
620 I != E; ++I) {
621 evalCall(dstCallEvaluated, *I, *CallTemplate);
622 }
623
624 // Finally, perform the post-condition check of the CallExpr and store
625 // the created nodes in 'Dst'.
626 // Note that if the call was inlined, dstCallEvaluated will be empty.
627 // The post-CallExpr check will occur in processCallExit.
628 getCheckerManager().runCheckersForPostStmt(dst, dstCallEvaluated, CE,
629 *this);
630 }
631
632 ProgramStateRef ExprEngine::finishArgumentConstruction(ProgramStateRef State,
633 const CallEvent &Call) {
634 const Expr *E = Call.getOriginExpr();
635 // FIXME: Constructors to placement arguments of operator new
636 // are not supported yet.
637 if (!E || isa<CXXNewExpr>(E))
638 return State;
639
640 const LocationContext *LC = Call.getLocationContext();
641 for (unsigned CallI = 0, CallN = Call.getNumArgs(); CallI != CallN; ++CallI) {
642 unsigned I = Call.getASTArgumentIndex(CallI);
643 if (std::optional<SVal> V = getObjectUnderConstruction(State, {E, I}, LC)) {
644 SVal VV = *V;
645 (void)VV;
646 assert(cast<VarRegion>(VV.castAs<loc::MemRegionVal>().getRegion())
647 ->getStackFrame()->getParent()
648 ->getStackFrame() == LC->getStackFrame());
649 State = finishObjectConstruction(State, {E, I}, LC);
650 }
651 }
652
653 return State;
654 }
655
656 void ExprEngine::finishArgumentConstruction(ExplodedNodeSet &Dst,
657 ExplodedNode *Pred,
658 const CallEvent &Call) {
659 ProgramStateRef State = Pred->getState();
660 ProgramStateRef CleanedState = finishArgumentConstruction(State, Call);
661 if (CleanedState == State) {
662 Dst.insert(Pred);
663 return;
664 }
665
666 const Expr *E = Call.getOriginExpr();
667 const LocationContext *LC = Call.getLocationContext();
668 NodeBuilder B(Pred, Dst, *currBldrCtx);
669 static SimpleProgramPointTag Tag("ExprEngine",
670 "Finish argument construction");
671 PreStmt PP(E, LC, &Tag);
672 B.generateNode(PP, CleanedState, Pred);
673 }
674
675 void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
676 const CallEvent &Call) {
677 // WARNING: At this time, the state attached to 'Call' may be older than the
678 // state in 'Pred'. This is a minor optimization since CheckerManager will
679 // use an updated CallEvent instance when calling checkers, but if 'Call' is
680 // ever used directly in this function all callers should be updated to pass
681 // the most recent state. (It is probably not worth doing the work here since
682 // for some callers this will not be necessary.)
683
684 // Run any pre-call checks using the generic call interface.
685 ExplodedNodeSet dstPreVisit;
686 getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred,
687 Call, *this);
688
689 // Actually evaluate the function call. We try each of the checkers
690   // to see if they can evaluate the function call, and get a callback at
691 // defaultEvalCall if all of them fail.
692 ExplodedNodeSet dstCallEvaluated;
693 getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
694 Call, *this, EvalCallOptions());
695
696 // If there were other constructors called for object-type arguments
697 // of this call, clean them up.
698 ExplodedNodeSet dstArgumentCleanup;
699 for (ExplodedNode *I : dstCallEvaluated)
700 finishArgumentConstruction(dstArgumentCleanup, I, Call);
701
702 ExplodedNodeSet dstPostCall;
703 getCheckerManager().runCheckersForPostCall(dstPostCall, dstArgumentCleanup,
704 Call, *this);
705
706   // Handle symbols that escaped while invalidating the regions above.
707   // Note that for inlined calls, the nodes were put back into the worklist,
708   // so we can assume that every node belongs to a conservative call at this
709   // point.
710
711 // Run pointerEscape callback with the newly conjured symbols.
712 SmallVector<std::pair<SVal, SVal>, 8> Escaped;
713 for (ExplodedNode *I : dstPostCall) {
714 NodeBuilder B(I, Dst, *currBldrCtx);
715 ProgramStateRef State = I->getState();
716 Escaped.clear();
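    // Collect the regions passed through non-const pointer or reference
    // arguments, together with their current values: these out-parameters may
    // have been overwritten by the call and are reported as escaping below.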
717 {
718 unsigned Arg = -1;
719 for (const ParmVarDecl *PVD : Call.parameters()) {
720 ++Arg;
721 QualType ParamTy = PVD->getType();
722 if (ParamTy.isNull() ||
723 (!ParamTy->isPointerType() && !ParamTy->isReferenceType()))
724 continue;
725 QualType Pointee = ParamTy->getPointeeType();
726 if (Pointee.isConstQualified() || Pointee->isVoidType())
727 continue;
728 if (const MemRegion *MR = Call.getArgSVal(Arg).getAsRegion())
729 Escaped.emplace_back(loc::MemRegionVal(MR), State->getSVal(MR, Pointee));
730 }
731 }
732
733 State = processPointerEscapedOnBind(State, Escaped, I->getLocationContext(),
734 PSK_EscapeOutParameters, &Call);
735
736 if (State == I->getState())
737 Dst.insert(I);
738 else
739 B.generateNode(I->getLocation(), State, I);
740 }
741 }
742
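// Bind a value to the origin expression of the call: methods of known
// Objective-C families return their receiver, constructors bind the object
// under construction, and everything else gets a freshly conjured value (a
// temporary region for calls returning an object by value, or a heap symbol
// with a recorded dynamic extent for a replaceable global operator new).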
743 ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
744 const LocationContext *LCtx,
745 ProgramStateRef State) {
746 const Expr *E = Call.getOriginExpr();
747 if (!E)
748 return State;
749
750 // Some method families have known return values.
751 if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) {
752 switch (Msg->getMethodFamily()) {
753 default:
754 break;
755 case OMF_autorelease:
756 case OMF_retain:
757 case OMF_self: {
758 // These methods return their receivers.
759 return State->BindExpr(E, LCtx, Msg->getReceiverSVal());
760 }
761 }
762 } else if (const CXXConstructorCall *C = dyn_cast<CXXConstructorCall>(&Call)){
763 SVal ThisV = C->getCXXThisVal();
764 ThisV = State->getSVal(ThisV.castAs<Loc>());
765 return State->BindExpr(E, LCtx, ThisV);
766 }
767
768 SVal R;
769 QualType ResultTy = Call.getResultType();
770 unsigned Count = currBldrCtx->blockCount();
771 if (auto RTC = getCurrentCFGElement().getAs<CFGCXXRecordTypedCall>()) {
772 // Conjure a temporary if the function returns an object by value.
773 SVal Target;
774 assert(RTC->getStmt() == Call.getOriginExpr());
775 EvalCallOptions CallOpts; // FIXME: We won't really need those.
776 std::tie(State, Target) = handleConstructionContext(
777 Call.getOriginExpr(), State, currBldrCtx, LCtx,
778 RTC->getConstructionContext(), CallOpts);
779 const MemRegion *TargetR = Target.getAsRegion();
780 assert(TargetR);
781     // Invalidate the region so that it doesn't look uninitialized. If this is
782 // a field or element constructor, we do not want to invalidate
783 // the whole structure. Pointer escape is meaningless because
784 // the structure is a product of conservative evaluation
785 // and therefore contains nothing interesting at this point.
786 RegionAndSymbolInvalidationTraits ITraits;
787 ITraits.setTrait(TargetR,
788 RegionAndSymbolInvalidationTraits::TK_DoNotInvalidateSuperRegion);
789 State = State->invalidateRegions(TargetR, E, Count, LCtx,
790 /* CausesPointerEscape=*/false, nullptr,
791 &Call, &ITraits);
792
793 R = State->getSVal(Target.castAs<Loc>(), E->getType());
794 } else {
795 // Conjure a symbol if the return value is unknown.
796
797 // See if we need to conjure a heap pointer instead of
798 // a regular unknown pointer.
799 const auto *CNE = dyn_cast<CXXNewExpr>(E);
800 if (CNE && CNE->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
801 R = svalBuilder.getConjuredHeapSymbolVal(E, LCtx, Count);
802 const MemRegion *MR = R.getAsRegion()->StripCasts();
803
804 // Store the extent of the allocated object(s).
805 SVal ElementCount;
806 if (const Expr *SizeExpr = CNE->getArraySize().value_or(nullptr)) {
807 ElementCount = State->getSVal(SizeExpr, LCtx);
808 } else {
809 ElementCount = svalBuilder.makeIntVal(1, /*IsUnsigned=*/true);
810 }
811
812 SVal ElementSize = getElementExtent(CNE->getAllocatedType(), svalBuilder);
813
814 SVal Size =
815 svalBuilder.evalBinOp(State, BO_Mul, ElementCount, ElementSize,
816 svalBuilder.getArrayIndexType());
817
818 // FIXME: This line is to prevent a crash. For more details please check
819 // issue #56264.
820 if (Size.isUndef())
821 Size = UnknownVal();
822
823 State = setDynamicExtent(State, MR, Size.castAs<DefinedOrUnknownSVal>(),
824 svalBuilder);
825 } else {
826 R = svalBuilder.conjureSymbolVal(nullptr, E, LCtx, ResultTy, Count);
827 }
828 }
829 return State->BindExpr(E, LCtx, R);
830 }
831
832 // Conservatively evaluate the call by invalidating regions and binding
833 // a conjured return value.
834 void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
835 ExplodedNode *Pred, ProgramStateRef State) {
836 State = Call.invalidateRegions(currBldrCtx->blockCount(), State);
837 State = bindReturnValue(Call, Pred->getLocationContext(), State);
838
839 // And make the result node.
840 Bldr.generateNode(Call.getProgramPoint(), State, Pred);
841 }
842
843 ExprEngine::CallInlinePolicy
844 ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
845 AnalyzerOptions &Opts,
846 const EvalCallOptions &CallOpts) {
847 const LocationContext *CurLC = Pred->getLocationContext();
848 const StackFrameContext *CallerSFC = CurLC->getStackFrame();
849 switch (Call.getKind()) {
850 case CE_Function:
851 case CE_Block:
852 break;
853 case CE_CXXMember:
854 case CE_CXXMemberOperator:
855 if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions))
856 return CIP_DisallowedAlways;
857 break;
858 case CE_CXXConstructor: {
859 if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors))
860 return CIP_DisallowedAlways;
861
862 const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);
863
864 const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();
865
866 auto CCE = getCurrentCFGElement().getAs<CFGConstructor>();
867 const ConstructionContext *CC = CCE ? CCE->getConstructionContext()
868 : nullptr;
869
870 if (llvm::isa_and_nonnull<NewAllocatedObjectConstructionContext>(CC) &&
871 !Opts.MayInlineCXXAllocator)
872 return CIP_DisallowedOnce;
873
874 if (CallOpts.IsArrayCtorOrDtor) {
875 if (!shouldInlineArrayConstruction(Pred->getState(), CtorExpr, CurLC))
876 return CIP_DisallowedOnce;
877 }
878
879 // Inlining constructors requires including initializers in the CFG.
880 const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
881 assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers");
882 (void)ADC;
883
884 // If the destructor is trivial, it's always safe to inline the constructor.
885 if (Ctor.getDecl()->getParent()->hasTrivialDestructor())
886 break;
887
888 // For other types, only inline constructors if destructor inlining is
889 // also enabled.
890 if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
891 return CIP_DisallowedAlways;
892
893 if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete) {
894 // If we don't handle temporary destructors, we shouldn't inline
895 // their constructors.
896 if (CallOpts.IsTemporaryCtorOrDtor &&
897 !Opts.ShouldIncludeTemporaryDtorsInCFG)
898 return CIP_DisallowedOnce;
899
900 // If we did not find the correct this-region, it would be pointless
901 // to inline the constructor. Instead we will simply invalidate
902 // the fake temporary target.
903 if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion)
904 return CIP_DisallowedOnce;
905
906 // If the temporary is lifetime-extended by binding it to a reference-type
907 // field within an aggregate, automatic destructors don't work properly.
908 if (CallOpts.IsTemporaryLifetimeExtendedViaAggregate)
909 return CIP_DisallowedOnce;
910 }
911
912 break;
913 }
914 case CE_CXXInheritedConstructor: {
915 // This doesn't really increase the cost of inlining ever, because
916 // the stack frame of the inherited constructor is trivial.
917 return CIP_Allowed;
918 }
919 case CE_CXXDestructor: {
920 if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
921 return CIP_DisallowedAlways;
922
923 // Inlining destructors requires building the CFG correctly.
924 const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
925 assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors");
926 (void)ADC;
927
928 if (CallOpts.IsArrayCtorOrDtor) {
929 if (!shouldInlineArrayDestruction(getElementCountOfArrayBeingDestructed(
930 Call, Pred->getState(), svalBuilder))) {
931 return CIP_DisallowedOnce;
932 }
933 }
934
935 // Allow disabling temporary destructor inlining with a separate option.
936 if (CallOpts.IsTemporaryCtorOrDtor &&
937 !Opts.MayInlineCXXTemporaryDtors)
938 return CIP_DisallowedOnce;
939
940 // If we did not find the correct this-region, it would be pointless
941 // to inline the destructor. Instead we will simply invalidate
942 // the fake temporary target.
943 if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion)
944 return CIP_DisallowedOnce;
945 break;
946 }
947 case CE_CXXDeallocator:
948 [[fallthrough]];
949 case CE_CXXAllocator:
950 if (Opts.MayInlineCXXAllocator)
951 break;
952 // Do not inline allocators until we model deallocators.
953 // This is unfortunate, but basically necessary for smart pointers and such.
954 return CIP_DisallowedAlways;
955 case CE_ObjCMessage:
956 if (!Opts.MayInlineObjCMethod)
957 return CIP_DisallowedAlways;
958 if (!(Opts.getIPAMode() == IPAK_DynamicDispatch ||
959 Opts.getIPAMode() == IPAK_DynamicDispatchBifurcate))
960 return CIP_DisallowedAlways;
961 break;
962 }
963
964 return CIP_Allowed;
965 }
966
967 /// Returns true if the given C++ class contains a member with the given name.
968 static bool hasMember(const ASTContext &Ctx, const CXXRecordDecl *RD,
969 StringRef Name) {
970 const IdentifierInfo &II = Ctx.Idents.get(Name);
971 return RD->hasMemberName(Ctx.DeclarationNames.getIdentifier(&II));
972 }
973
974 /// Returns true if the given C++ class is a container or iterator.
975 ///
976 /// Our heuristic for this is whether it contains a method named 'begin()' or a
977 /// nested type named 'iterator' or 'iterator_category'.
978 static bool isContainerClass(const ASTContext &Ctx, const CXXRecordDecl *RD) {
979 return hasMember(Ctx, RD, "begin") ||
980 hasMember(Ctx, RD, "iterator") ||
981 hasMember(Ctx, RD, "iterator_category");
982 }
983
984 /// Returns true if the given function refers to a method of a C++ container
985 /// or iterator.
986 ///
987 /// We generally do a poor job modeling most containers right now, and might
988 /// prefer not to inline their methods.
989 static bool isContainerMethod(const ASTContext &Ctx,
990 const FunctionDecl *FD) {
991 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
992 return isContainerClass(Ctx, MD->getParent());
993 return false;
994 }
995
996 /// Returns true if the given function is the destructor of a class named
997 /// "shared_ptr".
998 static bool isCXXSharedPtrDtor(const FunctionDecl *FD) {
999 const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(FD);
1000 if (!Dtor)
1001 return false;
1002
1003 const CXXRecordDecl *RD = Dtor->getParent();
1004 if (const IdentifierInfo *II = RD->getDeclName().getAsIdentifierInfo())
1005 if (II->isStr("shared_ptr"))
1006 return true;
1007
1008 return false;
1009 }
1010
1011 /// Returns true if the function in \p CalleeADC may be inlined in general.
1012 ///
1013 /// This checks static properties of the function, such as its signature and
1014 /// CFG, to determine whether the analyzer should ever consider inlining it,
1015 /// in any context.
1016 bool ExprEngine::mayInlineDecl(AnalysisDeclContext *CalleeADC) const {
1017 AnalyzerOptions &Opts = AMgr.getAnalyzerOptions();
1018 // FIXME: Do not inline variadic calls.
1019 if (CallEvent::isVariadic(CalleeADC->getDecl()))
1020 return false;
1021
1022 // Check certain C++-related inlining policies.
1023 ASTContext &Ctx = CalleeADC->getASTContext();
1024 if (Ctx.getLangOpts().CPlusPlus) {
1025 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeADC->getDecl())) {
1026 // Conditionally control the inlining of template functions.
1027 if (!Opts.MayInlineTemplateFunctions)
1028 if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
1029 return false;
1030
1031 // Conditionally control the inlining of C++ standard library functions.
1032 if (!Opts.MayInlineCXXStandardLibrary)
1033 if (Ctx.getSourceManager().isInSystemHeader(FD->getLocation()))
1034 if (AnalysisDeclContext::isInStdNamespace(FD))
1035 return false;
1036
1037 // Conditionally control the inlining of methods on objects that look
1038 // like C++ containers.
1039 if (!Opts.MayInlineCXXContainerMethods)
1040 if (!AMgr.isInCodeFile(FD->getLocation()))
1041 if (isContainerMethod(Ctx, FD))
1042 return false;
1043
1044 // Conditionally control the inlining of the destructor of C++ shared_ptr.
1045 // We don't currently do a good job modeling shared_ptr because we can't
1046       // see the reference count, so treating it as opaque is probably the best
1047 // idea.
1048 if (!Opts.MayInlineCXXSharedPtrDtor)
1049 if (isCXXSharedPtrDtor(FD))
1050 return false;
1051 }
1052 }
1053
1054 // It is possible that the CFG cannot be constructed.
1055 // Be safe, and check if the CalleeCFG is valid.
1056 const CFG *CalleeCFG = CalleeADC->getCFG();
1057 if (!CalleeCFG)
1058 return false;
1059
1060   // Do not inline huge functions.
1061 if (isHuge(CalleeADC))
1062 return false;
1063
1064 // It is possible that the live variables analysis cannot be
1065 // run. If so, bail out.
1066 if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
1067 return false;
1068
1069 return true;
1070 }
1071
1072 bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
1073 const ExplodedNode *Pred,
1074 const EvalCallOptions &CallOpts) {
1075 if (!D)
1076 return false;
1077
1078 AnalysisManager &AMgr = getAnalysisManager();
1079 AnalyzerOptions &Opts = AMgr.options;
1080 AnalysisDeclContextManager &ADCMgr = AMgr.getAnalysisDeclContextManager();
1081 AnalysisDeclContext *CalleeADC = ADCMgr.getContext(D);
1082
1083 // The auto-synthesized bodies are essential to inline as they are
1084 // usually small and commonly used. Note: we should do this check early on to
1085 // ensure we always inline these calls.
1086 if (CalleeADC->isBodyAutosynthesized())
1087 return true;
1088
1089 if (!AMgr.shouldInlineCall())
1090 return false;
1091
1092 // Check if this function has been marked as non-inlinable.
1093 std::optional<bool> MayInline = Engine.FunctionSummaries->mayInline(D);
1094 if (MayInline) {
1095 if (!*MayInline)
1096 return false;
1097
1098 } else {
1099 // We haven't actually checked the static properties of this function yet.
1100 // Do that now, and record our decision in the function summaries.
1101 if (mayInlineDecl(CalleeADC)) {
1102 Engine.FunctionSummaries->markMayInline(D);
1103 } else {
1104 Engine.FunctionSummaries->markShouldNotInline(D);
1105 return false;
1106 }
1107 }
1108
1109 // Check if we should inline a call based on its kind.
1110 // FIXME: this checks both static and dynamic properties of the call, which
1111 // means we're redoing a bit of work that could be cached in the function
1112 // summary.
1113 CallInlinePolicy CIP = mayInlineCallKind(Call, Pred, Opts, CallOpts);
1114 if (CIP != CIP_Allowed) {
1115 if (CIP == CIP_DisallowedAlways) {
1116 assert(!MayInline || *MayInline);
1117 Engine.FunctionSummaries->markShouldNotInline(D);
1118 }
1119 return false;
1120 }
1121
1122 // Do not inline if recursive or we've reached max stack frame count.
1123 bool IsRecursive = false;
1124 unsigned StackDepth = 0;
1125 examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth);
1126 if ((StackDepth >= Opts.InlineMaxStackDepth) &&
1127 (!isSmall(CalleeADC) || IsRecursive))
1128 return false;
1129
1130 // Do not inline large functions too many times.
1131 if ((Engine.FunctionSummaries->getNumTimesInlined(D) >
1132 Opts.MaxTimesInlineLarge) &&
1133 isLarge(CalleeADC)) {
1134 NumReachedInlineCountMax++;
1135 return false;
1136 }
1137
1138 if (HowToInline == Inline_Minimal && (!isSmall(CalleeADC) || IsRecursive))
1139 return false;
1140
1141 return true;
1142 }
1143
1144 bool ExprEngine::shouldInlineArrayConstruction(const ProgramStateRef State,
1145 const CXXConstructExpr *CE,
1146 const LocationContext *LCtx) {
1147 if (!CE)
1148 return false;
1149
1150   // FIXME: Handle other array types.
1151 if (const auto *CAT = dyn_cast<ConstantArrayType>(CE->getType())) {
1152 unsigned ArrSize = getContext().getConstantArrayElementCount(CAT);
1153
1154     // This might seem counter-intuitive at first glance, but the functions are
1155 // closely related. Reasoning about destructors depends only on the type
1156 // of the expression that initialized the memory region, which is the
1157 // CXXConstructExpr. So to avoid code repetition, the work is delegated
1158 // to the function that reasons about destructor inlining. Also note that
1159 // if the constructors of the array elements are inlined, the destructors
1160     // can also be inlined, and if the destructors can be inlined, it's safe to
1161 // inline the constructors.
1162 return shouldInlineArrayDestruction(ArrSize);
1163 }
1164
1165 // Check if we're inside an ArrayInitLoopExpr, and it's sufficiently small.
1166 if (auto Size = getPendingInitLoop(State, CE, LCtx))
1167 return shouldInlineArrayDestruction(*Size);
1168
1169 return false;
1170 }
1171
1172 bool ExprEngine::shouldInlineArrayDestruction(uint64_t Size) {
1173
1174 uint64_t maxAllowedSize = AMgr.options.maxBlockVisitOnPath;
1175
1176   // Declaring a 0-element array is also possible.
1177 return Size <= maxAllowedSize && Size > 0;
1178 }
1179
1180 bool ExprEngine::shouldRepeatCtorCall(ProgramStateRef State,
1181 const CXXConstructExpr *E,
1182 const LocationContext *LCtx) {
1183
1184 if (!E)
1185 return false;
1186
1187 auto Ty = E->getType();
1188
1189   // FIXME: Handle non-constant array types
1190 if (const auto *CAT = dyn_cast<ConstantArrayType>(Ty)) {
1191 unsigned Size = getContext().getConstantArrayElementCount(CAT);
1192 return Size > getIndexOfElementToConstruct(State, E, LCtx);
1193 }
1194
1195 if (auto Size = getPendingInitLoop(State, E, LCtx))
1196 return Size > getIndexOfElementToConstruct(State, E, LCtx);
1197
1198 return false;
1199 }
1200
1201 static bool isTrivialObjectAssignment(const CallEvent &Call) {
1202 const CXXInstanceCall *ICall = dyn_cast<CXXInstanceCall>(&Call);
1203 if (!ICall)
1204 return false;
1205
1206 const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(ICall->getDecl());
1207 if (!MD)
1208 return false;
1209 if (!(MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()))
1210 return false;
1211
1212 return MD->isTrivial();
1213 }
1214
1215 void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
1216 const CallEvent &CallTemplate,
1217 const EvalCallOptions &CallOpts) {
1218 // Make sure we have the most recent state attached to the call.
1219 ProgramStateRef State = Pred->getState();
1220 CallEventRef<> Call = CallTemplate.cloneWithState(State);
1221
1222 // Special-case trivial assignment operators.
1223 if (isTrivialObjectAssignment(*Call)) {
1224 performTrivialCopy(Bldr, Pred, *Call);
1225 return;
1226 }
1227
1228 // Try to inline the call.
1229 // The origin expression here is just used as a kind of checksum;
1230 // this should still be safe even for CallEvents that don't come from exprs.
1231 const Expr *E = Call->getOriginExpr();
1232
1233 ProgramStateRef InlinedFailedState = getInlineFailedState(State, E);
1234 if (InlinedFailedState) {
1235 // If we already tried once and failed, make sure we don't retry later.
1236 State = InlinedFailedState;
1237 } else {
1238 RuntimeDefinition RD = Call->getRuntimeDefinition();
1239 Call->setForeign(RD.isForeign());
1240 const Decl *D = RD.getDecl();
1241 if (shouldInlineCall(*Call, D, Pred, CallOpts)) {
1242 if (RD.mayHaveOtherDefinitions()) {
1243 AnalyzerOptions &Options = getAnalysisManager().options;
1244
1245 // Explore with and without inlining the call.
1246 if (Options.getIPAMode() == IPAK_DynamicDispatchBifurcate) {
1247 BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred);
1248 return;
1249 }
1250
1251 // Don't inline if we're not in any dynamic dispatch mode.
1252 if (Options.getIPAMode() != IPAK_DynamicDispatch) {
1253 conservativeEvalCall(*Call, Bldr, Pred, State);
1254 return;
1255 }
1256 }
1257 ctuBifurcate(*Call, D, Bldr, Pred, State);
1258 return;
1259 }
1260 }
1261
1262 // If we can't inline it, clean up the state traits used only if the function
1263 // is inlined.
1264 State = removeStateTraitsUsedForArrayEvaluation(
1265 State, dyn_cast_or_null<CXXConstructExpr>(E), Call->getLocationContext());
1266
1267 // Also handle the return value and invalidate the regions.
1268 conservativeEvalCall(*Call, Bldr, Pred, State);
1269 }
1270
1271 void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
1272 const CallEvent &Call, const Decl *D,
1273 NodeBuilder &Bldr, ExplodedNode *Pred) {
1274 assert(BifurReg);
1275 BifurReg = BifurReg->StripCasts();
1276
1277 // Check if we've performed the split already - note, we only want
1278 // to split the path once per memory region.
1279 ProgramStateRef State = Pred->getState();
1280 const unsigned *BState =
1281 State->get<DynamicDispatchBifurcationMap>(BifurReg);
1282 if (BState) {
1283 // If we are on "inline path", keep inlining if possible.
1284 if (*BState == DynamicDispatchModeInlined)
1285 ctuBifurcate(Call, D, Bldr, Pred, State);
1286 // If inline failed, or we are on the path where we assume we
1287 // don't have enough info about the receiver to inline, conjure the
1288 // return value and invalidate the regions.
1289 conservativeEvalCall(Call, Bldr, Pred, State);
1290 return;
1291 }
1292
1293 // If we got here, this is the first time we process a message to this
1294 // region, so split the path.
1295 ProgramStateRef IState =
1296 State->set<DynamicDispatchBifurcationMap>(BifurReg,
1297 DynamicDispatchModeInlined);
1298 ctuBifurcate(Call, D, Bldr, Pred, IState);
1299
1300 ProgramStateRef NoIState =
1301 State->set<DynamicDispatchBifurcationMap>(BifurReg,
1302 DynamicDispatchModeConservative);
1303 conservativeEvalCall(Call, Bldr, Pred, NoIState);
1304
1305 NumOfDynamicDispatchPathSplits++;
1306 }
1307
1308 void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
1309 ExplodedNodeSet &Dst) {
1310 ExplodedNodeSet dstPreVisit;
1311 getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, RS, *this);
1312
1313 StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx);
1314
1315 if (RS->getRetValue()) {
1316 for (ExplodedNodeSet::iterator it = dstPreVisit.begin(),
1317 ei = dstPreVisit.end(); it != ei; ++it) {
1318 B.generateNode(RS, *it, (*it)->getState());
1319 }
1320 }
1321 }
1322