//=-- ExprEngineCallAndReturn.cpp - Support for call/return -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines ExprEngine's support for calls and returns.
//
//===----------------------------------------------------------------------===//

#include "PrettyStackTraceLocationContext.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/ConstructionContext.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace clang;
using namespace ento;

#define DEBUG_TYPE "ExprEngine"

STATISTIC(NumOfDynamicDispatchPathSplits,
  "The # of times we split the path due to imprecise dynamic dispatch info");

STATISTIC(NumInlinedCalls,
  "The # of times we inlined a call");

STATISTIC(NumReachedInlineCountMax,
  "The # of times we reached inline count maximum");

void ExprEngine::processCallEnter(NodeBuilderContext& BC, CallEnter CE,
                                  ExplodedNode *Pred) {
  // Get the entry block in the CFG of the callee.
  const StackFrameContext *calleeCtx = CE.getCalleeContext();
  PrettyStackTraceLocationContext CrashInfo(calleeCtx);
  const CFGBlock *Entry = CE.getEntry();

  // Validate the CFG.
  assert(Entry->empty());
  assert(Entry->succ_size() == 1);

  // Get the solitary successor.
  const CFGBlock *Succ = *(Entry->succ_begin());

  // Construct an edge representing the starting location in the callee.
  BlockEdge Loc(Entry, Succ, calleeCtx);

  ProgramStateRef state = Pred->getState();

  // Construct a new node, notify checkers that analysis of the function has
  // begun, and add the resultant nodes to the worklist.
  bool isNew;
  ExplodedNode *Node = G.getNode(Loc, state, false, &isNew);
  Node->addPredecessor(Pred, G);
  if (isNew) {
    ExplodedNodeSet DstBegin;
    processBeginOfFunction(BC, Node, DstBegin, Loc);
    Engine.enqueue(DstBegin);
  }
}

// Find the last statement on the path to the exploded node and the
// corresponding Block.
static std::pair<const Stmt*,
                 const CFGBlock*> getLastStmt(const ExplodedNode *Node) {
  const Stmt *S = nullptr;
  const CFGBlock *Blk = nullptr;
  const StackFrameContext *SF = Node->getStackFrame();

  // Back up through the ExplodedGraph until we reach a statement node in this
  // stack frame.
  while (Node) {
    const ProgramPoint &PP = Node->getLocation();

    if (PP.getStackFrame() == SF) {
      if (Optional<StmtPoint> SP = PP.getAs<StmtPoint>()) {
        S = SP->getStmt();
        break;
      } else if (Optional<CallExitEnd> CEE = PP.getAs<CallExitEnd>()) {
        S = CEE->getCalleeContext()->getCallSite();
        if (S)
          break;

        // If there is no statement, this is an implicitly-generated call.
        // We'll walk backwards over it and then continue the loop to find
        // an actual statement.
        Optional<CallEnter> CE;
        do {
          Node = Node->getFirstPred();
          CE = Node->getLocationAs<CallEnter>();
        } while (!CE || CE->getCalleeContext() != CEE->getCalleeContext());

        // Continue searching the graph.
      } else if (Optional<BlockEdge> BE = PP.getAs<BlockEdge>()) {
        Blk = BE->getSrc();
      }
    } else if (Optional<CallEnter> CE = PP.getAs<CallEnter>()) {
      // If we reached the CallEnter for this function, it has no statements.
      if (CE->getCalleeContext() == SF)
        break;
    }

    if (Node->pred_empty())
      return std::make_pair(nullptr, nullptr);

    Node = *Node->pred_begin();
  }

  return std::make_pair(S, Blk);
}

/// Adjusts a return value when the called function's return type does not
/// match the caller's expression type. This can happen when a dynamic call
/// is devirtualized, and the overriding method has a covariant (more specific)
/// return type than the parent's method. For C++ objects, this means we need
/// to add base casts.
static SVal adjustReturnValue(SVal V, QualType ExpectedTy, QualType ActualTy,
                              StoreManager &StoreMgr) {
  // For now, the only adjustments we handle apply only to locations.
  if (!isa<Loc>(V))
    return V;

  // If the types already match, don't do any unnecessary work.
  ExpectedTy = ExpectedTy.getCanonicalType();
  ActualTy = ActualTy.getCanonicalType();
  if (ExpectedTy == ActualTy)
    return V;

  // No adjustment is needed between Objective-C pointer types.
  if (ExpectedTy->isObjCObjectPointerType() &&
      ActualTy->isObjCObjectPointerType())
    return V;

  // C++ object pointers may need "derived-to-base" casts.
  const CXXRecordDecl *ExpectedClass = ExpectedTy->getPointeeCXXRecordDecl();
  const CXXRecordDecl *ActualClass = ActualTy->getPointeeCXXRecordDecl();
  if (ExpectedClass && ActualClass) {
    CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                       /*DetectVirtual=*/false);
    if (ActualClass->isDerivedFrom(ExpectedClass, Paths) &&
        !Paths.isAmbiguous(ActualTy->getCanonicalTypeUnqualified())) {
      return StoreMgr.evalDerivedToBase(V, Paths.front());
    }
  }

  // Unfortunately, Objective-C does not enforce that overridden methods have
  // covariant return types, so we can't assert that that never happens.
  // Be safe and return UnknownVal().
  return UnknownVal();
}

void ExprEngine::removeDeadOnEndOfFunction(NodeBuilderContext& BC,
                                           ExplodedNode *Pred,
                                           ExplodedNodeSet &Dst) {
  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = nullptr;
  const CFGBlock *Blk = nullptr;
  std::tie(LastSt, Blk) = getLastStmt(Pred);
  if (!Blk || !LastSt) {
    Dst.Add(Pred);
    return;
  }

  // Here, we destroy the current location context. We use the current
  // function's entire body as a diagnostic statement, with which the program
  // point will be associated. However, we only want to use LastStmt as a
  // reference for what to clean up if it's a ReturnStmt; otherwise, everything
  // is dead.
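  // Install BC as the current node-builder context; the previous context is
  // restored automatically when this function returns.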
  SaveAndRestore<const NodeBuilderContext *> NodeContextRAII(currBldrCtx, &BC);
  const LocationContext *LCtx = Pred->getLocationContext();
  removeDead(Pred, Dst, dyn_cast<ReturnStmt>(LastSt), LCtx,
             LCtx->getAnalysisDeclContext()->getBody(),
             ProgramPoint::PostStmtPurgeDeadSymbolsKind);
}

static bool wasDifferentDeclUsedForInlining(CallEventRef<> Call,
    const StackFrameContext *calleeCtx) {
  const Decl *RuntimeCallee = calleeCtx->getDecl();
  const Decl *StaticDecl = Call->getDecl();
  assert(RuntimeCallee);
  if (!StaticDecl)
    return true;
  return RuntimeCallee->getCanonicalDecl() != StaticDecl->getCanonicalDecl();
}

/// The call exit is simulated with a sequence of nodes, which occur between
/// CallExitBegin and CallExitEnd. The following operations occur between the
/// two program points:
/// 1. CallExitBegin (triggers the start of call exit sequence)
/// 2. Bind the return value
/// 3. Run RemoveDeadBindings to clean up the dead symbols from the callee.
/// 4. CallExitEnd (switch to the caller context)
/// 5. PostStmt<CallExpr>
void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
  // Step 1: CEBNode was generated before the call.
  PrettyStackTraceLocationContext CrashInfo(CEBNode->getLocationContext());
  const StackFrameContext *calleeCtx = CEBNode->getStackFrame();

  // The parent context might not be a stack frame, so make sure we
  // look up the first enclosing stack frame.
  const StackFrameContext *callerCtx =
    calleeCtx->getParent()->getStackFrame();

  const Stmt *CE = calleeCtx->getCallSite();
  ProgramStateRef state = CEBNode->getState();
  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = nullptr;
  const CFGBlock *Blk = nullptr;
  std::tie(LastSt, Blk) = getLastStmt(CEBNode);

  // Generate a CallEvent /before/ cleaning the state, so that we can get the
  // correct value for 'this' (if necessary).
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state);

  // Step 2: generate node with bound return value: CEBNode -> BindedRetNode.

  // If this variable is set to 'true' the analyzer will evaluate the call
  // statement we are about to exit again, instead of continuing the execution
  // from the statement after the call. This is useful for non-POD type array
  // construction, where the CXXConstructExpr is referenced only once in the
  // CFG, but we want to evaluate it as many times as the array has elements.
  bool ShouldRepeatCall = false;

  // If the callee returns an expression, bind its value to CallExpr.
  if (CE) {
    if (const ReturnStmt *RS = dyn_cast_or_null<ReturnStmt>(LastSt)) {
      const LocationContext *LCtx = CEBNode->getLocationContext();
      SVal V = state->getSVal(RS, LCtx);

      // Ensure that the return type matches the type of the returned Expr.
      if (wasDifferentDeclUsedForInlining(Call, calleeCtx)) {
        QualType ReturnedTy =
          CallEvent::getDeclaredResultType(calleeCtx->getDecl());
        if (!ReturnedTy.isNull()) {
          if (const Expr *Ex = dyn_cast<Expr>(CE)) {
            V = adjustReturnValue(V, Ex->getType(), ReturnedTy,
                                  getStoreManager());
          }
        }
      }

      state = state->BindExpr(CE, callerCtx, V);
    }

    // Bind the constructed object value to CXXConstructExpr.
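    // Reading the this-region yields a pointer to the constructed object;
    // reading through that pointer yields the object's value.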
    if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) {
      loc::MemRegionVal This =
          svalBuilder.getCXXThis(CCE->getConstructor()->getParent(), calleeCtx);
      SVal ThisV = state->getSVal(This);
      ThisV = state->getSVal(ThisV.castAs<Loc>());
      state = state->BindExpr(CCE, callerCtx, ThisV);

      ShouldRepeatCall = shouldRepeatCtorCall(state, CCE, callerCtx);

      if (!ShouldRepeatCall) {
        if (getIndexOfElementToConstruct(state, CCE, callerCtx))
          state = removeIndexOfElementToConstruct(state, CCE, callerCtx);

        if (getPendingInitLoop(state, CCE, callerCtx))
          state = removePendingInitLoop(state, CCE, callerCtx);
      }
    }

    if (const auto *CNE = dyn_cast<CXXNewExpr>(CE)) {
      // We are currently evaluating a CXXNewAllocator CFGElement. It takes a
      // while to reach the actual CXXNewExpr element from here, so keep the
      // region for later use.
      // Additionally cast the return value of the inlined operator new
      // (which is of type 'void *') to the correct object type.
      SVal AllocV = state->getSVal(CNE, callerCtx);
      AllocV = svalBuilder.evalCast(
          AllocV, CNE->getType(),
          getContext().getPointerType(getContext().VoidTy));

      state = addObjectUnderConstruction(state, CNE, calleeCtx->getParent(),
                                         AllocV);
    }
  }

  // Step 3: BindedRetNode -> CleanedNodes
  // If we can find a statement and a block in the inlined function, run remove
  // dead bindings before returning from the call. This is important to ensure
  // that we report issues such as leaks in the stack contexts in which
  // they occurred.
  ExplodedNodeSet CleanedNodes;
  if (LastSt && Blk && AMgr.options.AnalysisPurgeOpt != PurgeNone) {
    static SimpleProgramPointTag retValBind("ExprEngine", "Bind Return Value");
    PostStmt Loc(LastSt, calleeCtx, &retValBind);
    bool isNew;
    ExplodedNode *BindedRetNode = G.getNode(Loc, state, false, &isNew);
    BindedRetNode->addPredecessor(CEBNode, G);
    if (!isNew)
      return;

    NodeBuilderContext Ctx(getCoreEngine(), Blk, BindedRetNode);
    currBldrCtx = &Ctx;
    // Here, we call the Symbol Reaper with 0 statement and callee location
    // context, telling it to clean up everything in the callee's context
    // (and its children). We use the callee's function body as a diagnostic
    // statement, with which the program point will be associated.
    removeDead(BindedRetNode, CleanedNodes, nullptr, calleeCtx,
               calleeCtx->getAnalysisDeclContext()->getBody(),
               ProgramPoint::PostStmtPurgeDeadSymbolsKind);
    currBldrCtx = nullptr;
  } else {
    CleanedNodes.Add(CEBNode);
  }

  for (ExplodedNodeSet::iterator I = CleanedNodes.begin(),
                                 E = CleanedNodes.end(); I != E; ++I) {

    // Step 4: Generate the CallExit and leave the callee's context.
    // CleanedNodes -> CEENode
    CallExitEnd Loc(calleeCtx, callerCtx);
    bool isNew;
    ProgramStateRef CEEState = (*I == CEBNode) ? state : (*I)->getState();

    ExplodedNode *CEENode = G.getNode(Loc, CEEState, false, &isNew);
    CEENode->addPredecessor(*I, G);
    if (!isNew)
      return;

    // Step 5: Perform the post-condition check of the CallExpr and enqueue the
    // result onto the work list.
    // CEENode -> Dst -> WorkList
    NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), CEENode);
    SaveAndRestore<const NodeBuilderContext*> NBCSave(currBldrCtx, &Ctx);
    SaveAndRestore<unsigned> CBISave(currStmtIdx, calleeCtx->getIndex());

    CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState);

    ExplodedNodeSet DstPostCall;
    if (llvm::isa_and_nonnull<CXXNewExpr>(CE)) {
      ExplodedNodeSet DstPostPostCallCallback;
      getCheckerManager().runCheckersForPostCall(DstPostPostCallCallback,
                                                 CEENode, *UpdatedCall, *this,
                                                 /*wasInlined=*/true);
      for (ExplodedNode *I : DstPostPostCallCallback) {
        getCheckerManager().runCheckersForNewAllocator(
            cast<CXXAllocatorCall>(*UpdatedCall), DstPostCall, I, *this,
            /*wasInlined=*/true);
      }
    } else {
      getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode,
                                                 *UpdatedCall, *this,
                                                 /*wasInlined=*/true);
    }
    ExplodedNodeSet Dst;
    if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
      getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, *Msg,
                                                        *this,
                                                        /*wasInlined=*/true);
    } else if (CE &&
               !(isa<CXXNewExpr>(CE) && // Called when visiting CXXNewExpr.
                 AMgr.getAnalyzerOptions().MayInlineCXXAllocator)) {
      getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE,
                                                 *this, /*wasInlined=*/true);
    } else {
      Dst.insert(DstPostCall);
    }

    // Enqueue the next element in the block.
    for (ExplodedNodeSet::iterator PSI = Dst.begin(), PSE = Dst.end();
         PSI != PSE; ++PSI) {
      unsigned Idx = calleeCtx->getIndex() + (ShouldRepeatCall ? 0 : 1);

      Engine.getWorkList()->enqueue(*PSI, calleeCtx->getCallSiteBlock(), Idx);
    }
  }
}

bool ExprEngine::isSmall(AnalysisDeclContext *ADC) const {
  // When there are no branches in the function, it means that there's no
  // exponential complexity introduced by inlining such a function. Such
  // functions also don't trigger various fundamental problems with our
  // inlining mechanism, such as the problem of inlined defensive checks.
  // Hence isLinear().
  const CFG *Cfg = ADC->getCFG();
  return Cfg->isLinear() || Cfg->size() <= AMgr.options.AlwaysInlineSize;
}

bool ExprEngine::isLarge(AnalysisDeclContext *ADC) const {
  const CFG *Cfg = ADC->getCFG();
  return Cfg->size() >= AMgr.options.MinCFGSizeTreatFunctionsAsLarge;
}

bool ExprEngine::isHuge(AnalysisDeclContext *ADC) const {
  const CFG *Cfg = ADC->getCFG();
  return Cfg->getNumBlockIDs() > AMgr.options.MaxInlinableSize;
}

void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx,
                                    bool &IsRecursive, unsigned &StackDepth) {
  IsRecursive = false;
  StackDepth = 0;

  while (LCtx) {
    if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LCtx)) {
      const Decl *DI = SFC->getDecl();

      // Mark recursive (and mutually recursive) functions and always count
      // them when measuring the stack depth.
      if (DI == D) {
        IsRecursive = true;
        ++StackDepth;
        LCtx = LCtx->getParent();
        continue;
      }

      // Do not count small functions when determining the stack depth.
      AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(DI);
      if (!isSmall(CalleeADC))
        ++StackDepth;
    }
    LCtx = LCtx->getParent();
  }
}

// The GDM component containing the dynamic dispatch bifurcation info. When
// the exact type of the receiver is not known, we want to explore both paths -
// one on which we do inline it and the other one on which we don't. This is
// done to ensure we do not drop coverage.
// This is the map from the receiver region to a bool, specifying whether we
// consider this region's information to be precise along the given path.
namespace {
  enum DynamicDispatchMode {
    DynamicDispatchModeInlined = 1,
    DynamicDispatchModeConservative
  };
} // end anonymous namespace

REGISTER_MAP_WITH_PROGRAMSTATE(DynamicDispatchBifurcationMap,
                               const MemRegion *, unsigned)
REGISTER_TRAIT_WITH_PROGRAMSTATE(CTUDispatchBifurcation, bool)

void ExprEngine::ctuBifurcate(const CallEvent &Call, const Decl *D,
                              NodeBuilder &Bldr, ExplodedNode *Pred,
                              ProgramStateRef State) {
  ProgramStateRef ConservativeEvalState = nullptr;
  if (Call.isForeign() && !isSecondPhaseCTU()) {
    const auto IK = AMgr.options.getCTUPhase1Inlining();
    const bool DoInline = IK == CTUPhase1InliningKind::All ||
                          (IK == CTUPhase1InliningKind::Small &&
                           isSmall(AMgr.getAnalysisDeclContext(D)));
    if (DoInline) {
      inlineCall(Engine.getWorkList(), Call, D, Bldr, Pred, State);
      return;
    }
    const bool BState = State->get<CTUDispatchBifurcation>();
    if (!BState) { // This is the first time we see this foreign function.
      // Enqueue it to be analyzed in the second (ctu) phase.
      inlineCall(Engine.getCTUWorkList(), Call, D, Bldr, Pred, State);
      // Conservatively evaluate in the first phase.
      ConservativeEvalState = State->set<CTUDispatchBifurcation>(true);
      conservativeEvalCall(Call, Bldr, Pred, ConservativeEvalState);
    } else {
      conservativeEvalCall(Call, Bldr, Pred, State);
    }
    return;
  }
  inlineCall(Engine.getWorkList(), Call, D, Bldr, Pred, State);
}

void ExprEngine::inlineCall(WorkList *WList, const CallEvent &Call,
                            const Decl *D, NodeBuilder &Bldr,
                            ExplodedNode *Pred, ProgramStateRef State) {
  assert(D);

  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getStackFrame();
  const LocationContext *ParentOfCallee = CallerSFC;
  if (Call.getKind() == CE_Block &&
      !cast<BlockCall>(Call).isConversionFromLambda()) {
    const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
    assert(BR && "If we have the block definition we should have its region");
    AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
    ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
                                                         cast<BlockDecl>(D),
                                                         BR);
  }

  // This may be NULL, but that's fine.
  const Expr *CallE = Call.getOriginExpr();

  // Construct a new stack frame for the callee.
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
  const StackFrameContext *CalleeSFC =
      CalleeADC->getStackFrame(ParentOfCallee, CallE, currBldrCtx->getBlock(),
                               currBldrCtx->blockCount(), currStmtIdx);

  CallEnter Loc(CallE, CalleeSFC, CurLC);

  // Construct a new state which contains the mapping from actual to
  // formal arguments.
  State = State->enterStackFrame(Call, CalleeSFC);

  bool isNew;
  if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
    N->addPredecessor(Pred, G);
    if (isNew)
      WList->enqueue(N);
  }

  // If we decided to inline the call, the successor has been manually
  // added onto the work list so remove it from the node builder.
  Bldr.takeNodes(Pred);

  NumInlinedCalls++;
  Engine.FunctionSummaries->bumpNumTimesInlined(D);

  // Do not mark as visited in the 2nd run (CTUWList), so the function will
  // be visited as top-level; this way we won't lose reports in non-ctu
  // mode. Consider the case when a function in a foreign TU calls back
  // into the main TU.
  // Note, during the 1st run, it doesn't matter if we mark the foreign
  // functions as visited (or not) because they can never appear as a top level
  // function in the main TU.
  if (!isSecondPhaseCTU())
    // Mark the decl as visited.
    if (VisitedCallees)
      VisitedCallees->insert(D);
}

static ProgramStateRef getInlineFailedState(ProgramStateRef State,
                                            const Stmt *CallE) {
  const void *ReplayState = State->get<ReplayWithoutInlining>();
  if (!ReplayState)
    return nullptr;

  assert(ReplayState == CallE && "Backtracked to the wrong call.");
  (void)CallE;

  return State->remove<ReplayWithoutInlining>();
}

void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
                               ExplodedNodeSet &dst) {
  // Perform the previsit of the CallExpr.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, CE, *this);

  // Get the call in its initial state. We use this as a template to perform
  // all the checks.
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> CallTemplate
    = CEMgr.getSimpleCall(CE, Pred->getState(), Pred->getLocationContext());

  // Evaluate the function call. We try each of the checkers
  // to see if they can evaluate the function call.
  ExplodedNodeSet dstCallEvaluated;
  for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end();
       I != E; ++I) {
    evalCall(dstCallEvaluated, *I, *CallTemplate);
  }

  // Finally, perform the post-condition check of the CallExpr and store
  // the created nodes in 'Dst'.
  // Note that if the call was inlined, dstCallEvaluated will be empty.
  // The post-CallExpr check will occur in processCallExit.
  getCheckerManager().runCheckersForPostStmt(dst, dstCallEvaluated, CE,
                                             *this);
}

ProgramStateRef ExprEngine::finishArgumentConstruction(ProgramStateRef State,
                                                       const CallEvent &Call) {
  const Expr *E = Call.getOriginExpr();
  // FIXME: Constructors to placement arguments of operator new
  // are not supported yet.
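  // Calls without an origin expression (such as implicitly-generated
  // destructor calls) have no argument constructors to finish.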
  if (!E || isa<CXXNewExpr>(E))
    return State;

  const LocationContext *LC = Call.getLocationContext();
  for (unsigned CallI = 0, CallN = Call.getNumArgs(); CallI != CallN; ++CallI) {
    unsigned I = Call.getASTArgumentIndex(CallI);
    if (Optional<SVal> V =
            getObjectUnderConstruction(State, {E, I}, LC)) {
      SVal VV = *V;
      (void)VV;
      assert(cast<VarRegion>(VV.castAs<loc::MemRegionVal>().getRegion())
                 ->getStackFrame()->getParent()
                 ->getStackFrame() == LC->getStackFrame());
      State = finishObjectConstruction(State, {E, I}, LC);
    }
  }

  return State;
}

void ExprEngine::finishArgumentConstruction(ExplodedNodeSet &Dst,
                                            ExplodedNode *Pred,
                                            const CallEvent &Call) {
  ProgramStateRef State = Pred->getState();
  ProgramStateRef CleanedState = finishArgumentConstruction(State, Call);
  if (CleanedState == State) {
    Dst.insert(Pred);
    return;
  }

  const Expr *E = Call.getOriginExpr();
  const LocationContext *LC = Call.getLocationContext();
  NodeBuilder B(Pred, Dst, *currBldrCtx);
  static SimpleProgramPointTag Tag("ExprEngine",
                                   "Finish argument construction");
  PreStmt PP(E, LC, &Tag);
  B.generateNode(PP, CleanedState, Pred);
}

void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
                          const CallEvent &Call) {
  // WARNING: At this time, the state attached to 'Call' may be older than the
  // state in 'Pred'. This is a minor optimization since CheckerManager will
  // use an updated CallEvent instance when calling checkers, but if 'Call' is
  // ever used directly in this function all callers should be updated to pass
  // the most recent state. (It is probably not worth doing the work here since
  // for some callers this will not be necessary.)

  // Run any pre-call checks using the generic call interface.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred,
                                            Call, *this);

  // Actually evaluate the function call. We try each of the checkers
  // to see if they can evaluate the function call, and get a callback at
  // defaultEvalCall if all of them fail.
  ExplodedNodeSet dstCallEvaluated;
  getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
                                             Call, *this, EvalCallOptions());

  // If there were other constructors called for object-type arguments
  // of this call, clean them up.
  ExplodedNodeSet dstArgumentCleanup;
  for (ExplodedNode *I : dstCallEvaluated)
    finishArgumentConstruction(dstArgumentCleanup, I, Call);

  ExplodedNodeSet dstPostCall;
  getCheckerManager().runCheckersForPostCall(dstPostCall, dstArgumentCleanup,
                                             Call, *this);

  // Handle symbols that escaped while the regions above were invalidated.
  // Note that for inlined calls the nodes were put back into the worklist,
  // so we can assume that every node belongs to a conservative call at this
  // point.

  // Run pointerEscape callback with the newly conjured symbols.
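  // Gather the values of writable out-parameters: arguments passed by
  // non-const pointer or reference, through which the callee may have written.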
  SmallVector<std::pair<SVal, SVal>, 8> Escaped;
  for (ExplodedNode *I : dstPostCall) {
    NodeBuilder B(I, Dst, *currBldrCtx);
    ProgramStateRef State = I->getState();
    Escaped.clear();
    {
      unsigned Arg = -1;
      for (const ParmVarDecl *PVD : Call.parameters()) {
        ++Arg;
        QualType ParamTy = PVD->getType();
        if (ParamTy.isNull() ||
            (!ParamTy->isPointerType() && !ParamTy->isReferenceType()))
          continue;
        QualType Pointee = ParamTy->getPointeeType();
        if (Pointee.isConstQualified() || Pointee->isVoidType())
          continue;
        if (const MemRegion *MR = Call.getArgSVal(Arg).getAsRegion())
          Escaped.emplace_back(loc::MemRegionVal(MR),
                               State->getSVal(MR, Pointee));
      }
    }

    State = processPointerEscapedOnBind(State, Escaped, I->getLocationContext(),
                                        PSK_EscapeOutParameters, &Call);

    if (State == I->getState())
      Dst.insert(I);
    else
      B.generateNode(I->getLocation(), State, I);
  }
}

ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
                                            const LocationContext *LCtx,
                                            ProgramStateRef State) {
  const Expr *E = Call.getOriginExpr();
  if (!E)
    return State;

  // Some method families have known return values.
  if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) {
    switch (Msg->getMethodFamily()) {
    default:
      break;
    case OMF_autorelease:
    case OMF_retain:
    case OMF_self: {
      // These methods return their receivers.
      return State->BindExpr(E, LCtx, Msg->getReceiverSVal());
    }
    }
  } else if (const CXXConstructorCall *C = dyn_cast<CXXConstructorCall>(&Call)){
    SVal ThisV = C->getCXXThisVal();
    ThisV = State->getSVal(ThisV.castAs<Loc>());
    return State->BindExpr(E, LCtx, ThisV);
  }

  SVal R;
  QualType ResultTy = Call.getResultType();
  unsigned Count = currBldrCtx->blockCount();
  if (auto RTC = getCurrentCFGElement().getAs<CFGCXXRecordTypedCall>()) {
    // Conjure a temporary if the function returns an object by value.
    SVal Target;
    assert(RTC->getStmt() == Call.getOriginExpr());
    EvalCallOptions CallOpts; // FIXME: We won't really need those.
    std::tie(State, Target) =
        handleConstructionContext(Call.getOriginExpr(), State, LCtx,
                                  RTC->getConstructionContext(), CallOpts);
    const MemRegion *TargetR = Target.getAsRegion();
    assert(TargetR);
    // Invalidate the region so that it doesn't look uninitialized. If this is
    // a field or element constructor, we do not want to invalidate
    // the whole structure. Pointer escape is meaningless because
    // the structure is a product of conservative evaluation
    // and therefore contains nothing interesting at this point.
    RegionAndSymbolInvalidationTraits ITraits;
    ITraits.setTrait(TargetR,
        RegionAndSymbolInvalidationTraits::TK_DoNotInvalidateSuperRegion);
    State = State->invalidateRegions(TargetR, E, Count, LCtx,
                                     /* CausesPointerEscape=*/false, nullptr,
                                     &Call, &ITraits);

    R = State->getSVal(Target.castAs<Loc>(), E->getType());
  } else {
    // Conjure a symbol if the return value is unknown.

    // See if we need to conjure a heap pointer instead of
    // a regular unknown pointer.
    const auto *CNE = dyn_cast<CXXNewExpr>(E);
    if (CNE && CNE->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
      R = svalBuilder.getConjuredHeapSymbolVal(E, LCtx, Count);
      const MemRegion *MR = R.getAsRegion()->StripCasts();

      // Store the extent of the allocated object(s).
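      // The extent is the element count times the element size; a non-array
      // new-expression allocates exactly one element.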
      SVal ElementCount;
      if (const Expr *SizeExpr = CNE->getArraySize().value_or(nullptr)) {
        ElementCount = State->getSVal(SizeExpr, LCtx);
      } else {
        ElementCount = svalBuilder.makeIntVal(1, /*IsUnsigned=*/true);
      }

      SVal ElementSize = getElementExtent(CNE->getAllocatedType(), svalBuilder);

      SVal Size =
          svalBuilder.evalBinOp(State, BO_Mul, ElementCount, ElementSize,
                                svalBuilder.getArrayIndexType());

      State = setDynamicExtent(State, MR, Size.castAs<DefinedOrUnknownSVal>(),
                               svalBuilder);
    } else {
      R = svalBuilder.conjureSymbolVal(nullptr, E, LCtx, ResultTy, Count);
    }
  }
  return State->BindExpr(E, LCtx, R);
}

// Conservatively evaluate call by invalidating regions and binding
// a conjured return value.
void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
                                      ExplodedNode *Pred,
                                      ProgramStateRef State) {
  State = Call.invalidateRegions(currBldrCtx->blockCount(), State);
  State = bindReturnValue(Call, Pred->getLocationContext(), State);

  // And make the result node.
  Bldr.generateNode(Call.getProgramPoint(), State, Pred);
}

ExprEngine::CallInlinePolicy
ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
                              AnalyzerOptions &Opts,
                              const EvalCallOptions &CallOpts) {
  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getStackFrame();
  switch (Call.getKind()) {
  case CE_Function:
  case CE_Block:
    break;
  case CE_CXXMember:
  case CE_CXXMemberOperator:
    if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions))
      return CIP_DisallowedAlways;
    break;
  case CE_CXXConstructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors))
      return CIP_DisallowedAlways;

    const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);

    const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();

    auto CCE = getCurrentCFGElement().getAs<CFGConstructor>();
    const ConstructionContext *CC = CCE ? CCE->getConstructionContext()
                                        : nullptr;

    if (llvm::isa_and_nonnull<NewAllocatedObjectConstructionContext>(CC) &&
        !Opts.MayInlineCXXAllocator)
      return CIP_DisallowedOnce;

    // FIXME: We don't handle constructors or destructors for arrays properly.
    // Even once we do, we still need to be careful about implicitly-generated
    // initializers for array fields in default move/copy constructors.
    // We still allow construction into ElementRegion targets when they don't
    // represent array elements.
    if (CallOpts.IsArrayCtorOrDtor) {
      if (!shouldInlineArrayConstruction(Pred->getState(), CtorExpr, CurLC))
        return CIP_DisallowedOnce;
    }

    // Inlining constructors requires including initializers in the CFG.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers");
    (void)ADC;

    // If the destructor is trivial, it's always safe to inline the
    // constructor.
    if (Ctor.getDecl()->getParent()->hasTrivialDestructor())
      break;

    // For other types, only inline constructors if destructor inlining is
    // also enabled.
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return CIP_DisallowedAlways;

    if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete) {
      // If we don't handle temporary destructors, we shouldn't inline
      // their constructors.
      if (CallOpts.IsTemporaryCtorOrDtor &&
          !Opts.ShouldIncludeTemporaryDtorsInCFG)
        return CIP_DisallowedOnce;

      // If we did not find the correct this-region, it would be pointless
      // to inline the constructor. Instead we will simply invalidate
      // the fake temporary target.
      if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion)
        return CIP_DisallowedOnce;

      // If the temporary is lifetime-extended by binding it to a reference-type
      // field within an aggregate, automatic destructors don't work properly.
      if (CallOpts.IsTemporaryLifetimeExtendedViaAggregate)
        return CIP_DisallowedOnce;
    }

    break;
  }
  case CE_CXXInheritedConstructor: {
    // This doesn't really increase the cost of inlining ever, because
    // the stack frame of the inherited constructor is trivial.
    return CIP_Allowed;
  }
  case CE_CXXDestructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return CIP_DisallowedAlways;

    // Inlining destructors requires building the CFG correctly.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors");
    (void)ADC;

    // FIXME: We don't handle destructors for arrays properly.
    if (CallOpts.IsArrayCtorOrDtor)
      return CIP_DisallowedOnce;

    // Allow disabling temporary destructor inlining with a separate option.
    if (CallOpts.IsTemporaryCtorOrDtor &&
        !Opts.MayInlineCXXTemporaryDtors)
      return CIP_DisallowedOnce;

    // If we did not find the correct this-region, it would be pointless
    // to inline the destructor. Instead we will simply invalidate
    // the fake temporary target.
    if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion)
      return CIP_DisallowedOnce;
    break;
  }
  case CE_CXXDeallocator:
    LLVM_FALLTHROUGH;
  case CE_CXXAllocator:
    if (Opts.MayInlineCXXAllocator)
      break;
    // Do not inline allocators until we model deallocators.
    // This is unfortunate, but basically necessary for smart pointers and such.
    return CIP_DisallowedAlways;
  case CE_ObjCMessage:
    if (!Opts.MayInlineObjCMethod)
      return CIP_DisallowedAlways;
    if (!(Opts.getIPAMode() == IPAK_DynamicDispatch ||
          Opts.getIPAMode() == IPAK_DynamicDispatchBifurcate))
      return CIP_DisallowedAlways;
    break;
  }

  return CIP_Allowed;
}

/// Returns true if the given C++ class contains a member with the given name.
static bool hasMember(const ASTContext &Ctx, const CXXRecordDecl *RD,
                      StringRef Name) {
  const IdentifierInfo &II = Ctx.Idents.get(Name);
  return RD->hasMemberName(Ctx.DeclarationNames.getIdentifier(&II));
}

/// Returns true if the given C++ class is a container or iterator.
///
/// Our heuristic for this is whether it contains a method named 'begin()' or a
/// nested type named 'iterator' or 'iterator_category'.
static bool isContainerClass(const ASTContext &Ctx, const CXXRecordDecl *RD) {
  return hasMember(Ctx, RD, "begin") ||
         hasMember(Ctx, RD, "iterator") ||
         hasMember(Ctx, RD, "iterator_category");
}

/// Returns true if the given function refers to a method of a C++ container
/// or iterator.
///
/// We generally do a poor job modeling most containers right now, and might
/// prefer not to inline their methods.
static bool isContainerMethod(const ASTContext &Ctx,
                              const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    return isContainerClass(Ctx, MD->getParent());
  return false;
}

/// Returns true if the given function is the destructor of a class named
/// "shared_ptr".
static bool isCXXSharedPtrDtor(const FunctionDecl *FD) {
  const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(FD);
  if (!Dtor)
    return false;

  const CXXRecordDecl *RD = Dtor->getParent();
  if (const IdentifierInfo *II = RD->getDeclName().getAsIdentifierInfo())
    if (II->isStr("shared_ptr"))
      return true;

  return false;
}

/// Returns true if the function in \p CalleeADC may be inlined in general.
///
/// This checks static properties of the function, such as its signature and
/// CFG, to determine whether the analyzer should ever consider inlining it,
/// in any context.
bool ExprEngine::mayInlineDecl(AnalysisDeclContext *CalleeADC) const {
  AnalyzerOptions &Opts = AMgr.getAnalyzerOptions();
  // FIXME: Do not inline variadic calls.
  if (CallEvent::isVariadic(CalleeADC->getDecl()))
    return false;

  // Check certain C++-related inlining policies.
  ASTContext &Ctx = CalleeADC->getASTContext();
  if (Ctx.getLangOpts().CPlusPlus) {
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeADC->getDecl())) {
      // Conditionally control the inlining of template functions.
      if (!Opts.MayInlineTemplateFunctions)
        if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
          return false;

      // Conditionally control the inlining of C++ standard library functions.
      if (!Opts.MayInlineCXXStandardLibrary)
        if (Ctx.getSourceManager().isInSystemHeader(FD->getLocation()))
          if (AnalysisDeclContext::isInStdNamespace(FD))
            return false;

      // Conditionally control the inlining of methods on objects that look
      // like C++ containers.
      if (!Opts.MayInlineCXXContainerMethods)
        if (!AMgr.isInCodeFile(FD->getLocation()))
          if (isContainerMethod(Ctx, FD))
            return false;

      // Conditionally control the inlining of the destructor of C++
      // shared_ptr. We don't currently do a good job modeling shared_ptr
      // because we can't see the reference count, so treating it as opaque
      // is probably the best idea.
      if (!Opts.MayInlineCXXSharedPtrDtor)
        if (isCXXSharedPtrDtor(FD))
          return false;
    }
  }

  // It is possible that the CFG cannot be constructed.
  // Be safe, and check if the CalleeCFG is valid.
  const CFG *CalleeCFG = CalleeADC->getCFG();
  if (!CalleeCFG)
    return false;

  // Do not inline huge functions.
  if (isHuge(CalleeADC))
    return false;

  // It is possible that the live variables analysis cannot be
  // run. If so, bail out.
  if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
    return false;

  return true;
}

bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
                                  const ExplodedNode *Pred,
                                  const EvalCallOptions &CallOpts) {
  if (!D)
    return false;

  AnalysisManager &AMgr = getAnalysisManager();
  AnalyzerOptions &Opts = AMgr.options;
  AnalysisDeclContextManager &ADCMgr = AMgr.getAnalysisDeclContextManager();
  AnalysisDeclContext *CalleeADC = ADCMgr.getContext(D);

  // The auto-synthesized bodies are essential to inline as they are
  // usually small and commonly used. Note: we should do this check early on to
  // ensure we always inline these calls.
  if (CalleeADC->isBodyAutosynthesized())
    return true;

  if (!AMgr.shouldInlineCall())
    return false;

  // Check if this function has been marked as non-inlinable.
  Optional<bool> MayInline = Engine.FunctionSummaries->mayInline(D);
  if (MayInline) {
    if (!MayInline.value())
      return false;

  } else {
    // We haven't actually checked the static properties of this function yet.
    // Do that now, and record our decision in the function summaries.
    if (mayInlineDecl(CalleeADC)) {
      Engine.FunctionSummaries->markMayInline(D);
    } else {
      Engine.FunctionSummaries->markShouldNotInline(D);
      return false;
    }
  }

  // Check if we should inline a call based on its kind.
  // FIXME: this checks both static and dynamic properties of the call, which
  // means we're redoing a bit of work that could be cached in the function
  // summary.
  CallInlinePolicy CIP = mayInlineCallKind(Call, Pred, Opts, CallOpts);
  if (CIP != CIP_Allowed) {
    if (CIP == CIP_DisallowedAlways) {
      assert(!MayInline || *MayInline);
      Engine.FunctionSummaries->markShouldNotInline(D);
    }
    return false;
  }

  // Do not inline if recursive or if we've reached the max stack frame count.
  bool IsRecursive = false;
  unsigned StackDepth = 0;
  examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth);
  if ((StackDepth >= Opts.InlineMaxStackDepth) &&
      (!isSmall(CalleeADC) || IsRecursive))
    return false;

  // Do not inline large functions too many times.
  if ((Engine.FunctionSummaries->getNumTimesInlined(D) >
       Opts.MaxTimesInlineLarge) &&
      isLarge(CalleeADC)) {
    NumReachedInlineCountMax++;
    return false;
  }

  if (HowToInline == Inline_Minimal && (!isSmall(CalleeADC) || IsRecursive))
    return false;

  return true;
}

bool ExprEngine::shouldInlineArrayConstruction(const ProgramStateRef State,
                                               const CXXConstructExpr *CE,
                                               const LocationContext *LCtx) {
  if (!CE)
    return false;

  auto Type = CE->getType();

  // FIXME: Handle other array types.
  if (const auto *CAT = dyn_cast<ConstantArrayType>(Type)) {
    unsigned Size = getContext().getConstantArrayElementCount(CAT);

    return Size <= AMgr.options.maxBlockVisitOnPath;
  }

  // Check if we're inside an ArrayInitLoopExpr, and it's sufficiently small.
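  // getPendingInitLoop() is the number of elements the surrounding
  // ArrayInitLoopExpr would copy-initialize with this constructor.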
  if (auto Size = getPendingInitLoop(State, CE, LCtx))
    return *Size <= AMgr.options.maxBlockVisitOnPath;

  return false;
}

bool ExprEngine::shouldRepeatCtorCall(ProgramStateRef State,
                                      const CXXConstructExpr *E,
                                      const LocationContext *LCtx) {

  if (!E)
    return false;

  auto Ty = E->getType();

  // FIXME: Handle non-constant array types.
  if (const auto *CAT = dyn_cast<ConstantArrayType>(Ty)) {
    unsigned Size = getContext().getConstantArrayElementCount(CAT);
    return Size > getIndexOfElementToConstruct(State, E, LCtx);
  }

  if (auto Size = getPendingInitLoop(State, E, LCtx))
    return Size > getIndexOfElementToConstruct(State, E, LCtx);

  return false;
}

static bool isTrivialObjectAssignment(const CallEvent &Call) {
  const CXXInstanceCall *ICall = dyn_cast<CXXInstanceCall>(&Call);
  if (!ICall)
    return false;

  const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(ICall->getDecl());
  if (!MD)
    return false;
  if (!(MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()))
    return false;

  return MD->isTrivial();
}

void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
                                 const CallEvent &CallTemplate,
                                 const EvalCallOptions &CallOpts) {
  // Make sure we have the most recent state attached to the call.
  ProgramStateRef State = Pred->getState();
  CallEventRef<> Call = CallTemplate.cloneWithState(State);

  // Special-case trivial assignment operators.
  if (isTrivialObjectAssignment(*Call)) {
    performTrivialCopy(Bldr, Pred, *Call);
    return;
  }

  // Try to inline the call.
  // The origin expression here is just used as a kind of checksum;
  // this should still be safe even for CallEvents that don't come from exprs.
  const Expr *E = Call->getOriginExpr();

  ProgramStateRef InlinedFailedState = getInlineFailedState(State, E);
  if (InlinedFailedState) {
    // If we already tried once and failed, make sure we don't retry later.
    State = InlinedFailedState;
  } else {
    RuntimeDefinition RD = Call->getRuntimeDefinition();
    Call->setForeign(RD.isForeign());
    const Decl *D = RD.getDecl();
    if (shouldInlineCall(*Call, D, Pred, CallOpts)) {
      if (RD.mayHaveOtherDefinitions()) {
        AnalyzerOptions &Options = getAnalysisManager().options;

        // Explore with and without inlining the call.
        if (Options.getIPAMode() == IPAK_DynamicDispatchBifurcate) {
          BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred);
          return;
        }

        // Don't inline if we're not in any dynamic dispatch mode.
        if (Options.getIPAMode() != IPAK_DynamicDispatch) {
          conservativeEvalCall(*Call, Bldr, Pred, State);
          return;
        }
      }
      ctuBifurcate(*Call, D, Bldr, Pred, State);
      return;
    }
  }

  // If we can't inline it, handle the return value and invalidate the regions.
  conservativeEvalCall(*Call, Bldr, Pred, State);
}

void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
                               const CallEvent &Call, const Decl *D,
                               NodeBuilder &Bldr, ExplodedNode *Pred) {
  assert(BifurReg);
  BifurReg = BifurReg->StripCasts();

  // Check if we've performed the split already - note, we only want
  // to split the path once per memory region.
  ProgramStateRef State = Pred->getState();
  const unsigned *BState =
      State->get<DynamicDispatchBifurcationMap>(BifurReg);
  if (BState) {
    // If we are on the "inline path", keep inlining if possible.
    if (*BState == DynamicDispatchModeInlined)
      ctuBifurcate(Call, D, Bldr, Pred, State);
    // If inlining failed, or we are on the path where we assume we
    // don't have enough info about the receiver to inline, conjure the
    // return value and invalidate the regions.
    conservativeEvalCall(Call, Bldr, Pred, State);
    return;
  }

  // If we got here, this is the first time we process a message to this
  // region, so split the path.
  ProgramStateRef IState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                                DynamicDispatchModeInlined);
  ctuBifurcate(Call, D, Bldr, Pred, IState);

  ProgramStateRef NoIState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                                DynamicDispatchModeConservative);
  conservativeEvalCall(Call, Bldr, Pred, NoIState);

  NumOfDynamicDispatchPathSplits++;
}

void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
                                 ExplodedNodeSet &Dst) {
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, RS, *this);

  StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx);

  if (RS->getRetValue()) {
    for (ExplodedNodeSet::iterator it = dstPreVisit.begin(),
                                   ei = dstPreVisit.end(); it != ei; ++it) {
      B.generateNode(RS, *it, (*it)->getState());
    }
  }
}