//===-- ExprEngineCallAndReturn.cpp - Support for call/return --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines ExprEngine's support for calls and returns.
//
//===----------------------------------------------------------------------===//

#include "PrettyStackTraceLocationContext.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/ConstructionContext.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace clang;
using namespace ento;

#define DEBUG_TYPE "ExprEngine"

STATISTIC(NumOfDynamicDispatchPathSplits,
  "The # of times we split the path due to imprecise dynamic dispatch info");

STATISTIC(NumInlinedCalls,
  "The # of times we inlined a call");

STATISTIC(NumReachedInlineCountMax,
  "The # of times we reached inline count maximum");

void ExprEngine::processCallEnter(NodeBuilderContext& BC, CallEnter CE,
                                  ExplodedNode *Pred) {
  // Get the entry block in the CFG of the callee.
  const StackFrameContext *calleeCtx = CE.getCalleeContext();
  PrettyStackTraceLocationContext CrashInfo(calleeCtx);
  const CFGBlock *Entry = CE.getEntry();

  // Validate the CFG.
  assert(Entry->empty());
  assert(Entry->succ_size() == 1);

  // Get the solitary successor.
  const CFGBlock *Succ = *(Entry->succ_begin());

  // Construct an edge representing the starting location in the callee.
  BlockEdge Loc(Entry, Succ, calleeCtx);

  ProgramStateRef state = Pred->getState();

  // Construct a new node, notify checkers that analysis of the function has
  // begun, and add the resultant nodes to the worklist.
  bool isNew;
  ExplodedNode *Node = G.getNode(Loc, state, false, &isNew);
  Node->addPredecessor(Pred, G);
  if (isNew) {
    ExplodedNodeSet DstBegin;
    processBeginOfFunction(BC, Node, DstBegin, Loc);
    Engine.enqueue(DstBegin);
  }
}

// Find the last statement on the path to the exploded node and the
// corresponding Block.
static std::pair<const Stmt*,
                 const CFGBlock*> getLastStmt(const ExplodedNode *Node) {
  const Stmt *S = nullptr;
  const CFGBlock *Blk = nullptr;
  const StackFrameContext *SF = Node->getStackFrame();

  // Back up through the ExplodedGraph until we reach a statement node in this
  // stack frame.
  while (Node) {
    const ProgramPoint &PP = Node->getLocation();

    if (PP.getStackFrame() == SF) {
      if (Optional<StmtPoint> SP = PP.getAs<StmtPoint>()) {
        S = SP->getStmt();
        break;
      } else if (Optional<CallExitEnd> CEE = PP.getAs<CallExitEnd>()) {
        S = CEE->getCalleeContext()->getCallSite();
        if (S)
          break;

        // If there is no statement, this is an implicitly-generated call.
        // We'll walk backwards over it and then continue the loop to find
        // an actual statement.
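        // Walk predecessors until we find the CallEnter that matches this
        // CallExitEnd's callee context, i.e. the point where the implicit
        // call was entered.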
        Optional<CallEnter> CE;
        do {
          Node = Node->getFirstPred();
          CE = Node->getLocationAs<CallEnter>();
        } while (!CE || CE->getCalleeContext() != CEE->getCalleeContext());

        // Continue searching the graph.
      } else if (Optional<BlockEdge> BE = PP.getAs<BlockEdge>()) {
        Blk = BE->getSrc();
      }
    } else if (Optional<CallEnter> CE = PP.getAs<CallEnter>()) {
      // If we reached the CallEnter for this function, it has no statements.
      if (CE->getCalleeContext() == SF)
        break;
    }

    if (Node->pred_empty())
      return std::make_pair(nullptr, nullptr);

    Node = *Node->pred_begin();
  }

  return std::make_pair(S, Blk);
}

/// Adjusts a return value when the called function's return type does not
/// match the caller's expression type. This can happen when a dynamic call
/// is devirtualized, and the overriding method has a covariant (more specific)
/// return type than the parent's method. For C++ objects, this means we need
/// to add base casts.
static SVal adjustReturnValue(SVal V, QualType ExpectedTy, QualType ActualTy,
                              StoreManager &StoreMgr) {
  // For now, the only adjustments we handle apply only to locations.
  if (!V.getAs<Loc>())
    return V;

  // If the types already match, don't do any unnecessary work.
  ExpectedTy = ExpectedTy.getCanonicalType();
  ActualTy = ActualTy.getCanonicalType();
  if (ExpectedTy == ActualTy)
    return V;

  // No adjustment is needed between Objective-C pointer types.
  if (ExpectedTy->isObjCObjectPointerType() &&
      ActualTy->isObjCObjectPointerType())
    return V;

  // C++ object pointers may need "derived-to-base" casts.
  const CXXRecordDecl *ExpectedClass = ExpectedTy->getPointeeCXXRecordDecl();
  const CXXRecordDecl *ActualClass = ActualTy->getPointeeCXXRecordDecl();
  if (ExpectedClass && ActualClass) {
    CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                       /*DetectVirtual=*/false);
    if (ActualClass->isDerivedFrom(ExpectedClass, Paths) &&
        !Paths.isAmbiguous(ActualTy->getCanonicalTypeUnqualified())) {
      return StoreMgr.evalDerivedToBase(V, Paths.front());
    }
  }

  // Unfortunately, Objective-C does not enforce that overridden methods have
  // covariant return types, so we can't assert that this never happens.
  // Be safe and return UnknownVal().
  return UnknownVal();
}

void ExprEngine::removeDeadOnEndOfFunction(NodeBuilderContext& BC,
                                           ExplodedNode *Pred,
                                           ExplodedNodeSet &Dst) {
  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = nullptr;
  const CFGBlock *Blk = nullptr;
  std::tie(LastSt, Blk) = getLastStmt(Pred);
  if (!Blk || !LastSt) {
    Dst.Add(Pred);
    return;
  }

  // Here, we destroy the current location context. We use the current
  // function's entire body as a diagnostic statement, with which the program
  // point will be associated. However, we only want to use LastStmt as a
  // reference for what to clean up if it's a ReturnStmt; otherwise, everything
  // is dead.
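  // Temporarily install BC as the current node builder context for the
  // duration of removeDead(); the SaveAndRestore below puts the previous
  // context back when this function returns.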
  SaveAndRestore<const NodeBuilderContext *> NodeContextRAII(currBldrCtx, &BC);
  const LocationContext *LCtx = Pred->getLocationContext();
  removeDead(Pred, Dst, dyn_cast<ReturnStmt>(LastSt), LCtx,
             LCtx->getAnalysisDeclContext()->getBody(),
             ProgramPoint::PostStmtPurgeDeadSymbolsKind);
}

static bool wasDifferentDeclUsedForInlining(CallEventRef<> Call,
    const StackFrameContext *calleeCtx) {
  const Decl *RuntimeCallee = calleeCtx->getDecl();
  const Decl *StaticDecl = Call->getDecl();
  assert(RuntimeCallee);
  if (!StaticDecl)
    return true;
  return RuntimeCallee->getCanonicalDecl() != StaticDecl->getCanonicalDecl();
}

/// The call exit is simulated with a sequence of nodes, which occur between
/// CallExitBegin and CallExitEnd. The following operations occur between the
/// two program points:
/// 1. CallExitBegin (triggers the start of the call exit sequence)
/// 2. Bind the return value
/// 3. Run remove-dead-bindings to clean up the dead symbols from the callee.
/// 4. CallExitEnd (switch to the caller context)
/// 5. PostStmt<CallExpr>
void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
  // Step 1: CEBNode was generated before the call.
  PrettyStackTraceLocationContext CrashInfo(CEBNode->getLocationContext());
  const StackFrameContext *calleeCtx = CEBNode->getStackFrame();

  // The parent context might not be a stack frame, so make sure we
  // look up the first enclosing stack frame.
  const StackFrameContext *callerCtx =
    calleeCtx->getParent()->getStackFrame();

  const Stmt *CE = calleeCtx->getCallSite();
  ProgramStateRef state = CEBNode->getState();
  // Find the last statement in the function and the corresponding basic block.
  const Stmt *LastSt = nullptr;
  const CFGBlock *Blk = nullptr;
  std::tie(LastSt, Blk) = getLastStmt(CEBNode);

  // Generate a CallEvent /before/ cleaning the state, so that we can get the
  // correct value for 'this' (if necessary).
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state);

  // Step 2: generate node with bound return value: CEBNode -> BindedRetNode.

  // If the callee returns an expression, bind its value to CallExpr.
  if (CE) {
    if (const ReturnStmt *RS = dyn_cast_or_null<ReturnStmt>(LastSt)) {
      const LocationContext *LCtx = CEBNode->getLocationContext();
      SVal V = state->getSVal(RS, LCtx);

      // Ensure that the return type matches the type of the returned Expr.
      if (wasDifferentDeclUsedForInlining(Call, calleeCtx)) {
        QualType ReturnedTy =
          CallEvent::getDeclaredResultType(calleeCtx->getDecl());
        if (!ReturnedTy.isNull()) {
          if (const Expr *Ex = dyn_cast<Expr>(CE)) {
            V = adjustReturnValue(V, Ex->getType(), ReturnedTy,
                                  getStoreManager());
          }
        }
      }

      state = state->BindExpr(CE, callerCtx, V);
    }

    // Bind the constructed object value to CXXConstructExpr.
    if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) {
      loc::MemRegionVal This =
        svalBuilder.getCXXThis(CCE->getConstructor()->getParent(), calleeCtx);
      // Reading the 'this' pointer yields the region of the constructed
      // object; reading that region in turn yields the object's value.
      SVal ThisV = state->getSVal(This);
      ThisV = state->getSVal(ThisV.castAs<Loc>());
      state = state->BindExpr(CCE, callerCtx, ThisV);
    }

    if (const auto *CNE = dyn_cast<CXXNewExpr>(CE)) {
      // We are currently evaluating a CXXNewAllocator CFGElement. It takes a
      // while to reach the actual CXXNewExpr element from here, so keep the
      // region for later use.
      // Additionally cast the return value of the inlined operator new
      // (which is of type 'void *') to the correct object type.
      SVal AllocV = state->getSVal(CNE, callerCtx);
      AllocV = svalBuilder.evalCast(
          AllocV, CNE->getType(),
          getContext().getPointerType(getContext().VoidTy));

      state = addObjectUnderConstruction(state, CNE, calleeCtx->getParent(),
                                         AllocV);
    }
  }

  // Step 3: BindedRetNode -> CleanedNodes
  // If we can find a statement and a block in the inlined function, run remove
  // dead bindings before returning from the call. This is important to ensure
  // that we report issues such as leaks in the stack contexts in which
  // they occurred.
  ExplodedNodeSet CleanedNodes;
  if (LastSt && Blk && AMgr.options.AnalysisPurgeOpt != PurgeNone) {
    static SimpleProgramPointTag retValBind("ExprEngine", "Bind Return Value");
    PostStmt Loc(LastSt, calleeCtx, &retValBind);
    bool isNew;
    ExplodedNode *BindedRetNode = G.getNode(Loc, state, false, &isNew);
    BindedRetNode->addPredecessor(CEBNode, G);
    if (!isNew)
      return;

    NodeBuilderContext Ctx(getCoreEngine(), Blk, BindedRetNode);
    currBldrCtx = &Ctx;
    // Here, we call the Symbol Reaper with a null statement and the callee's
    // location context, telling it to clean up everything in the callee's
    // context (and its children). We use the callee's function body as a
    // diagnostic statement, with which the program point will be associated.
    removeDead(BindedRetNode, CleanedNodes, nullptr, calleeCtx,
               calleeCtx->getAnalysisDeclContext()->getBody(),
               ProgramPoint::PostStmtPurgeDeadSymbolsKind);
    currBldrCtx = nullptr;
  } else {
    CleanedNodes.Add(CEBNode);
  }

  for (ExplodedNodeSet::iterator I = CleanedNodes.begin(),
                                 E = CleanedNodes.end(); I != E; ++I) {

    // Step 4: Generate the CallExit and leave the callee's context.
    // CleanedNodes -> CEENode
    CallExitEnd Loc(calleeCtx, callerCtx);
    bool isNew;
    ProgramStateRef CEEState = (*I == CEBNode) ? state : (*I)->getState();

    ExplodedNode *CEENode = G.getNode(Loc, CEEState, false, &isNew);
    CEENode->addPredecessor(*I, G);
    if (!isNew)
      return;

    // Step 5: Perform the post-condition check of the CallExpr and enqueue the
    // result onto the work list.
    // CEENode -> Dst -> WorkList
    NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), CEENode);
    SaveAndRestore<const NodeBuilderContext *> NBCSave(currBldrCtx, &Ctx);
    SaveAndRestore<unsigned> CBISave(currStmtIdx, calleeCtx->getIndex());

    CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState);

    ExplodedNodeSet DstPostCall;
    if (llvm::isa_and_nonnull<CXXNewExpr>(CE)) {
      ExplodedNodeSet DstPostPostCallCallback;
      getCheckerManager().runCheckersForPostCall(DstPostPostCallCallback,
                                                 CEENode, *UpdatedCall, *this,
                                                 /*wasInlined=*/true);
      for (ExplodedNode *I : DstPostPostCallCallback) {
        getCheckerManager().runCheckersForNewAllocator(
            cast<CXXAllocatorCall>(*UpdatedCall), DstPostCall, I, *this,
            /*wasInlined=*/true);
      }
    } else {
      getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode,
                                                 *UpdatedCall, *this,
                                                 /*wasInlined=*/true);
    }
    ExplodedNodeSet Dst;
    if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
      getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, *Msg,
                                                        *this,
                                                        /*wasInlined=*/true);
    } else if (CE &&
               !(isa<CXXNewExpr>(CE) && // Called when visiting CXXNewExpr.
                 AMgr.getAnalyzerOptions().MayInlineCXXAllocator)) {
      getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE,
                                                 *this, /*wasInlined=*/true);
    } else {
      Dst.insert(DstPostCall);
    }

    // Enqueue the next element in the block.
    for (ExplodedNodeSet::iterator PSI = Dst.begin(), PSE = Dst.end();
         PSI != PSE; ++PSI) {
      Engine.getWorkList()->enqueue(*PSI, calleeCtx->getCallSiteBlock(),
                                    calleeCtx->getIndex()+1);
    }
  }
}

bool ExprEngine::isSmall(AnalysisDeclContext *ADC) const {
  // When there are no branches in the function, it means that there's no
  // exponential complexity introduced by inlining such a function. Such
  // functions also don't trigger various fundamental problems with our
  // inlining mechanism, such as the problem of inlined defensive checks.
  // Hence isLinear().
  const CFG *Cfg = ADC->getCFG();
  return Cfg->isLinear() || Cfg->size() <= AMgr.options.AlwaysInlineSize;
}

bool ExprEngine::isLarge(AnalysisDeclContext *ADC) const {
  const CFG *Cfg = ADC->getCFG();
  return Cfg->size() >= AMgr.options.MinCFGSizeTreatFunctionsAsLarge;
}

bool ExprEngine::isHuge(AnalysisDeclContext *ADC) const {
  const CFG *Cfg = ADC->getCFG();
  return Cfg->getNumBlockIDs() > AMgr.options.MaxInlinableSize;
}

void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx,
                                    bool &IsRecursive, unsigned &StackDepth) {
  IsRecursive = false;
  StackDepth = 0;

  while (LCtx) {
    if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LCtx)) {
      const Decl *DI = SFC->getDecl();

      // Mark recursive (and mutually recursive) functions and always count
      // them when measuring the stack depth.
      if (DI == D) {
        IsRecursive = true;
        ++StackDepth;
        LCtx = LCtx->getParent();
        continue;
      }

      // Do not count the small functions when determining the stack depth.
      AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(DI);
      if (!isSmall(CalleeADC))
        ++StackDepth;
    }
    LCtx = LCtx->getParent();
  }
}
// The GDM component containing the dynamic dispatch bifurcation info. When
// the exact type of the receiver is not known, we want to explore both
// paths: one on which we do inline the call and one on which we don't. This
// is done to ensure we do not drop coverage.
// This is a map from the receiver region to a mode flag, specifying whether
// we consider this region's type information precise along the given path.
namespace {
  enum DynamicDispatchMode {
    DynamicDispatchModeInlined = 1,
    DynamicDispatchModeConservative
  };
} // end anonymous namespace

REGISTER_MAP_WITH_PROGRAMSTATE(DynamicDispatchBifurcationMap,
                               const MemRegion *, unsigned)

bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
                            NodeBuilder &Bldr, ExplodedNode *Pred,
                            ProgramStateRef State) {
  assert(D);

  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getStackFrame();
  const LocationContext *ParentOfCallee = CallerSFC;
  if (Call.getKind() == CE_Block &&
      !cast<BlockCall>(Call).isConversionFromLambda()) {
    const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
    assert(BR && "If we have the block definition we should have its region");
    AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
    ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
                                                         cast<BlockDecl>(D),
                                                         BR);
  }

  // This may be NULL, but that's fine.
  const Expr *CallE = Call.getOriginExpr();

  // Construct a new stack frame for the callee.
  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
  const StackFrameContext *CalleeSFC =
      CalleeADC->getStackFrame(ParentOfCallee, CallE, currBldrCtx->getBlock(),
                               currBldrCtx->blockCount(), currStmtIdx);

  CallEnter Loc(CallE, CalleeSFC, CurLC);

  // Construct a new state which contains the mapping from actual to
  // formal arguments.
  State = State->enterStackFrame(Call, CalleeSFC);

  bool isNew;
  if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
    N->addPredecessor(Pred, G);
    if (isNew)
      Engine.getWorkList()->enqueue(N);
  }

  // If we decided to inline the call, the successor has been manually
  // added onto the work list so remove it from the node builder.
  Bldr.takeNodes(Pred);

  NumInlinedCalls++;
  Engine.FunctionSummaries->bumpNumTimesInlined(D);

  // Mark the decl as visited.
  if (VisitedCallees)
    VisitedCallees->insert(D);

  return true;
}

static ProgramStateRef getInlineFailedState(ProgramStateRef State,
                                            const Stmt *CallE) {
  const void *ReplayState = State->get<ReplayWithoutInlining>();
  if (!ReplayState)
    return nullptr;

  assert(ReplayState == CallE && "Backtracked to the wrong call.");
  (void)CallE;

  return State->remove<ReplayWithoutInlining>();
}

void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
                               ExplodedNodeSet &dst) {
  // Perform the previsit of the CallExpr.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, CE, *this);

  // Get the call in its initial state. We use this as a template to perform
  // all the checks.
  CallEventManager &CEMgr = getStateManager().getCallEventManager();
  CallEventRef<> CallTemplate
    = CEMgr.getSimpleCall(CE, Pred->getState(), Pred->getLocationContext());

  // Evaluate the function call. We try each of the checkers
  // to see if they can evaluate the function call.
  ExplodedNodeSet dstCallEvaluated;
  for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end();
       I != E; ++I) {
    evalCall(dstCallEvaluated, *I, *CallTemplate);
  }

  // Finally, perform the post-condition check of the CallExpr and store
  // the created nodes in 'Dst'.
  // Note that if the call was inlined, dstCallEvaluated will be empty.
  // The post-CallExpr check will occur in processCallExit.
  getCheckerManager().runCheckersForPostStmt(dst, dstCallEvaluated, CE,
                                             *this);
}

ProgramStateRef ExprEngine::finishArgumentConstruction(ProgramStateRef State,
                                                       const CallEvent &Call) {
  const Expr *E = Call.getOriginExpr();
  // FIXME: Constructors to placement arguments of operator new
  // are not supported yet.
  if (!E || isa<CXXNewExpr>(E))
    return State;

  const LocationContext *LC = Call.getLocationContext();
  for (unsigned CallI = 0, CallN = Call.getNumArgs(); CallI != CallN; ++CallI) {
    unsigned I = Call.getASTArgumentIndex(CallI);
    if (Optional<SVal> V =
            getObjectUnderConstruction(State, {E, I}, LC)) {
      SVal VV = *V;
      (void)VV;
      assert(cast<VarRegion>(VV.castAs<loc::MemRegionVal>().getRegion())
                 ->getStackFrame()->getParent()
                 ->getStackFrame() == LC->getStackFrame());
      State = finishObjectConstruction(State, {E, I}, LC);
    }
  }

  return State;
}

void ExprEngine::finishArgumentConstruction(ExplodedNodeSet &Dst,
                                            ExplodedNode *Pred,
                                            const CallEvent &Call) {
  ProgramStateRef State = Pred->getState();
  ProgramStateRef CleanedState = finishArgumentConstruction(State, Call);
  if (CleanedState == State) {
    Dst.insert(Pred);
    return;
  }

  const Expr *E = Call.getOriginExpr();
  const LocationContext *LC = Call.getLocationContext();
  NodeBuilder B(Pred, Dst, *currBldrCtx);
  static SimpleProgramPointTag Tag("ExprEngine",
                                   "Finish argument construction");
  PreStmt PP(E, LC, &Tag);
  B.generateNode(PP, CleanedState, Pred);
}

void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
                          const CallEvent &Call) {
  // WARNING: At this time, the state attached to 'Call' may be older than the
  // state in 'Pred'. This is a minor optimization since CheckerManager will
  // use an updated CallEvent instance when calling checkers, but if 'Call' is
  // ever used directly in this function all callers should be updated to pass
  // the most recent state. (It is probably not worth doing the work here since
  // for some callers this will not be necessary.)

  // Run any pre-call checks using the generic call interface.
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred,
                                            Call, *this);

  // Actually evaluate the function call. We try each of the checkers
  // to see if they can evaluate the function call, and get a callback at
  // defaultEvalCall if all of them fail.
  ExplodedNodeSet dstCallEvaluated;
  getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
                                             Call, *this, EvalCallOptions());

  // If there were other constructors called for object-type arguments
  // of this call, clean them up.
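  // Only checker-evaluated and conservatively evaluated calls reach this
  // point: inlined calls put their successor nodes back on the worklist, so
  // dstCallEvaluated is empty for them.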
  ExplodedNodeSet dstArgumentCleanup;
  for (ExplodedNode *I : dstCallEvaluated)
    finishArgumentConstruction(dstArgumentCleanup, I, Call);

  ExplodedNodeSet dstPostCall;
  getCheckerManager().runCheckersForPostCall(dstPostCall, dstArgumentCleanup,
                                             Call, *this);

  // Symbols conjured while invalidating the regions above may escape through
  // out-parameters. Note that for inlined calls the nodes were put back into
  // the worklist, so we can assume that every node belongs to a conservative
  // call at this point.

  // Run the pointerEscape callback with the newly conjured symbols.
  SmallVector<std::pair<SVal, SVal>, 8> Escaped;
  for (ExplodedNode *I : dstPostCall) {
    NodeBuilder B(I, Dst, *currBldrCtx);
    ProgramStateRef State = I->getState();
    Escaped.clear();
    {
      unsigned Arg = -1;
      for (const ParmVarDecl *PVD : Call.parameters()) {
        ++Arg;
        QualType ParamTy = PVD->getType();
        if (ParamTy.isNull() ||
            (!ParamTy->isPointerType() && !ParamTy->isReferenceType()))
          continue;
        QualType Pointee = ParamTy->getPointeeType();
        if (Pointee.isConstQualified() || Pointee->isVoidType())
          continue;
        if (const MemRegion *MR = Call.getArgSVal(Arg).getAsRegion())
          Escaped.emplace_back(loc::MemRegionVal(MR),
                               State->getSVal(MR, Pointee));
      }
    }

    State = processPointerEscapedOnBind(State, Escaped,
                                        I->getLocationContext(),
                                        PSK_EscapeOutParameters, &Call);

    if (State == I->getState())
      Dst.insert(I);
    else
      B.generateNode(I->getLocation(), State, I);
  }
}

ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
                                            const LocationContext *LCtx,
                                            ProgramStateRef State) {
  const Expr *E = Call.getOriginExpr();
  if (!E)
    return State;

  // Some method families have known return values.
  if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) {
    switch (Msg->getMethodFamily()) {
    default:
      break;
    case OMF_autorelease:
    case OMF_retain:
    case OMF_self: {
      // These methods return their receivers.
      return State->BindExpr(E, LCtx, Msg->getReceiverSVal());
    }
    }
  } else if (const CXXConstructorCall *C = dyn_cast<CXXConstructorCall>(&Call)){
    SVal ThisV = C->getCXXThisVal();
    ThisV = State->getSVal(ThisV.castAs<Loc>());
    return State->BindExpr(E, LCtx, ThisV);
  }

  SVal R;
  QualType ResultTy = Call.getResultType();
  unsigned Count = currBldrCtx->blockCount();
  if (auto RTC = getCurrentCFGElement().getAs<CFGCXXRecordTypedCall>()) {
    // Conjure a temporary if the function returns an object by value.
    SVal Target;
    assert(RTC->getStmt() == Call.getOriginExpr());
    EvalCallOptions CallOpts; // FIXME: We won't really need those.
    std::tie(State, Target) =
        handleConstructionContext(Call.getOriginExpr(), State, LCtx,
                                  RTC->getConstructionContext(), CallOpts);
    const MemRegion *TargetR = Target.getAsRegion();
    assert(TargetR);
    // Invalidate the region so that it doesn't look uninitialized. If this is
    // a field or element constructor, we do not want to invalidate
    // the whole structure. Pointer escape is meaningless because
    // the structure is a product of conservative evaluation
    // and therefore contains nothing interesting at this point.
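    // TK_DoNotInvalidateSuperRegion keeps the invalidation below local to the
    // target region itself, leaving the enclosing structure untouched when
    // the target is a field or element.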
    RegionAndSymbolInvalidationTraits ITraits;
    ITraits.setTrait(TargetR,
        RegionAndSymbolInvalidationTraits::TK_DoNotInvalidateSuperRegion);
    State = State->invalidateRegions(TargetR, E, Count, LCtx,
                                     /* CausesPointerEscape=*/false, nullptr,
                                     &Call, &ITraits);

    R = State->getSVal(Target.castAs<Loc>(), E->getType());
  } else {
    // Conjure a symbol if the return value is unknown.

    // See if we need to conjure a heap pointer instead of
    // a regular unknown pointer.
    bool IsHeapPointer = false;
    if (const auto *CNE = dyn_cast<CXXNewExpr>(E))
      if (CNE->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
        // FIXME: Delegate this to evalCall in MallocChecker?
        IsHeapPointer = true;
      }

    R = IsHeapPointer ? svalBuilder.getConjuredHeapSymbolVal(E, LCtx, Count)
                      : svalBuilder.conjureSymbolVal(nullptr, E, LCtx, ResultTy,
                                                     Count);
  }
  return State->BindExpr(E, LCtx, R);
}

// Conservatively evaluate call by invalidating regions and binding
// a conjured return value.
void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
                                      ExplodedNode *Pred,
                                      ProgramStateRef State) {
  State = Call.invalidateRegions(currBldrCtx->blockCount(), State);
  State = bindReturnValue(Call, Pred->getLocationContext(), State);

  // And make the result node.
  Bldr.generateNode(Call.getProgramPoint(), State, Pred);
}

ExprEngine::CallInlinePolicy
ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
                              AnalyzerOptions &Opts,
                              const EvalCallOptions &CallOpts) {
  const LocationContext *CurLC = Pred->getLocationContext();
  const StackFrameContext *CallerSFC = CurLC->getStackFrame();
  switch (Call.getKind()) {
  case CE_Function:
  case CE_Block:
    break;
  case CE_CXXMember:
  case CE_CXXMemberOperator:
    if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions))
      return CIP_DisallowedAlways;
    break;
  case CE_CXXConstructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors))
      return CIP_DisallowedAlways;

    const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);

    const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();

    auto CCE = getCurrentCFGElement().getAs<CFGConstructor>();
    const ConstructionContext *CC = CCE ? CCE->getConstructionContext()
                                        : nullptr;

    if (llvm::isa_and_nonnull<NewAllocatedObjectConstructionContext>(CC) &&
        !Opts.MayInlineCXXAllocator)
      return CIP_DisallowedOnce;

    // FIXME: We don't handle constructors or destructors for arrays properly.
    // Even once we do, we still need to be careful about implicitly-generated
    // initializers for array fields in default move/copy constructors.
    // We still allow construction into ElementRegion targets when they don't
    // represent array elements.
    if (CallOpts.IsArrayCtorOrDtor)
      return CIP_DisallowedOnce;

    // Inlining constructors requires including initializers in the CFG.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers");
    (void)ADC;

    // If the destructor is trivial, it's always safe to inline the
    // constructor.
    if (Ctor.getDecl()->getParent()->hasTrivialDestructor())
      break;

    // For other types, only inline constructors if destructor inlining is
    // also enabled.
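    // Note: the same CIMK_Destructors option also gates destructor inlining
    // in the CE_CXXDestructor case below.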
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return CIP_DisallowedAlways;

    if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete) {
      // If we don't handle temporary destructors, we shouldn't inline
      // their constructors.
      if (CallOpts.IsTemporaryCtorOrDtor &&
          !Opts.ShouldIncludeTemporaryDtorsInCFG)
        return CIP_DisallowedOnce;

      // If we did not find the correct this-region, it would be pointless
      // to inline the constructor. Instead we will simply invalidate
      // the fake temporary target.
      if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion)
        return CIP_DisallowedOnce;

      // If the temporary is lifetime-extended by binding it to a
      // reference-type field within an aggregate, automatic destructors
      // don't work properly.
      if (CallOpts.IsTemporaryLifetimeExtendedViaAggregate)
        return CIP_DisallowedOnce;
    }

    break;
  }
  case CE_CXXInheritedConstructor: {
    // This doesn't really increase the cost of inlining ever, because
    // the stack frame of the inherited constructor is trivial.
    return CIP_Allowed;
  }
  case CE_CXXDestructor: {
    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
      return CIP_DisallowedAlways;

    // Inlining destructors requires building the CFG correctly.
    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
    assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors");
    (void)ADC;

    // FIXME: We don't handle constructors or destructors for arrays properly.
    if (CallOpts.IsArrayCtorOrDtor)
      return CIP_DisallowedOnce;

    // Allow disabling temporary destructor inlining with a separate option.
    if (CallOpts.IsTemporaryCtorOrDtor &&
        !Opts.MayInlineCXXTemporaryDtors)
      return CIP_DisallowedOnce;

    // If we did not find the correct this-region, it would be pointless
    // to inline the destructor. Instead we will simply invalidate
    // the fake temporary target.
    if (CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion)
      return CIP_DisallowedOnce;
    break;
  }
  case CE_CXXDeallocator:
    LLVM_FALLTHROUGH;
  case CE_CXXAllocator:
    if (Opts.MayInlineCXXAllocator)
      break;
    // Do not inline allocators until we model deallocators.
    // This is unfortunate, but basically necessary for smart pointers and such.
    return CIP_DisallowedAlways;
  case CE_ObjCMessage:
    if (!Opts.MayInlineObjCMethod)
      return CIP_DisallowedAlways;
    if (!(Opts.getIPAMode() == IPAK_DynamicDispatch ||
          Opts.getIPAMode() == IPAK_DynamicDispatchBifurcate))
      return CIP_DisallowedAlways;
    break;
  }

  return CIP_Allowed;
}

/// Returns true if the given C++ class contains a member with the given name.
static bool hasMember(const ASTContext &Ctx, const CXXRecordDecl *RD,
                      StringRef Name) {
  const IdentifierInfo &II = Ctx.Idents.get(Name);
  return RD->hasMemberName(Ctx.DeclarationNames.getIdentifier(&II));
}

/// Returns true if the given C++ class is a container or iterator.
///
/// Our heuristic for this is whether it contains a method named 'begin()' or a
/// nested type named 'iterator' or 'iterator_category'.
static bool isContainerClass(const ASTContext &Ctx, const CXXRecordDecl *RD) {
  return hasMember(Ctx, RD, "begin") ||
         hasMember(Ctx, RD, "iterator") ||
         hasMember(Ctx, RD, "iterator_category");
}

/// Returns true if the given function refers to a method of a C++ container
/// or iterator.
///
/// We generally do a poor job modeling most containers right now, and might
/// prefer not to inline their methods.
static bool isContainerMethod(const ASTContext &Ctx,
                              const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    return isContainerClass(Ctx, MD->getParent());
  return false;
}

/// Returns true if the given function is the destructor of a class named
/// "shared_ptr".
static bool isCXXSharedPtrDtor(const FunctionDecl *FD) {
  const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(FD);
  if (!Dtor)
    return false;

  const CXXRecordDecl *RD = Dtor->getParent();
  if (const IdentifierInfo *II = RD->getDeclName().getAsIdentifierInfo())
    if (II->isStr("shared_ptr"))
      return true;

  return false;
}

/// Returns true if the function in \p CalleeADC may be inlined in general.
///
/// This checks static properties of the function, such as its signature and
/// CFG, to determine whether the analyzer should ever consider inlining it,
/// in any context.
bool ExprEngine::mayInlineDecl(AnalysisDeclContext *CalleeADC) const {
  AnalyzerOptions &Opts = AMgr.getAnalyzerOptions();
  // FIXME: Do not inline variadic calls.
  if (CallEvent::isVariadic(CalleeADC->getDecl()))
    return false;

  // Check certain C++-related inlining policies.
  ASTContext &Ctx = CalleeADC->getASTContext();
  if (Ctx.getLangOpts().CPlusPlus) {
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeADC->getDecl())) {
      // Conditionally control the inlining of template functions.
      if (!Opts.MayInlineTemplateFunctions)
        if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
          return false;

      // Conditionally control the inlining of C++ standard library functions.
      if (!Opts.MayInlineCXXStandardLibrary)
        if (Ctx.getSourceManager().isInSystemHeader(FD->getLocation()))
          if (AnalysisDeclContext::isInStdNamespace(FD))
            return false;

      // Conditionally control the inlining of methods on objects that look
      // like C++ containers.
      if (!Opts.MayInlineCXXContainerMethods)
        if (!AMgr.isInCodeFile(FD->getLocation()))
          if (isContainerMethod(Ctx, FD))
            return false;

      // Conditionally control the inlining of the destructor of C++
      // shared_ptr. We don't currently do a good job modeling shared_ptr
      // because we can't see the reference count, so treating it as opaque
      // is probably the best idea.
      if (!Opts.MayInlineCXXSharedPtrDtor)
        if (isCXXSharedPtrDtor(FD))
          return false;
    }
  }

  // It is possible that the CFG cannot be constructed.
  // Be safe, and check if the CalleeCFG is valid.
  const CFG *CalleeCFG = CalleeADC->getCFG();
  if (!CalleeCFG)
    return false;

  // Do not inline huge functions.
  if (isHuge(CalleeADC))
    return false;

  // It is possible that the live variables analysis cannot be
  // run. If so, bail out.
  if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
    return false;

  return true;
}

bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
                                  const ExplodedNode *Pred,
                                  const EvalCallOptions &CallOpts) {
  if (!D)
    return false;

  AnalysisManager &AMgr = getAnalysisManager();
  AnalyzerOptions &Opts = AMgr.options;
  AnalysisDeclContextManager &ADCMgr = AMgr.getAnalysisDeclContextManager();
  AnalysisDeclContext *CalleeADC = ADCMgr.getContext(D);

  // The auto-synthesized bodies are essential to inline as they are
  // usually small and commonly used. Note: we should do this check early on to
  // ensure we always inline these calls.
  if (CalleeADC->isBodyAutosynthesized())
    return true;

  if (!AMgr.shouldInlineCall())
    return false;

  // Check if this function has been marked as non-inlinable.
  Optional<bool> MayInline = Engine.FunctionSummaries->mayInline(D);
  if (MayInline.hasValue()) {
    if (!MayInline.getValue())
      return false;

  } else {
    // We haven't actually checked the static properties of this function yet.
    // Do that now, and record our decision in the function summaries.
    if (mayInlineDecl(CalleeADC)) {
      Engine.FunctionSummaries->markMayInline(D);
    } else {
      Engine.FunctionSummaries->markShouldNotInline(D);
      return false;
    }
  }

  // Check if we should inline a call based on its kind.
  // FIXME: this checks both static and dynamic properties of the call, which
  // means we're redoing a bit of work that could be cached in the function
  // summary.
  CallInlinePolicy CIP = mayInlineCallKind(Call, Pred, Opts, CallOpts);
  if (CIP != CIP_Allowed) {
    if (CIP == CIP_DisallowedAlways) {
      assert(!MayInline.hasValue() || MayInline.getValue());
      Engine.FunctionSummaries->markShouldNotInline(D);
    }
    return false;
  }

  // Do not inline if recursive or if we've reached the max stack frame count.
  bool IsRecursive = false;
  unsigned StackDepth = 0;
  examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth);
  if ((StackDepth >= Opts.InlineMaxStackDepth) &&
      (!isSmall(CalleeADC) || IsRecursive))
    return false;

  // Do not inline large functions too many times.
  if ((Engine.FunctionSummaries->getNumTimesInlined(D) >
       Opts.MaxTimesInlineLarge) &&
      isLarge(CalleeADC)) {
    NumReachedInlineCountMax++;
    return false;
  }

  if (HowToInline == Inline_Minimal && (!isSmall(CalleeADC) || IsRecursive))
    return false;

  return true;
}

static bool isTrivialObjectAssignment(const CallEvent &Call) {
  const CXXInstanceCall *ICall = dyn_cast<CXXInstanceCall>(&Call);
  if (!ICall)
    return false;

  const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(ICall->getDecl());
  if (!MD)
    return false;
  if (!(MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()))
    return false;

  return MD->isTrivial();
}

void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
                                 const CallEvent &CallTemplate,
                                 const EvalCallOptions &CallOpts) {
  // Make sure we have the most recent state attached to the call.
  ProgramStateRef State = Pred->getState();
  CallEventRef<> Call = CallTemplate.cloneWithState(State);

  // Special-case trivial assignment operators.
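  // A trivial copy or move assignment has no effects beyond the byte-wise
  // copy, so it is modeled directly as a binding instead of inlining the
  // operator body.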
  if (isTrivialObjectAssignment(*Call)) {
    performTrivialCopy(Bldr, Pred, *Call);
    return;
  }

  // Try to inline the call.
  // The origin expression here is just used as a kind of checksum;
  // this should still be safe even for CallEvents that don't come from exprs.
  const Expr *E = Call->getOriginExpr();

  ProgramStateRef InlinedFailedState = getInlineFailedState(State, E);
  if (InlinedFailedState) {
    // If we already tried once and failed, make sure we don't retry later.
    State = InlinedFailedState;
  } else {
    RuntimeDefinition RD = Call->getRuntimeDefinition();
    const Decl *D = RD.getDecl();
    if (shouldInlineCall(*Call, D, Pred, CallOpts)) {
      if (RD.mayHaveOtherDefinitions()) {
        AnalyzerOptions &Options = getAnalysisManager().options;

        // Explore with and without inlining the call.
        if (Options.getIPAMode() == IPAK_DynamicDispatchBifurcate) {
          BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred);
          return;
        }

        // Don't inline if we're not in any dynamic dispatch mode.
        if (Options.getIPAMode() != IPAK_DynamicDispatch) {
          conservativeEvalCall(*Call, Bldr, Pred, State);
          return;
        }
      }

      // We are not bifurcating and we do have a Decl, so just inline.
      if (inlineCall(*Call, D, Bldr, Pred, State))
        return;
    }
  }

  // If we can't inline it, handle the return value and invalidate the regions.
  conservativeEvalCall(*Call, Bldr, Pred, State);
}

void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
                               const CallEvent &Call, const Decl *D,
                               NodeBuilder &Bldr, ExplodedNode *Pred) {
  assert(BifurReg);
  BifurReg = BifurReg->StripCasts();

  // Check if we've performed the split already; note that we only want
  // to split the path once per memory region.
  ProgramStateRef State = Pred->getState();
  const unsigned *BState =
      State->get<DynamicDispatchBifurcationMap>(BifurReg);
  if (BState) {
    // If we are on the "inline path", keep inlining if possible.
    if (*BState == DynamicDispatchModeInlined)
      if (inlineCall(Call, D, Bldr, Pred, State))
        return;
    // If inlining failed, or we are on the path where we assume we
    // don't have enough info about the receiver to inline, conjure the
    // return value and invalidate the regions.
    conservativeEvalCall(Call, Bldr, Pred, State);
    return;
  }

  // If we got here, this is the first time we process a message to this
  // region, so split the path.
  ProgramStateRef IState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                                DynamicDispatchModeInlined);
  inlineCall(Call, D, Bldr, Pred, IState);

  ProgramStateRef NoIState =
      State->set<DynamicDispatchBifurcationMap>(BifurReg,
                                               DynamicDispatchModeConservative);
  conservativeEvalCall(Call, Bldr, Pred, NoIState);

  NumOfDynamicDispatchPathSplits++;
}

void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
                                 ExplodedNodeSet &Dst) {
  ExplodedNodeSet dstPreVisit;
  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, RS, *this);

  StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx);

  // If there is no return value, the pre-visit nodes flow through to Dst
  // unchanged when the builder goes out of scope.
  if (RS->getRetValue()) {
    for (ExplodedNodeSet::iterator it = dstPreVisit.begin(),
                                   ei = dstPreVisit.end(); it != ei; ++it) {
      B.generateNode(RS, *it, (*it)->getState());
    }
  }
}