//===- CoroSplit.cpp - Converts a coroutine into a state machine ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This pass builds the coroutine frame and outlines resume and destroy parts
// of the coroutine into separate functions.
//
// We present a coroutine to LLVM as an ordinary function with suspension
// points marked up with intrinsics. We let the optimizer party on the coroutine
// as a single function for as long as possible. Shortly before the coroutine is
// eligible to be inlined into its callers, we split up the coroutine into parts
// corresponding to the initial, resume and destroy invocations of the
// coroutine, add them to the current SCC and restart the IPO pipeline to
// optimize the coroutine subfunctions we extracted before proceeding to the
// caller of the coroutine.
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Coroutines/CoroSplit.h"
#include "CoroInstr.h"
#include "CoroInternal.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/CallGraphUpdater.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <iterator>

using namespace llvm;

#define DEBUG_TYPE "coro-split"

namespace {

/// A little helper class for building the coroutine clones.
class CoroCloner {
public:
  enum class Kind {
    /// The shared resume function for a switch lowering.
    SwitchResume,

    /// The shared unwind function for a switch lowering.
    SwitchUnwind,

    /// The shared cleanup function for a switch lowering.
    SwitchCleanup,

    /// An individual continuation function.
    Continuation,

    /// An async resume function.
    Async,
  };

private:
  Function &OrigF;
  Function *NewF;
  const Twine &Suffix;
  coro::Shape &Shape;
  Kind FKind;
  ValueToValueMapTy VMap;
  IRBuilder<> Builder;
  Value *NewFramePtr = nullptr;

  /// The active suspend instruction; meaningful only for continuation and
  /// async ABIs.
  AnyCoroSuspendInst *ActiveSuspend = nullptr;

public:
  /// Create a cloner for a switch lowering.
  CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
             Kind FKind)
      : OrigF(OrigF), NewF(nullptr), Suffix(Suffix), Shape(Shape),
        FKind(FKind), Builder(OrigF.getContext()) {
    assert(Shape.ABI == coro::ABI::Switch);
  }

  /// Create a cloner for a continuation lowering.
  CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
             Function *NewF, AnyCoroSuspendInst *ActiveSuspend)
      : OrigF(OrigF), NewF(NewF), Suffix(Suffix), Shape(Shape),
        FKind(Shape.ABI == coro::ABI::Async ? Kind::Async : Kind::Continuation),
        Builder(OrigF.getContext()), ActiveSuspend(ActiveSuspend) {
    assert(Shape.ABI == coro::ABI::Retcon ||
           Shape.ABI == coro::ABI::RetconOnce || Shape.ABI == coro::ABI::Async);
    assert(NewF && "need existing function for continuation");
    assert(ActiveSuspend && "need active suspend point for continuation");
  }

  Function *getFunction() const {
    assert(NewF != nullptr && "declaration not yet set");
    return NewF;
  }

  void create();

private:
  bool isSwitchDestroyFunction() {
    switch (FKind) {
    case Kind::Async:
    case Kind::Continuation:
    case Kind::SwitchResume:
      return false;
    case Kind::SwitchUnwind:
    case Kind::SwitchCleanup:
      return true;
    }
    llvm_unreachable("Unknown CoroCloner::Kind enum");
  }

  void replaceEntryBlock();
  Value *deriveNewFramePointer();
  void replaceRetconOrAsyncSuspendUses();
  void replaceCoroSuspends();
  void replaceCoroEnds();
  void replaceSwiftErrorOps();
  void salvageDebugInfo();
  void handleFinalSuspend();
};

} // end anonymous namespace

static void maybeFreeRetconStorage(IRBuilder<> &Builder,
                                   const coro::Shape &Shape, Value *FramePtr,
                                   CallGraph *CG) {
  assert(Shape.ABI == coro::ABI::Retcon ||
         Shape.ABI == coro::ABI::RetconOnce);
  if (Shape.RetconLowering.IsFrameInlineInStorage)
    return;

  Shape.emitDealloc(Builder, FramePtr, CG);
}

/// Replace an llvm.coro.end.async.
/// Will inline the must tail call function call if there is one.
/// \returns true if cleanup of the coro.end block is needed, false otherwise.
static bool replaceCoroEndAsync(AnyCoroEndInst *End) {
  IRBuilder<> Builder(End);

  auto *EndAsync = dyn_cast<CoroAsyncEndInst>(End);
  if (!EndAsync) {
    Builder.CreateRetVoid();
    return true /*needs cleanup of coro.end block*/;
  }

  auto *MustTailCallFunc = EndAsync->getMustTailCallFunction();
  if (!MustTailCallFunc) {
    Builder.CreateRetVoid();
    return true /*needs cleanup of coro.end block*/;
  }

  // Move the must tail call from the predecessor block into the end block.
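  // For illustration only (function and block names here are invented), the
  // IR this expects looks roughly like:
  //
  //   musttail.block:                       ; single predecessor
  //     musttail call void @next(...)       ; instruction before the branch
  //     br label %coro.end.block
  //   coro.end.block:
  //     call i1 @llvm.coro.end.async(...)
  //
  // The musttail call is spliced directly in front of the coro.end, a
  // 'ret void' is emitted after it, and the call is then inlined.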
  auto *CoroEndBlock = End->getParent();
  auto *MustTailCallFuncBlock = CoroEndBlock->getSinglePredecessor();
  assert(MustTailCallFuncBlock && "Must have a single predecessor block");
  auto It = MustTailCallFuncBlock->getTerminator()->getIterator();
  auto *MustTailCall = cast<CallInst>(&*std::prev(It));
  CoroEndBlock->getInstList().splice(
      End->getIterator(), MustTailCallFuncBlock->getInstList(), MustTailCall);

  // Insert the return instruction.
  Builder.SetInsertPoint(End);
  Builder.CreateRetVoid();
  InlineFunctionInfo FnInfo;

  // Remove the rest of the block, by splitting it into an unreachable block.
  auto *BB = End->getParent();
  BB->splitBasicBlock(End);
  BB->getTerminator()->eraseFromParent();

  auto InlineRes = InlineFunction(*MustTailCall, FnInfo);
  assert(InlineRes.isSuccess() && "Expected inlining to succeed");
  (void)InlineRes;

  // We have cleaned up the coro.end block above.
  return false;
}

/// Replace a non-unwind call to llvm.coro.end.
static void replaceFallthroughCoroEnd(AnyCoroEndInst *End,
                                      const coro::Shape &Shape, Value *FramePtr,
                                      bool InResume, CallGraph *CG) {
  // Start inserting right before the coro.end.
  IRBuilder<> Builder(End);

  // Create the return instruction.
  switch (Shape.ABI) {
  // The cloned functions in switch-lowering always return void.
  case coro::ABI::Switch:
    // coro.end doesn't immediately end the coroutine in the main function
    // in this lowering, because we need to deallocate the coroutine.
    if (!InResume)
      return;
    Builder.CreateRetVoid();
    break;

  // In async lowering this returns.
  case coro::ABI::Async: {
    bool CoroEndBlockNeedsCleanup = replaceCoroEndAsync(End);
    if (!CoroEndBlockNeedsCleanup)
      return;
    break;
  }

  // In unique continuation lowering, the continuations always return void.
  // But we may have implicitly allocated storage.
  case coro::ABI::RetconOnce:
    maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
    Builder.CreateRetVoid();
    break;

  // In non-unique continuation lowering, we signal completion by returning
  // a null continuation.
  case coro::ABI::Retcon: {
    maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
    auto RetTy = Shape.getResumeFunctionType()->getReturnType();
    auto RetStructTy = dyn_cast<StructType>(RetTy);
    PointerType *ContinuationTy =
        cast<PointerType>(RetStructTy ? RetStructTy->getElementType(0) : RetTy);

    Value *ReturnValue = ConstantPointerNull::get(ContinuationTy);
    if (RetStructTy) {
      ReturnValue = Builder.CreateInsertValue(UndefValue::get(RetStructTy),
                                              ReturnValue, 0);
    }
    Builder.CreateRet(ReturnValue);
    break;
  }
  }

  // Remove the rest of the block, by splitting it into an unreachable block.
  auto *BB = End->getParent();
  BB->splitBasicBlock(End);
  BB->getTerminator()->eraseFromParent();
}

/// Replace an unwind call to llvm.coro.end.
static void replaceUnwindCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
                                 Value *FramePtr, bool InResume,
                                 CallGraph *CG) {
  IRBuilder<> Builder(End);

  switch (Shape.ABI) {
  // In switch-lowering, this does nothing in the main function.
  case coro::ABI::Switch:
    if (!InResume)
      return;
    break;
  // In async lowering this does nothing.
  case coro::ABI::Async:
    break;
  // In continuation-lowering, this frees the continuation storage.
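  // (If the frame was placed inline in the caller-provided storage, there is
  // nothing to free; see maybeFreeRetconStorage above.)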
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
    break;
  }

  // If coro.end has an associated bundle, add cleanupret instruction.
  if (auto Bundle = End->getOperandBundle(LLVMContext::OB_funclet)) {
    auto *FromPad = cast<CleanupPadInst>(Bundle->Inputs[0]);
    auto *CleanupRet = Builder.CreateCleanupRet(FromPad, nullptr);
    End->getParent()->splitBasicBlock(End);
    CleanupRet->getParent()->getTerminator()->eraseFromParent();
  }
}

static void replaceCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
                           Value *FramePtr, bool InResume, CallGraph *CG) {
  if (End->isUnwind())
    replaceUnwindCoroEnd(End, Shape, FramePtr, InResume, CG);
  else
    replaceFallthroughCoroEnd(End, Shape, FramePtr, InResume, CG);

  auto &Context = End->getContext();
  End->replaceAllUsesWith(InResume ? ConstantInt::getTrue(Context)
                                   : ConstantInt::getFalse(Context));
  End->eraseFromParent();
}

// Create an entry block for a resume function with a switch that will jump to
// suspend points.
static void createResumeEntryBlock(Function &F, coro::Shape &Shape) {
  assert(Shape.ABI == coro::ABI::Switch);
  LLVMContext &C = F.getContext();

  // resume.entry:
  //  %index.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32 0,
  //  i32 2
  //  %index = load i32, i32* %index.addr
  //  switch i32 %index, label %unreachable [
  //    i32 0, label %resume.0
  //    i32 1, label %resume.1
  //    ...
  //  ]

  auto *NewEntry = BasicBlock::Create(C, "resume.entry", &F);
  auto *UnreachBB = BasicBlock::Create(C, "unreachable", &F);

  IRBuilder<> Builder(NewEntry);
  auto *FramePtr = Shape.FramePtr;
  auto *FrameTy = Shape.FrameTy;
  auto *GepIndex = Builder.CreateStructGEP(
      FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
  auto *Index = Builder.CreateLoad(Shape.getIndexType(), GepIndex, "index");
  auto *Switch =
      Builder.CreateSwitch(Index, UnreachBB, Shape.CoroSuspends.size());
  Shape.SwitchLowering.ResumeSwitch = Switch;

  size_t SuspendIndex = 0;
  for (auto *AnyS : Shape.CoroSuspends) {
    auto *S = cast<CoroSuspendInst>(AnyS);
    ConstantInt *IndexVal = Shape.getIndex(SuspendIndex);

    // Replace CoroSave with a store to Index:
    //    %index.addr = getelementptr %f.frame... (index field number)
    //    store i32 0, i32* %index.addr1
    auto *Save = S->getCoroSave();
    Builder.SetInsertPoint(Save);
    if (S->isFinal()) {
      // Final suspend point is represented by storing zero in ResumeFnAddr.
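      // Illustratively (frame type name invented):
      //   store void (%f.Frame*)* null, void (%f.Frame*)** %ResumeFn.addr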
      auto *GepIndex =
          Builder.CreateStructGEP(FrameTy, FramePtr,
                                  coro::Shape::SwitchFieldIndex::Resume,
                                  "ResumeFn.addr");
      auto *NullPtr = ConstantPointerNull::get(cast<PointerType>(
          cast<PointerType>(GepIndex->getType())->getElementType()));
      Builder.CreateStore(NullPtr, GepIndex);
    } else {
      auto *GepIndex = Builder.CreateStructGEP(
          FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
      Builder.CreateStore(IndexVal, GepIndex);
    }
    Save->replaceAllUsesWith(ConstantTokenNone::get(C));
    Save->eraseFromParent();

    // Split block before and after coro.suspend and add a jump from an entry
    // switch:
    //
    //  whateverBB:
    //    whatever
    //    %0 = call i8 @llvm.coro.suspend(token none, i1 false)
    //    switch i8 %0, label %suspend [i8 0, label %resume
    //                                  i8 1, label %cleanup]
    // becomes:
    //
    //  whateverBB:
    //    whatever
    //    br label %resume.0.landing
    //
    //  resume.0: ; <--- jump from the switch in the resume.entry
    //    %0 = tail call i8 @llvm.coro.suspend(token none, i1 false)
    //    br label %resume.0.landing
    //
    //  resume.0.landing:
    //    %1 = phi i8 [-1, %whateverBB], [%0, %resume.0]
    //    switch i8 %1, label %suspend [i8 0, label %resume
    //                                  i8 1, label %cleanup]

    auto *SuspendBB = S->getParent();
    auto *ResumeBB =
        SuspendBB->splitBasicBlock(S, "resume." + Twine(SuspendIndex));
    auto *LandingBB = ResumeBB->splitBasicBlock(
        S->getNextNode(), ResumeBB->getName() + Twine(".landing"));
    Switch->addCase(IndexVal, ResumeBB);

    cast<BranchInst>(SuspendBB->getTerminator())->setSuccessor(0, LandingBB);
    auto *PN = PHINode::Create(Builder.getInt8Ty(), 2, "", &LandingBB->front());
    S->replaceAllUsesWith(PN);
    PN->addIncoming(Builder.getInt8(-1), SuspendBB);
    PN->addIncoming(S, ResumeBB);

    ++SuspendIndex;
  }

  Builder.SetInsertPoint(UnreachBB);
  Builder.CreateUnreachable();

  Shape.SwitchLowering.ResumeEntryBlock = NewEntry;
}

// Rewrite final suspend point handling. We do not use a suspend index to
// represent the final suspend point. Instead we zero out ResumeFnAddr in the
// coroutine frame, since it is undefined behavior to resume a coroutine
// suspended at the final suspend point. Thus, in the resume function, we can
// simply remove the last case (when coro::Shape is built, the final suspend
// point (if present) is always the last element of the CoroSuspends array).
// In the destroy function, we add a code sequence to check if ResumeFnAddr
// is null, and if so, jump to the appropriate label to handle cleanup from
// the final suspend point.
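// As an illustrative sketch (value names invented), the check inserted into
// the destroy clone looks like:
//
//   %ResumeFn = load void (%f.Frame*)*, void (%f.Frame*)** %ResumeFn.addr
//   %is.final = icmp eq void (%f.Frame*)* %ResumeFn, null
//   br i1 %is.final, label %final.cleanup, label %Switch
//
// where %final.cleanup is the successor that used to be the switch's last
// case.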
void CoroCloner::handleFinalSuspend() {
  assert(Shape.ABI == coro::ABI::Switch &&
         Shape.SwitchLowering.HasFinalSuspend);
  auto *Switch = cast<SwitchInst>(VMap[Shape.SwitchLowering.ResumeSwitch]);
  auto FinalCaseIt = std::prev(Switch->case_end());
  BasicBlock *ResumeBB = FinalCaseIt->getCaseSuccessor();
  Switch->removeCase(FinalCaseIt);
  if (isSwitchDestroyFunction()) {
    BasicBlock *OldSwitchBB = Switch->getParent();
    auto *NewSwitchBB = OldSwitchBB->splitBasicBlock(Switch, "Switch");
    Builder.SetInsertPoint(OldSwitchBB->getTerminator());
    auto *GepIndex =
        Builder.CreateStructGEP(Shape.FrameTy, NewFramePtr,
                                coro::Shape::SwitchFieldIndex::Resume,
                                "ResumeFn.addr");
    auto *Load =
        Builder.CreateLoad(Shape.getSwitchResumePointerType(), GepIndex);
    auto *Cond = Builder.CreateIsNull(Load);
    Builder.CreateCondBr(Cond, ResumeBB, NewSwitchBB);
    OldSwitchBB->getTerminator()->eraseFromParent();
  }
}

static Function *createCloneDeclaration(Function &OrigF, coro::Shape &Shape,
                                        const Twine &Suffix,
                                        Module::iterator InsertBefore) {
  Module *M = OrigF.getParent();
  auto *FnTy = Shape.getResumeFunctionType();

  Function *NewF =
      Function::Create(FnTy, GlobalValue::LinkageTypes::InternalLinkage,
                       OrigF.getName() + Suffix);
  NewF->addParamAttr(0, Attribute::NonNull);

  // For the async lowering ABI we can't guarantee that the context argument is
  // not accessed via a different pointer not based on the argument.
  if (Shape.ABI != coro::ABI::Async)
    NewF->addParamAttr(0, Attribute::NoAlias);

  M->getFunctionList().insert(InsertBefore, NewF);

  return NewF;
}

/// Replace uses of the active llvm.coro.suspend.retcon/async call with the
/// arguments to the continuation function.
///
/// This assumes that the builder has a meaningful insertion point.
void CoroCloner::replaceRetconOrAsyncSuspendUses() {
  assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
         Shape.ABI == coro::ABI::Async);

  auto NewS = VMap[ActiveSuspend];
  if (NewS->use_empty()) return;

  // Copy out all the continuation arguments after the buffer pointer into
  // an easily-indexed data structure for convenience.
  SmallVector<Value *, 8> Args;
  // The async ABI includes all arguments -- including the first argument.
  bool IsAsyncABI = Shape.ABI == coro::ABI::Async;
  for (auto I = IsAsyncABI ? NewF->arg_begin() : std::next(NewF->arg_begin()),
            E = NewF->arg_end();
       I != E; ++I)
    Args.push_back(&*I);

  // If the suspend returns a single scalar value, we can just do a simple
  // replacement.
  if (!isa<StructType>(NewS->getType())) {
    assert(Args.size() == 1);
    NewS->replaceAllUsesWith(Args.front());
    return;
  }

  // Try to peephole extracts of an aggregate return.
  for (auto UI = NewS->use_begin(), UE = NewS->use_end(); UI != UE;) {
    auto EVI = dyn_cast<ExtractValueInst>((UI++)->getUser());
    if (!EVI || EVI->getNumIndices() != 1)
      continue;

    EVI->replaceAllUsesWith(Args[EVI->getIndices().front()]);
    EVI->eraseFromParent();
  }

  // If we have no remaining uses, we're done.
  if (NewS->use_empty()) return;

  // Otherwise, we need to create an aggregate.
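  // Illustratively, for a two-element result type (types invented):
  //   %agg.0 = insertvalue { i8*, i64 } undef, i8* %arg1, 0
  //   %agg.1 = insertvalue { i8*, i64 } %agg.0, i64 %arg2, 1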
  Value *Agg = UndefValue::get(NewS->getType());
  for (size_t I = 0, E = Args.size(); I != E; ++I)
    Agg = Builder.CreateInsertValue(Agg, Args[I], I);

  NewS->replaceAllUsesWith(Agg);
}

void CoroCloner::replaceCoroSuspends() {
  Value *SuspendResult;

  switch (Shape.ABI) {
  // In switch lowering, replace coro.suspend with the appropriate value
  // for the type of function we're extracting.
  // Replacing coro.suspend with (0) will result in control flow proceeding to
  // a resume label associated with a suspend point; replacing it with (1) will
  // result in control flow proceeding to a cleanup label associated with this
  // suspend point.
  case coro::ABI::Switch:
    SuspendResult = Builder.getInt8(isSwitchDestroyFunction() ? 1 : 0);
    break;

  // In async lowering there are no uses of the result.
  case coro::ABI::Async:
    return;

  // In returned-continuation lowering, the arguments from earlier
  // continuations are theoretically arbitrary, and they should have been
  // spilled.
  case coro::ABI::RetconOnce:
  case coro::ABI::Retcon:
    return;
  }

  for (AnyCoroSuspendInst *CS : Shape.CoroSuspends) {
    // The active suspend was handled earlier.
    if (CS == ActiveSuspend) continue;

    auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[CS]);
    MappedCS->replaceAllUsesWith(SuspendResult);
    MappedCS->eraseFromParent();
  }
}

void CoroCloner::replaceCoroEnds() {
  for (AnyCoroEndInst *CE : Shape.CoroEnds) {
    // We use a null call graph because there's no call graph node for
    // the cloned function yet. We'll just be rebuilding that later.
    auto *NewCE = cast<AnyCoroEndInst>(VMap[CE]);
    replaceCoroEnd(NewCE, Shape, NewFramePtr, /*in resume*/ true, nullptr);
  }
}

static void replaceSwiftErrorOps(Function &F, coro::Shape &Shape,
                                 ValueToValueMapTy *VMap) {
  Value *CachedSlot = nullptr;
  auto getSwiftErrorSlot = [&](Type *ValueTy) -> Value * {
    if (CachedSlot) {
      assert(CachedSlot->getType()->getPointerElementType() == ValueTy &&
             "multiple swifterror slots in function with different types");
      return CachedSlot;
    }

    // Check if the function has a swifterror argument.
    for (auto &Arg : F.args()) {
      if (Arg.isSwiftError()) {
        CachedSlot = &Arg;
        assert(Arg.getType()->getPointerElementType() == ValueTy &&
               "swifterror argument does not have expected type");
        return &Arg;
      }
    }

    // Create a swifterror alloca.
    IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
    auto Alloca = Builder.CreateAlloca(ValueTy);
    Alloca->setSwiftError(true);

    CachedSlot = Alloca;
    return Alloca;
  };

  for (CallInst *Op : Shape.SwiftErrorOps) {
    auto MappedOp = VMap ? cast<CallInst>((*VMap)[Op]) : Op;
    IRBuilder<> Builder(MappedOp);

    // If there are no arguments, this is a 'get' operation.
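    // With a single argument it is a 'set': the value is stored to the slot,
    // and the slot pointer itself becomes the replacement result.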
    Value *MappedResult;
    if (Op->getNumArgOperands() == 0) {
      auto ValueTy = Op->getType();
      auto Slot = getSwiftErrorSlot(ValueTy);
      MappedResult = Builder.CreateLoad(ValueTy, Slot);
    } else {
      assert(Op->getNumArgOperands() == 1);
      auto Value = MappedOp->getArgOperand(0);
      auto ValueTy = Value->getType();
      auto Slot = getSwiftErrorSlot(ValueTy);
      Builder.CreateStore(Value, Slot);
      MappedResult = Slot;
    }

    MappedOp->replaceAllUsesWith(MappedResult);
    MappedOp->eraseFromParent();
  }

  // If we're updating the original function, we've invalidated SwiftErrorOps.
  if (VMap == nullptr) {
    Shape.SwiftErrorOps.clear();
  }
}

void CoroCloner::replaceSwiftErrorOps() {
  ::replaceSwiftErrorOps(*NewF, Shape, &VMap);
}

void CoroCloner::salvageDebugInfo() {
  SmallVector<DbgDeclareInst *, 8> Worklist;
  SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;
  for (auto &BB : *NewF)
    for (auto &I : BB)
      if (auto *DDI = dyn_cast<DbgDeclareInst>(&I))
        Worklist.push_back(DDI);
  for (DbgDeclareInst *DDI : Worklist) {
    // This is a heuristic that detects declares left by CoroFrame.
    bool LoadFromFramePtr = !isa<AllocaInst>(DDI->getAddress());
    coro::salvageDebugInfo(DbgPtrAllocaCache, DDI, LoadFromFramePtr);
  }
  // Remove all salvaged dbg.declare intrinsics that became
  // either unreachable or stale due to the CoroSplit transformation.
  auto IsUnreachableBlock = [&](BasicBlock *BB) {
    return BB->hasNPredecessors(0) && BB != &NewF->getEntryBlock();
  };
  for (DbgDeclareInst *DDI : Worklist) {
    if (IsUnreachableBlock(DDI->getParent()))
      DDI->eraseFromParent();
    else if (auto *Alloca = dyn_cast_or_null<AllocaInst>(DDI->getAddress())) {
      // Count all non-debuginfo uses in reachable blocks.
      unsigned Uses = 0;
      for (auto *User : DDI->getAddress()->users())
        if (auto *I = dyn_cast<Instruction>(User))
          if (!isa<AllocaInst>(I) && !IsUnreachableBlock(I->getParent()))
            ++Uses;
      if (!Uses)
        DDI->eraseFromParent();
    }
  }
}

void CoroCloner::replaceEntryBlock() {
  // In the original function, the AllocaSpillBlock is a block immediately
  // following the allocation of the frame object which defines GEPs for
  // all the allocas that have been moved into the frame, and it ends by
  // branching to the original beginning of the coroutine. Make this
  // the entry block of the cloned function.
  auto *Entry = cast<BasicBlock>(VMap[Shape.AllocaSpillBlock]);
  auto *OldEntry = &NewF->getEntryBlock();
  Entry->setName("entry" + Suffix);
  Entry->moveBefore(OldEntry);
  Entry->getTerminator()->eraseFromParent();

  // Clear all predecessors of the new entry block. There should be
  // exactly one predecessor, which we created when splitting out
  // AllocaSpillBlock to begin with.
  assert(Entry->hasOneUse());
  auto BranchToEntry = cast<BranchInst>(Entry->user_back());
  assert(BranchToEntry->isUnconditional());
  Builder.SetInsertPoint(BranchToEntry);
  Builder.CreateUnreachable();
  BranchToEntry->eraseFromParent();

  // Branch from the entry to the appropriate place.
  Builder.SetInsertPoint(Entry);
  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    // In switch-lowering, we built a resume-entry block in the original
    // function. Make the entry block branch to this.
    auto *SwitchBB =
        cast<BasicBlock>(VMap[Shape.SwitchLowering.ResumeEntryBlock]);
    Builder.CreateBr(SwitchBB);
    break;
  }
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    // In continuation ABIs, we want to branch to immediately after the
    // active suspend point. Earlier phases will have put the suspend in its
    // own basic block, so just thread our jump directly to its successor.
    assert((Shape.ABI == coro::ABI::Async &&
            isa<CoroSuspendAsyncInst>(ActiveSuspend)) ||
           ((Shape.ABI == coro::ABI::Retcon ||
             Shape.ABI == coro::ABI::RetconOnce) &&
            isa<CoroSuspendRetconInst>(ActiveSuspend)));
    auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[ActiveSuspend]);
    auto Branch = cast<BranchInst>(MappedCS->getNextNode());
    assert(Branch->isUnconditional());
    Builder.CreateBr(Branch->getSuccessor(0));
    break;
  }
  }

  // Any alloca that's still being used but not reachable from the new entry
  // needs to be moved to the new entry.
  Function *F = OldEntry->getParent();
  DominatorTree DT{*F};
  for (auto IT = inst_begin(F), End = inst_end(F); IT != End;) {
    Instruction &I = *IT++;
    if (!isa<AllocaInst>(&I) || I.use_empty())
      continue;
    if (DT.isReachableFromEntry(I.getParent()))
      continue;
    I.moveBefore(*Entry, Entry->getFirstInsertionPt());
  }
}

/// Derive the value of the new frame pointer.
Value *CoroCloner::deriveNewFramePointer() {
  // Builder should be inserting to the front of the new entry block.

  switch (Shape.ABI) {
  // In switch-lowering, the argument is the frame pointer.
  case coro::ABI::Switch:
    return &*NewF->arg_begin();
  // In async-lowering, one of the arguments is an async context as determined
  // by the `llvm.coro.id.async` intrinsic. We can retrieve the async context
  // of the resume function from the async context projection function
  // associated with the active suspend. The frame is located immediately
  // after the async context header.
  case coro::ABI::Async: {
    auto *CalleeContext = NewF->getArg(Shape.AsyncLowering.ContextArgNo);
    auto *FramePtrTy = Shape.FrameTy->getPointerTo();
    auto *ProjectionFunc = cast<CoroSuspendAsyncInst>(ActiveSuspend)
                               ->getAsyncContextProjectionFunction();
    auto DbgLoc =
        cast<CoroSuspendAsyncInst>(VMap[ActiveSuspend])->getDebugLoc();
    // Calling i8* (i8*)
    auto *CallerContext = Builder.CreateCall(
        cast<FunctionType>(ProjectionFunc->getType()->getPointerElementType()),
        ProjectionFunc, CalleeContext);
    CallerContext->setCallingConv(ProjectionFunc->getCallingConv());
    CallerContext->setDebugLoc(DbgLoc);
    // The frame is located after the async_context header.
    auto &Context = Builder.getContext();
    auto *FramePtrAddr = Builder.CreateConstInBoundsGEP1_32(
        Type::getInt8Ty(Context), CallerContext,
        Shape.AsyncLowering.FrameOffset, "async.ctx.frameptr");
    // Inline the projection function.
    InlineFunctionInfo InlineInfo;
    auto InlineRes = InlineFunction(*CallerContext, InlineInfo);
    assert(InlineRes.isSuccess());
    (void)InlineRes;
    return Builder.CreateBitCast(FramePtrAddr, FramePtrTy);
  }
  // In continuation-lowering, the argument is the opaque storage.
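  // If the frame fits inline in that storage this is just a bitcast;
  // otherwise the storage holds a pointer to the separately allocated frame,
  // which we load here.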
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    Argument *NewStorage = &*NewF->arg_begin();
    auto FramePtrTy = Shape.FrameTy->getPointerTo();

    // If the storage is inline, just bitcast the storage to the frame type.
    if (Shape.RetconLowering.IsFrameInlineInStorage)
      return Builder.CreateBitCast(NewStorage, FramePtrTy);

    // Otherwise, load the real frame from the opaque storage.
    auto FramePtrPtr =
        Builder.CreateBitCast(NewStorage, FramePtrTy->getPointerTo());
    return Builder.CreateLoad(FramePtrTy, FramePtrPtr);
  }
  }
  llvm_unreachable("bad ABI");
}

static void addFramePointerAttrs(AttributeList &Attrs, LLVMContext &Context,
                                 unsigned ParamIndex,
                                 uint64_t Size, Align Alignment) {
  AttrBuilder ParamAttrs;
  ParamAttrs.addAttribute(Attribute::NonNull);
  ParamAttrs.addAttribute(Attribute::NoAlias);
  ParamAttrs.addAlignmentAttr(Alignment);
  ParamAttrs.addDereferenceableAttr(Size);
  Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
}

/// Clone the body of the original function into a resume function of
/// some sort.
void CoroCloner::create() {
  // Create the new function if we don't already have one.
  if (!NewF) {
    NewF = createCloneDeclaration(OrigF, Shape, Suffix,
                                  OrigF.getParent()->end());
  }

  // Replace all args with undefs. The buildCoroutineFrame algorithm has
  // already rewritten accesses to the args that occur after suspend points
  // with loads and stores to/from the coroutine frame.
  for (Argument &A : OrigF.args())
    VMap[&A] = UndefValue::get(A.getType());

  SmallVector<ReturnInst *, 4> Returns;

  // Ignore attempts to change certain attributes of the function.
  // TODO: maybe there should be a way to suppress this during cloning?
  auto savedVisibility = NewF->getVisibility();
  auto savedUnnamedAddr = NewF->getUnnamedAddr();
  auto savedDLLStorageClass = NewF->getDLLStorageClass();

  // NewF's linkage (which CloneFunctionInto does *not* change) might not
  // be compatible with the visibility of OrigF (which it *does* change),
  // so protect against that.
  auto savedLinkage = NewF->getLinkage();
  NewF->setLinkage(llvm::GlobalValue::ExternalLinkage);

  CloneFunctionInto(NewF, &OrigF, VMap, /*ModuleLevelChanges=*/true, Returns);

  NewF->setLinkage(savedLinkage);
  NewF->setVisibility(savedVisibility);
  NewF->setUnnamedAddr(savedUnnamedAddr);
  NewF->setDLLStorageClass(savedDLLStorageClass);

  auto &Context = NewF->getContext();

  // Replace the attributes of the new function:
  auto OrigAttrs = NewF->getAttributes();
  auto NewAttrs = AttributeList();

  switch (Shape.ABI) {
  case coro::ABI::Switch:
    // Bootstrap attributes by copying function attributes from the
    // original function. This should include optimization settings and so on.
    NewAttrs = NewAttrs.addAttributes(Context, AttributeList::FunctionIndex,
                                      OrigAttrs.getFnAttributes());

    addFramePointerAttrs(NewAttrs, Context, 0,
                         Shape.FrameSize, Shape.FrameAlign);
    break;
  case coro::ABI::Async:
    break;
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    // If we have a continuation prototype, just use its attributes,
    // full-stop.
    NewAttrs = Shape.RetconLowering.ResumePrototype->getAttributes();

    addFramePointerAttrs(NewAttrs, Context, 0,
                         Shape.getRetconCoroId()->getStorageSize(),
                         Shape.getRetconCoroId()->getStorageAlignment());
    break;
  }

  switch (Shape.ABI) {
  // In these ABIs, the cloned functions always return 'void', and the
  // existing return sites are meaningless. Note that for unique
  // continuations, this includes the returns associated with suspends;
  // this is fine because we can't suspend twice.
  case coro::ABI::Switch:
  case coro::ABI::RetconOnce:
    // Remove old returns.
    for (ReturnInst *Return : Returns)
      changeToUnreachable(Return, /*UseLLVMTrap=*/false);
    break;

  // With multi-suspend continuations, we'll already have eliminated the
  // original returns and inserted returns before all the suspend points,
  // so we want to leave any returns in place.
  case coro::ABI::Retcon:
    break;
  // Async lowering will insert musttail calls at all suspend points, each
  // followed by a return. Don't change those returns to unreachable because
  // that will trip up the verifier; they should be unreachable from the
  // clone anyway.
  case coro::ABI::Async:
    break;
  }

  NewF->setAttributes(NewAttrs);
  NewF->setCallingConv(Shape.getResumeFunctionCC());

  // Set up the new entry block.
  replaceEntryBlock();

  Builder.SetInsertPoint(&NewF->getEntryBlock().front());
  NewFramePtr = deriveNewFramePointer();

  // Remap frame pointer.
  Value *OldFramePtr = VMap[Shape.FramePtr];
  NewFramePtr->takeName(OldFramePtr);
  OldFramePtr->replaceAllUsesWith(NewFramePtr);

  // Remap vFrame pointer.
  auto *NewVFrame = Builder.CreateBitCast(
      NewFramePtr, Type::getInt8PtrTy(Builder.getContext()), "vFrame");
  Value *OldVFrame = cast<Value>(VMap[Shape.CoroBegin]);
  OldVFrame->replaceAllUsesWith(NewVFrame);

  switch (Shape.ABI) {
  case coro::ABI::Switch:
    // Rewrite final suspend handling as it is not done via the switch (this
    // allows us to remove the final case from the switch, since it is
    // undefined behavior to resume a coroutine suspended at the final
    // suspend point).
    if (Shape.SwitchLowering.HasFinalSuspend)
      handleFinalSuspend();
    break;
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    // Replace uses of the active suspend with the corresponding
    // continuation-function arguments.
    assert(ActiveSuspend != nullptr &&
           "no active suspend when lowering a continuation-style coroutine");
    replaceRetconOrAsyncSuspendUses();
    break;
  }

  // Handle suspends.
  replaceCoroSuspends();

  // Handle swifterror.
  replaceSwiftErrorOps();

  // Remove coro.end intrinsics.
  replaceCoroEnds();

  // Salvage debug info that points into the coroutine frame.
  salvageDebugInfo();

  // Eliminate coro.free from the clones, replacing it with 'null' in cleanup,
  // to suppress deallocation code.
  if (Shape.ABI == coro::ABI::Switch)
    coro::replaceCoroFree(cast<CoroIdInst>(VMap[Shape.CoroBegin->getId()]),
                          /*Elide=*/FKind == CoroCloner::Kind::SwitchCleanup);
}

// Create a resume clone by cloning the body of the original function, setting
// a new entry block, and replacing each coro.suspend with an appropriate value
// to force the resume or cleanup path for every suspend point.
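// Illustratively, in the resume clone every '%x = call i8 @llvm.coro.suspend'
// is replaced by the constant 0, steering the entry switch to the resume
// path, while in the destroy and cleanup clones it is replaced by 1,
// steering it to the cleanup path.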
static Function *createClone(Function &F, const Twine &Suffix,
                             coro::Shape &Shape, CoroCloner::Kind FKind) {
  CoroCloner Cloner(F, Suffix, Shape, FKind);
  Cloner.create();
  return Cloner.getFunction();
}

/// Remove calls to llvm.coro.end in the original function.
static void removeCoroEnds(const coro::Shape &Shape, CallGraph *CG) {
  for (auto End : Shape.CoroEnds) {
    replaceCoroEnd(End, Shape, Shape.FramePtr, /*in resume*/ false, CG);
  }
}

static void updateAsyncFuncPointerContextSize(coro::Shape &Shape) {
  assert(Shape.ABI == coro::ABI::Async);

  auto *FuncPtrStruct = cast<ConstantStruct>(
      Shape.AsyncLowering.AsyncFuncPointer->getInitializer());
  auto *OrigRelativeFunOffset = FuncPtrStruct->getOperand(0);
  auto *OrigContextSize = FuncPtrStruct->getOperand(1);
  auto *NewContextSize = ConstantInt::get(OrigContextSize->getType(),
                                          Shape.AsyncLowering.ContextSize);
  auto *NewFuncPtrStruct = ConstantStruct::get(
      FuncPtrStruct->getType(), OrigRelativeFunOffset, NewContextSize);

  Shape.AsyncLowering.AsyncFuncPointer->setInitializer(NewFuncPtrStruct);
}

static void replaceFrameSize(coro::Shape &Shape) {
  if (Shape.ABI == coro::ABI::Async)
    updateAsyncFuncPointerContextSize(Shape);

  if (Shape.CoroSizes.empty())
    return;

  // In the same function all coro.sizes should have the same result type.
  auto *SizeIntrin = Shape.CoroSizes.back();
  Module *M = SizeIntrin->getModule();
  const DataLayout &DL = M->getDataLayout();
  auto Size = DL.getTypeAllocSize(Shape.FrameTy);
  auto *SizeConstant = ConstantInt::get(SizeIntrin->getType(), Size);

  for (CoroSizeInst *CS : Shape.CoroSizes) {
    CS->replaceAllUsesWith(SizeConstant);
    CS->eraseFromParent();
  }
}

// Create a global constant array containing pointers to the provided functions
// and set the Info parameter of CoroBegin to point at this constant. Example:
//
//   @f.resumers = internal constant [2 x void(%f.frame*)*]
//                 [void(%f.frame*)* @f.resume, void(%f.frame*)* @f.destroy]
//   define void @f() {
//     ...
//     call i8* @llvm.coro.begin(i8* null, i32 0, i8* null,
//                    i8* bitcast([2 x void(%f.frame*)*] * @f.resumers to i8*))
//
// Assumes that all the functions have the same signature.
static void setCoroInfo(Function &F, coro::Shape &Shape,
                        ArrayRef<Function *> Fns) {
  // This only works under the switch-lowering ABI because coro elision
  // only works on the switch-lowering ABI.
  assert(Shape.ABI == coro::ABI::Switch);

  SmallVector<Constant *, 4> Args(Fns.begin(), Fns.end());
  assert(!Args.empty());
  Function *Part = *Fns.begin();
  Module *M = Part->getParent();
  auto *ArrTy = ArrayType::get(Part->getType(), Args.size());

  auto *ConstVal = ConstantArray::get(ArrTy, Args);
  auto *GV = new GlobalVariable(*M, ConstVal->getType(), /*isConstant=*/true,
                                GlobalVariable::PrivateLinkage, ConstVal,
                                F.getName() + Twine(".resumers"));

  // Update coro.begin instruction to refer to this constant.
  LLVMContext &C = F.getContext();
  auto *BC = ConstantExpr::getPointerCast(GV, Type::getInt8PtrTy(C));
  Shape.getSwitchCoroId()->setInfo(BC);
}

// Store addresses of Resume/Destroy/Cleanup functions in the coroutine frame.
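// Illustrative result (frame and function names invented):
//
//   store void (%f.Frame*)* @f.resume, void (%f.Frame*)** %resume.addr
//   store void (%f.Frame*)* %destroyOrCleanup, void (%f.Frame*)** %destroy.addr
//
// where %destroyOrCleanup selects @f.cleanup when the frame allocation was
// elided (coro.alloc returned false) and @f.destroy otherwise.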
static void updateCoroFrame(coro::Shape &Shape, Function *ResumeFn,
                            Function *DestroyFn, Function *CleanupFn) {
  assert(Shape.ABI == coro::ABI::Switch);

  IRBuilder<> Builder(Shape.FramePtr->getNextNode());
  auto *ResumeAddr = Builder.CreateStructGEP(
      Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Resume,
      "resume.addr");
  Builder.CreateStore(ResumeFn, ResumeAddr);

  Value *DestroyOrCleanupFn = DestroyFn;

  CoroIdInst *CoroId = Shape.getSwitchCoroId();
  if (CoroAllocInst *CA = CoroId->getCoroAlloc()) {
    // If there is a CoroAlloc and it returns false (meaning we elided the
    // allocation), use CleanupFn instead of DestroyFn.
    DestroyOrCleanupFn = Builder.CreateSelect(CA, DestroyFn, CleanupFn);
  }

  auto *DestroyAddr = Builder.CreateStructGEP(
      Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Destroy,
      "destroy.addr");
  Builder.CreateStore(DestroyOrCleanupFn, DestroyAddr);
}

static void postSplitCleanup(Function &F) {
  removeUnreachableBlocks(F);

  // For now, we do a mandatory verification step because we don't
  // entirely trust this pass. Note that we don't want to add a verifier
  // pass to FPM below because it will also verify all the global data.
  if (verifyFunction(F, &errs()))
    report_fatal_error("Broken function");

  legacy::FunctionPassManager FPM(F.getParent());

  FPM.add(createSCCPPass());
  FPM.add(createCFGSimplificationPass());
  FPM.add(createEarlyCSEPass());
  FPM.add(createCFGSimplificationPass());

  FPM.doInitialization();
  FPM.run(F);
  FPM.doFinalization();
}

// Assuming we arrived at the block NewBlock from the Prev instruction, store
// the PHIs' incoming values in the ResolvedValues map.
static void
scanPHIsAndUpdateValueMap(Instruction *Prev, BasicBlock *NewBlock,
                          DenseMap<Value *, Value *> &ResolvedValues) {
  auto *PrevBB = Prev->getParent();
  for (PHINode &PN : NewBlock->phis()) {
    auto V = PN.getIncomingValueForBlock(PrevBB);
    // See if we already resolved it.
    auto VI = ResolvedValues.find(V);
    if (VI != ResolvedValues.end())
      V = VI->second;
    // Remember the value.
    ResolvedValues[&PN] = V;
  }
}

// Replace a sequence of branches leading to a ret with a clone of that ret
// instruction. A suspend point is represented by a switch; track the PHI
// values and select the correct case successor when possible.
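// A minimal illustration (block names invented): starting at the 'br' in
// %start,
//
//   start:
//     br label %land
//   land:
//     %v = phi i8 [ 0, %start ]
//     switch i8 %v, label %other [ i8 0, label %exit ]
//   exit:
//     ret void
//
// the walk records %v -> 0 through the PHI, follows the switch to %exit, and
// replaces the initial 'br' with a clone of the 'ret'.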
static bool simplifyTerminatorLeadingToRet(Instruction *InitialInst) {
  DenseMap<Value *, Value *> ResolvedValues;
  BasicBlock *UnconditionalSucc = nullptr;

  Instruction *I = InitialInst;
  while (I->isTerminator() ||
         (isa<CmpInst>(I) && I->getNextNode()->isTerminator())) {
    if (isa<ReturnInst>(I)) {
      if (I != InitialInst) {
        // If InitialInst is an unconditional branch,
        // remove PHI values that come from the basic block of InitialInst.
        if (UnconditionalSucc)
          UnconditionalSucc->removePredecessor(InitialInst->getParent(), true);
        ReplaceInstWithInst(InitialInst, I->clone());
      }
      return true;
    }
    if (auto *BR = dyn_cast<BranchInst>(I)) {
      if (BR->isUnconditional()) {
        BasicBlock *BB = BR->getSuccessor(0);
        if (I == InitialInst)
          UnconditionalSucc = BB;
        scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
        I = BB->getFirstNonPHIOrDbgOrLifetime();
        continue;
      }
    } else if (auto *CondCmp = dyn_cast<CmpInst>(I)) {
      auto *BR = dyn_cast<BranchInst>(I->getNextNode());
      if (BR && BR->isConditional() && CondCmp == BR->getCondition()) {
        // If the case count of a suspended switch instruction is reduced to
        // one, then it is simplified to a CmpInst in
        // llvm::ConstantFoldTerminator, and the comparison looks like:
        //   %cond = icmp eq i8 %V, constant.
        ConstantInt *CondConst = dyn_cast<ConstantInt>(CondCmp->getOperand(1));
        if (CondConst && CondCmp->getPredicate() == CmpInst::ICMP_EQ) {
          Value *V = CondCmp->getOperand(0);
          auto it = ResolvedValues.find(V);
          if (it != ResolvedValues.end())
            V = it->second;

          if (ConstantInt *Cond0 = dyn_cast<ConstantInt>(V)) {
            BasicBlock *BB = Cond0->equalsInt(CondConst->getZExtValue())
                                 ? BR->getSuccessor(0)
                                 : BR->getSuccessor(1);
            scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
            I = BB->getFirstNonPHIOrDbgOrLifetime();
            continue;
          }
        }
      }
    } else if (auto *SI = dyn_cast<SwitchInst>(I)) {
      Value *V = SI->getCondition();
      auto it = ResolvedValues.find(V);
      if (it != ResolvedValues.end())
        V = it->second;
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
        BasicBlock *BB = SI->findCaseValue(Cond)->getCaseSuccessor();
        scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
        I = BB->getFirstNonPHIOrDbgOrLifetime();
        continue;
      }
    }
    return false;
  }
  return false;
}

// Check whether CI obeys the rules of the musttail attribute.
static bool shouldBeMustTail(const CallInst &CI, const Function &F) {
  if (CI.isInlineAsm())
    return false;

  // Match the prototype and calling convention of the resume function.
  FunctionType *CalleeTy = CI.getFunctionType();
  if (!CalleeTy->getReturnType()->isVoidTy() || (CalleeTy->getNumParams() != 1))
    return false;

  Type *CalleeParmTy = CalleeTy->getParamType(0);
  if (!CalleeParmTy->isPointerTy() ||
      (CalleeParmTy->getPointerAddressSpace() != 0))
    return false;

  if (CI.getCallingConv() != F.getCallingConv())
    return false;

  // CI must not have any ABI-impacting function attributes.
  static const Attribute::AttrKind ABIAttrs[] = {
      Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
      Attribute::Preallocated, Attribute::InReg, Attribute::Returned,
      Attribute::SwiftSelf, Attribute::SwiftError};
  AttributeList Attrs = CI.getAttributes();
  for (auto AK : ABIAttrs)
    if (Attrs.hasParamAttribute(0, AK))
      return false;

  return true;
}

// Add musttail to any resume instruction that is immediately followed by a
// suspend (i.e. ret). We do this even at -O0 to support guaranteed tail calls
// for symmetrical coroutine control transfer (C++ Coroutines TS extension).
// This transformation is done only in the resume part of the coroutine that
// has an identical signature and calling convention to the coro.resume call.
static void addMustTailToCoroResumes(Function &F) {
  bool changed = false;

  // Collect potential resume instructions.
  SmallVector<CallInst *, 4> Resumes;
  for (auto &I : instructions(F))
    if (auto *Call = dyn_cast<CallInst>(&I))
      if (shouldBeMustTail(*Call, F))
        Resumes.push_back(Call);

  // Set musttail on those that are followed by a ret instruction.
  for (CallInst *Call : Resumes)
    if (simplifyTerminatorLeadingToRet(Call->getNextNode())) {
      Call->setTailCallKind(CallInst::TCK_MustTail);
      changed = true;
    }

  if (changed)
    removeUnreachableBlocks(F);
}

// Coroutine has no suspend points. Remove heap allocation for the coroutine
// frame if possible.
static void handleNoSuspendCoroutine(coro::Shape &Shape) {
  auto *CoroBegin = Shape.CoroBegin;
  auto *CoroId = CoroBegin->getId();
  auto *AllocInst = CoroId->getCoroAlloc();
  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    auto SwitchId = cast<CoroIdInst>(CoroId);
    coro::replaceCoroFree(SwitchId, /*Elide=*/AllocInst != nullptr);
    if (AllocInst) {
      IRBuilder<> Builder(AllocInst);
      auto *Frame = Builder.CreateAlloca(Shape.FrameTy);
      Frame->setAlignment(Shape.FrameAlign);
      auto *VFrame = Builder.CreateBitCast(Frame, Builder.getInt8PtrTy());
      AllocInst->replaceAllUsesWith(Builder.getFalse());
      AllocInst->eraseFromParent();
      CoroBegin->replaceAllUsesWith(VFrame);
    } else {
      CoroBegin->replaceAllUsesWith(CoroBegin->getMem());
    }
    break;
  }
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    CoroBegin->replaceAllUsesWith(UndefValue::get(CoroBegin->getType()));
    break;
  }

  CoroBegin->eraseFromParent();
}

// SimplifySuspendPoint needs to check that there are no calls between
// coro_save and coro_suspend, since any of the calls may potentially resume
// the coroutine and if that is the case we cannot eliminate the suspend point.
static bool hasCallsInBlockBetween(Instruction *From, Instruction *To) {
  for (Instruction *I = From; I != To; I = I->getNextNode()) {
    // Assume that no intrinsic can resume the coroutine.
    if (isa<IntrinsicInst>(I))
      continue;

    if (isa<CallBase>(I))
      return true;
  }
  return false;
}

static bool hasCallsInBlocksBetween(BasicBlock *SaveBB, BasicBlock *ResDesBB) {
  SmallPtrSet<BasicBlock *, 8> Set;
  SmallVector<BasicBlock *, 8> Worklist;

  Set.insert(SaveBB);
  Worklist.push_back(ResDesBB);

  // Accumulate all blocks between SaveBB and ResDesBB. Because CoroSaveIntr
  // returns a token consumed by the suspend instruction, all blocks in between
  // will have to eventually hit SaveBB when going backwards from ResDesBB.
  while (!Worklist.empty()) {
    auto *BB = Worklist.pop_back_val();
    Set.insert(BB);
    for (auto *Pred : predecessors(BB))
      if (Set.count(Pred) == 0)
        Worklist.push_back(Pred);
  }

  // SaveBB and ResDesBB are checked separately in hasCallsBetween.
  Set.erase(SaveBB);
  Set.erase(ResDesBB);

  for (auto *BB : Set)
    if (hasCallsInBlockBetween(BB->getFirstNonPHI(), nullptr))
      return true;

  return false;
}

static bool hasCallsBetween(Instruction *Save, Instruction *ResumeOrDestroy) {
  auto *SaveBB = Save->getParent();
  auto *ResumeOrDestroyBB = ResumeOrDestroy->getParent();

  if (SaveBB == ResumeOrDestroyBB)
    return hasCallsInBlockBetween(Save->getNextNode(), ResumeOrDestroy);

  // Any calls from Save to the end of the block?
  if (hasCallsInBlockBetween(Save->getNextNode(), nullptr))
    return true;

  // Any calls from the beginning of the block up to ResumeOrDestroy?
  if (hasCallsInBlockBetween(ResumeOrDestroyBB->getFirstNonPHI(),
                             ResumeOrDestroy))
    return true;

  // Any calls in all of the blocks between SaveBB and ResumeOrDestroyBB?
  if (hasCallsInBlocksBetween(SaveBB, ResumeOrDestroyBB))
    return true;

  return false;
}

// If a SuspendIntrin is preceded by Resume or Destroy, we can eliminate the
// suspend point and replace it with normal control flow.
static bool simplifySuspendPoint(CoroSuspendInst *Suspend,
                                 CoroBeginInst *CoroBegin) {
  Instruction *Prev = Suspend->getPrevNode();
  if (!Prev) {
    auto *Pred = Suspend->getParent()->getSinglePredecessor();
    if (!Pred)
      return false;
    Prev = Pred->getTerminator();
  }

  CallBase *CB = dyn_cast<CallBase>(Prev);
  if (!CB)
    return false;

  auto *Callee = CB->getCalledOperand()->stripPointerCasts();

  // See if the callsite is for resumption or destruction of the coroutine.
  auto *SubFn = dyn_cast<CoroSubFnInst>(Callee);
  if (!SubFn)
    return false;

  // If it does not refer to the current coroutine, we cannot do anything
  // with it.
  if (SubFn->getFrame() != CoroBegin)
    return false;

  // See if the transformation is safe. Specifically, see if there are any
  // calls in between Save and CallInstr. They can potentially resume the
  // coroutine, rendering this optimization unsafe.
  auto *Save = Suspend->getCoroSave();
  if (hasCallsBetween(Save, CB))
    return false;

  // Replace llvm.coro.suspend with the value that results in resumption over
  // the resume or cleanup path.
  Suspend->replaceAllUsesWith(SubFn->getRawIndex());
  Suspend->eraseFromParent();
  Save->eraseFromParent();

  // No longer need a call to coro.resume or coro.destroy.
  if (auto *Invoke = dyn_cast<InvokeInst>(CB)) {
    BranchInst::Create(Invoke->getNormalDest(), Invoke);
  }

  // Grab the CalledValue from CB before erasing the CallInstr.
  auto *CalledValue = CB->getCalledOperand();
  CB->eraseFromParent();

  // If no users remain, remove it. Usually it is a bitcast of SubFn.
  if (CalledValue != SubFn && CalledValue->user_empty())
    if (auto *I = dyn_cast<Instruction>(CalledValue))
      I->eraseFromParent();

  // Now we are good to remove SubFn.
  if (SubFn->user_empty())
    SubFn->eraseFromParent();

  return true;
}

// Remove suspend points that are simplified.
static void simplifySuspendPoints(coro::Shape &Shape) {
  // Currently, the only simplification we do is switch-lowering-specific.
  if (Shape.ABI != coro::ABI::Switch)
    return;

  auto &S = Shape.CoroSuspends;
  size_t I = 0, N = S.size();
  if (N == 0)
    return;
  while (true) {
    auto SI = cast<CoroSuspendInst>(S[I]);
    // Leave final.suspend to handleFinalSuspend since it is undefined behavior
    // to resume a coroutine suspended at the final suspend point.
    if (!SI->isFinal() && simplifySuspendPoint(SI, Shape.CoroBegin)) {
      if (--N == I)
        break;
      std::swap(S[I], S[N]);
      continue;
    }
    if (++I == N)
      break;
  }
  S.resize(N);
}

static void splitSwitchCoroutine(Function &F, coro::Shape &Shape,
                                 SmallVectorImpl<Function *> &Clones) {
  assert(Shape.ABI == coro::ABI::Switch);

  createResumeEntryBlock(F, Shape);
  auto ResumeClone = createClone(F, ".resume", Shape,
                                 CoroCloner::Kind::SwitchResume);
  auto DestroyClone = createClone(F, ".destroy", Shape,
                                  CoroCloner::Kind::SwitchUnwind);
  auto CleanupClone = createClone(F, ".cleanup", Shape,
                                  CoroCloner::Kind::SwitchCleanup);

  postSplitCleanup(*ResumeClone);
  postSplitCleanup(*DestroyClone);
  postSplitCleanup(*CleanupClone);

  addMustTailToCoroResumes(*ResumeClone);

  // Store addresses of the resume/destroy/cleanup functions in the coroutine
  // frame.
  updateCoroFrame(Shape, ResumeClone, DestroyClone, CleanupClone);

  assert(Clones.empty());
  Clones.push_back(ResumeClone);
  Clones.push_back(DestroyClone);
  Clones.push_back(CleanupClone);

  // Create a constant array referring to the resume/destroy/cleanup functions,
  // pointed to by the Info argument of @llvm.coro.id, so that the CoroElide
  // pass can determine the correct function to call.
  setCoroInfo(F, Shape, Clones);
}

static void replaceAsyncResumeFunction(CoroSuspendAsyncInst *Suspend,
                                       Value *Continuation) {
  auto *ResumeIntrinsic = Suspend->getResumeFunction();
  auto &Context = Suspend->getParent()->getParent()->getContext();
  auto *Int8PtrTy = Type::getInt8PtrTy(Context);

  IRBuilder<> Builder(ResumeIntrinsic);
  auto *Val = Builder.CreateBitOrPointerCast(Continuation, Int8PtrTy);
  ResumeIntrinsic->replaceAllUsesWith(Val);
  ResumeIntrinsic->eraseFromParent();
  Suspend->setOperand(0, UndefValue::get(Int8PtrTy));
}

/// Coerce the arguments in \p FnArgs according to \p FnTy in \p CallArgs.
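/// Each argument whose type differs from the corresponding parameter type is
/// bitcast or pointer-cast to match; all other arguments are passed through
/// unchanged.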
static void coerceArguments(IRBuilder<> &Builder, FunctionType *FnTy,
                            ArrayRef<Value *> FnArgs,
                            SmallVectorImpl<Value *> &CallArgs) {
  size_t ArgIdx = 0;
  for (auto paramTy : FnTy->params()) {
    assert(ArgIdx < FnArgs.size());
    if (paramTy != FnArgs[ArgIdx]->getType())
      CallArgs.push_back(
          Builder.CreateBitOrPointerCast(FnArgs[ArgIdx], paramTy));
    else
      CallArgs.push_back(FnArgs[ArgIdx]);
    ++ArgIdx;
  }
}

CallInst *coro::createMustTailCall(DebugLoc Loc, Function *MustTailCallFn,
                                   ArrayRef<Value *> Arguments,
                                   IRBuilder<> &Builder) {
  auto *FnTy =
      cast<FunctionType>(MustTailCallFn->getType()->getPointerElementType());
  // Coerce the arguments; LLVM optimizations seem to ignore the types in
  // vararg functions and throw away casts in optimized mode.
  SmallVector<Value *, 8> CallArgs;
  coerceArguments(Builder, FnTy, Arguments, CallArgs);

  auto *TailCall = Builder.CreateCall(FnTy, MustTailCallFn, CallArgs);
  TailCall->setTailCallKind(CallInst::TCK_MustTail);
  TailCall->setDebugLoc(Loc);
  TailCall->setCallingConv(MustTailCallFn->getCallingConv());
  return TailCall;
}

static void splitAsyncCoroutine(Function &F, coro::Shape &Shape,
                                SmallVectorImpl<Function *> &Clones) {
  assert(Shape.ABI == coro::ABI::Async);
  assert(Clones.empty());
  // Reset various things that the optimizer might have decided it
  // "knows" about the coroutine function due to not seeing a return.
  F.removeFnAttr(Attribute::NoReturn);
  F.removeAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
  F.removeAttribute(AttributeList::ReturnIndex, Attribute::NonNull);

  auto &Context = F.getContext();
  auto *Int8PtrTy = Type::getInt8PtrTy(Context);

  auto *Id = cast<CoroIdAsyncInst>(Shape.CoroBegin->getId());
  IRBuilder<> Builder(Id);

  auto *FramePtr = Id->getStorage();
  FramePtr = Builder.CreateBitOrPointerCast(FramePtr, Int8PtrTy);
  FramePtr = Builder.CreateConstInBoundsGEP1_32(
      Type::getInt8Ty(Context), FramePtr, Shape.AsyncLowering.FrameOffset,
      "async.ctx.frameptr");

  // Map all uses of llvm.coro.begin to the allocated frame pointer.
  {
    // Make sure we don't invalidate Shape.FramePtr.
    TrackingVH<Instruction> Handle(Shape.FramePtr);
    Shape.CoroBegin->replaceAllUsesWith(FramePtr);
    Shape.FramePtr = Handle.getValPtr();
  }

  // Create all the functions in order after the main function.
  auto NextF = std::next(F.getIterator());

  // Create a continuation function for each of the suspend points.
  Clones.reserve(Shape.CoroSuspends.size());
  for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
    auto *Suspend = cast<CoroSuspendAsyncInst>(Shape.CoroSuspends[Idx]);

    // Create the clone declaration.
    auto *Continuation =
        createCloneDeclaration(F, Shape, ".resume." + Twine(Idx), NextF);
    Clones.push_back(Continuation);

    // Insert a branch to a new return block immediately before the suspend
    // point.
    auto *SuspendBB = Suspend->getParent();
    auto *NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
    auto *Branch = cast<BranchInst>(SuspendBB->getTerminator());

    // Place it before the first suspend.
    // Place the new return block before the block we just split off for the
    // suspend.
    auto *ReturnBB =
        BasicBlock::Create(F.getContext(), "coro.return", &F, NewSuspendBB);
    Branch->setSuccessor(0, ReturnBB);

    IRBuilder<> Builder(ReturnBB);

    // Insert the call to the tail call function and inline it.
    auto *Fn = Suspend->getMustTailCallFunction();
    SmallVector<Value *, 8> Args(Suspend->args());
    // Drop the suspend's leading operands (resume function, context
    // projection, and the callee itself); the rest are the call's arguments.
    auto FnArgs = ArrayRef<Value *>(Args).drop_front(3);
    auto *TailCall =
        coro::createMustTailCall(Suspend->getDebugLoc(), Fn, FnArgs, Builder);
    Builder.CreateRetVoid();
    InlineFunctionInfo FnInfo;
    auto InlineRes = InlineFunction(*TailCall, FnInfo);
    assert(InlineRes.isSuccess() && "Expected inlining to succeed");
    (void)InlineRes;

    // Replace the llvm.coro.async.resume intrinsic call.
    replaceAsyncResumeFunction(Suspend, Continuation);
  }

  assert(Clones.size() == Shape.CoroSuspends.size());
  for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
    auto *Suspend = Shape.CoroSuspends[Idx];
    auto *Clone = Clones[Idx];

    CoroCloner(F, "resume." + Twine(Idx), Shape, Clone, Suspend).create();
  }
}

static void splitRetconCoroutine(Function &F, coro::Shape &Shape,
                                 SmallVectorImpl<Function *> &Clones) {
  assert(Shape.ABI == coro::ABI::Retcon ||
         Shape.ABI == coro::ABI::RetconOnce);
  assert(Clones.empty());

  // Reset various things that the optimizer might have decided it
  // "knows" about the coroutine function due to not seeing a return.
  F.removeFnAttr(Attribute::NoReturn);
  F.removeAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
  F.removeAttribute(AttributeList::ReturnIndex, Attribute::NonNull);

  // Allocate the frame.
  auto *Id = cast<AnyCoroIdRetconInst>(Shape.CoroBegin->getId());
  Value *RawFramePtr;
  if (Shape.RetconLowering.IsFrameInlineInStorage) {
    RawFramePtr = Id->getStorage();
  } else {
    IRBuilder<> Builder(Id);

    // Determine the size of the frame.
    const DataLayout &DL = F.getParent()->getDataLayout();
    auto Size = DL.getTypeAllocSize(Shape.FrameTy);

    // Allocate. We don't need to update the call graph node because we're
    // going to recompute it from scratch after splitting.
    // FIXME: pass the required alignment
    RawFramePtr = Shape.emitAlloc(Builder, Builder.getInt64(Size), nullptr);
    RawFramePtr =
        Builder.CreateBitCast(RawFramePtr, Shape.CoroBegin->getType());

    // Stash the allocated frame pointer in the continuation storage.
    auto Dest = Builder.CreateBitCast(Id->getStorage(),
                                      RawFramePtr->getType()->getPointerTo());
    Builder.CreateStore(RawFramePtr, Dest);
  }

  // Map all uses of llvm.coro.begin to the allocated frame pointer.
  {
    // Make sure we don't invalidate Shape.FramePtr.
    TrackingVH<Instruction> Handle(Shape.FramePtr);
    Shape.CoroBegin->replaceAllUsesWith(RawFramePtr);
    Shape.FramePtr = Handle.getValPtr();
  }

  // Create a unique return block.
  BasicBlock *ReturnBB = nullptr;
  SmallVector<PHINode *, 4> ReturnPHIs;

  // Create all the functions in order after the main function.
  auto NextF = std::next(F.getIterator());
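
  // For illustration only (hypothetical names and types): for a retcon
  // coroutine with two suspend points that each yield an i32, the unified
  // return block built in the loop below looks roughly like
  //
  //   coro.return:
  //     %cont = phi void (i8*)* [ @f.resume.0, %susp0 ], [ @f.resume.1, %susp1 ]
  //     %val = phi i32 [ %x, %susp0 ], [ %y, %susp1 ]
  //     ; %cont is cast to the declared continuation type, then packed
  //     ; together with %val into the aggregate return value.
  //     ret { i8*, i32 } %ret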

  // Create a continuation function for each of the suspend points.
  Clones.reserve(Shape.CoroSuspends.size());
  for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
    auto Suspend = cast<CoroSuspendRetconInst>(Shape.CoroSuspends[i]);

    // Create the clone declaration.
    auto Continuation =
        createCloneDeclaration(F, Shape, ".resume." + Twine(i), NextF);
    Clones.push_back(Continuation);

    // Insert a branch to the unified return block immediately before
    // the suspend point.
    auto SuspendBB = Suspend->getParent();
    auto NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
    auto Branch = cast<BranchInst>(SuspendBB->getTerminator());

    // Create the unified return block.
    if (!ReturnBB) {
      // Place it before the first suspend.
      ReturnBB = BasicBlock::Create(F.getContext(), "coro.return", &F,
                                    NewSuspendBB);
      Shape.RetconLowering.ReturnBlock = ReturnBB;

      IRBuilder<> Builder(ReturnBB);

      // Create PHIs for all the return values.
      assert(ReturnPHIs.empty());

      // First, the continuation.
      ReturnPHIs.push_back(Builder.CreatePHI(Continuation->getType(),
                                             Shape.CoroSuspends.size()));

      // Next, all the directly-yielded values.
      for (auto ResultTy : Shape.getRetconResultTypes())
        ReturnPHIs.push_back(Builder.CreatePHI(ResultTy,
                                               Shape.CoroSuspends.size()));

      // Build the return value.
      auto RetTy = F.getReturnType();

      // Cast the continuation value if necessary.
      // We can't rely on the types matching up because that type would
      // have to be infinite.
      auto CastedContinuationTy =
          (ReturnPHIs.size() == 1 ? RetTy : RetTy->getStructElementType(0));
      auto *CastedContinuation =
          Builder.CreateBitCast(ReturnPHIs[0], CastedContinuationTy);

      Value *RetV;
      if (ReturnPHIs.size() == 1) {
        RetV = CastedContinuation;
      } else {
        RetV = UndefValue::get(RetTy);
        RetV = Builder.CreateInsertValue(RetV, CastedContinuation, 0);
        for (size_t I = 1, E = ReturnPHIs.size(); I != E; ++I)
          RetV = Builder.CreateInsertValue(RetV, ReturnPHIs[I], I);
      }

      Builder.CreateRet(RetV);
    }

    // Branch to the return block.
    Branch->setSuccessor(0, ReturnBB);
    ReturnPHIs[0]->addIncoming(Continuation, SuspendBB);
    size_t NextPHIIndex = 1;
    for (auto &VUse : Suspend->value_operands())
      ReturnPHIs[NextPHIIndex++]->addIncoming(&*VUse, SuspendBB);
    assert(NextPHIIndex == ReturnPHIs.size());
  }

  assert(Clones.size() == Shape.CoroSuspends.size());
  for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
    auto Suspend = Shape.CoroSuspends[i];
    auto Clone = Clones[i];

    CoroCloner(F, "resume." + Twine(i), Shape, Clone, Suspend).create();
  }
}

namespace {
class PrettyStackTraceFunction : public PrettyStackTraceEntry {
  Function &F;
public:
  PrettyStackTraceFunction(Function &F) : F(F) {}
  void print(raw_ostream &OS) const override {
    OS << "While splitting coroutine ";
    F.printAsOperand(OS, /*print type*/ false, F.getParent());
    OS << "\n";
  }
};
} // end anonymous namespace

static coro::Shape splitCoroutine(Function &F,
                                  SmallVectorImpl<Function *> &Clones,
                                  bool ReuseFrameSlot) {
  PrettyStackTraceFunction prettyStackTrace(F);
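
  // Overview of the steps below: prune unreachable blocks, compute the
  // coroutine Shape, simplify suspend points where possible, build the
  // coroutine frame, materialize the frame size, and finally perform the
  // ABI-specific split.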
  // The suspend-crossing algorithm in buildCoroutineFrame gets tripped
  // up by uses in unreachable blocks, so remove them as a first pass.
  removeUnreachableBlocks(F);

  coro::Shape Shape(F, ReuseFrameSlot);
  if (!Shape.CoroBegin)
    return Shape;

  simplifySuspendPoints(Shape);
  buildCoroutineFrame(F, Shape);
  replaceFrameSize(Shape);

  // If there are no suspend points, no split is required; just remove
  // the allocation and deallocation blocks, as they are not needed.
  if (Shape.CoroSuspends.empty()) {
    handleNoSuspendCoroutine(Shape);
  } else {
    switch (Shape.ABI) {
    case coro::ABI::Switch:
      splitSwitchCoroutine(F, Shape, Clones);
      break;
    case coro::ABI::Async:
      splitAsyncCoroutine(F, Shape, Clones);
      break;
    case coro::ABI::Retcon:
    case coro::ABI::RetconOnce:
      splitRetconCoroutine(F, Shape, Clones);
      break;
    }
  }

  // Replace all the swifterror operations in the original function.
  // This invalidates SwiftErrorOps in the Shape.
  replaceSwiftErrorOps(F, Shape, nullptr);

  return Shape;
}

static void
updateCallGraphAfterCoroutineSplit(Function &F, const coro::Shape &Shape,
                                   const SmallVectorImpl<Function *> &Clones,
                                   CallGraph &CG, CallGraphSCC &SCC) {
  if (!Shape.CoroBegin)
    return;

  removeCoroEnds(Shape, &CG);
  postSplitCleanup(F);

  // Update the call graph and add the functions we created to the SCC.
  coro::updateCallGraph(F, Clones, CG, SCC);
}

static void updateCallGraphAfterCoroutineSplit(
    LazyCallGraph::Node &N, const coro::Shape &Shape,
    const SmallVectorImpl<Function *> &Clones, LazyCallGraph::SCC &C,
    LazyCallGraph &CG, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
    FunctionAnalysisManager &FAM) {
  if (!Shape.CoroBegin)
    return;

  for (llvm::AnyCoroEndInst *End : Shape.CoroEnds) {
    auto &Context = End->getContext();
    End->replaceAllUsesWith(ConstantInt::getFalse(Context));
    End->eraseFromParent();
  }

  if (!Clones.empty()) {
    switch (Shape.ABI) {
    case coro::ABI::Switch:
      // Each clone in the Switch lowering is independent of the other clones.
      // Let the LazyCallGraph know about each one separately.
      for (Function *Clone : Clones)
        CG.addSplitFunction(N.getFunction(), *Clone);
      break;
    case coro::ABI::Async:
    case coro::ABI::Retcon:
    case coro::ABI::RetconOnce:
      // Each clone in the Async/Retcon lowering references the other clones.
      // Let the LazyCallGraph know about all of them at once.
      CG.addSplitRefRecursiveFunctions(N.getFunction(), Clones);
      break;
    }

    // Let the CGSCC infra handle the changes to the original function.
    updateCGAndAnalysisManagerForCGSCCPass(CG, C, N, AM, UR, FAM);
  }

  // Do some cleanup and let the CGSCC infra see if we've cleaned up any edges
  // to the split functions.
  postSplitCleanup(N.getFunction());
  updateCGAndAnalysisManagerForFunctionPass(CG, C, N, AM, UR, FAM);
}
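
// For illustration only: in the async and retcon lowerings a continuation
// typically materializes a pointer to the continuation that follows it
// (e.g. a hypothetical @f.resume.0 referring to @f.resume.1), so the clones
// form a mutually-referencing set and must be reported to the LazyCallGraph
// as a group, as done above.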

// When we see a coroutine for the first time, we insert an indirect call to
// a devirt trigger function and mark the coroutine as now being ready for
// splitting. Async lowering also uses this, after it has split the function,
// to restart the pipeline.
static void prepareForSplit(Function &F, CallGraph &CG,
                            bool MarkForAsyncRestart = false) {
  Module &M = *F.getParent();
  LLVMContext &Context = F.getContext();
#ifndef NDEBUG
  Function *DevirtFn = M.getFunction(CORO_DEVIRT_TRIGGER_FN);
  assert(DevirtFn && "coro.devirt.trigger function not found");
#endif

  F.addFnAttr(CORO_PRESPLIT_ATTR, MarkForAsyncRestart
                                      ? ASYNC_RESTART_AFTER_SPLIT
                                      : PREPARED_FOR_SPLIT);

  // Insert an indirect call sequence that will be devirtualized by the
  // CoroElide pass:
  //    %0 = call i8* @llvm.coro.subfn.addr(i8* null, i8 -1)
  //    %1 = bitcast i8* %0 to void(i8*)*
  //    call void %1(i8* null)
  coro::LowererBase Lowerer(M);
  Instruction *InsertPt =
      MarkForAsyncRestart ? F.getEntryBlock().getFirstNonPHIOrDbgOrLifetime()
                          : F.getEntryBlock().getTerminator();
  auto *Null = ConstantPointerNull::get(Type::getInt8PtrTy(Context));
  auto *DevirtFnAddr =
      Lowerer.makeSubFnCall(Null, CoroSubFnInst::RestartTrigger, InsertPt);
  FunctionType *FnTy = FunctionType::get(Type::getVoidTy(Context),
                                         {Type::getInt8PtrTy(Context)}, false);
  auto *IndirectCall = CallInst::Create(FnTy, DevirtFnAddr, Null, "", InsertPt);

  // Update the call graph with the indirect call we just added.
  CG[&F]->addCalledFunction(IndirectCall, CG.getCallsExternalNode());
}

// Make sure that there is a devirtualization trigger function that the
// coro-split pass uses to force a restart of the CGSCC pipeline. If the devirt
// trigger function is not found, we will create one and add it to the current
// SCC.
static void createDevirtTriggerFunc(CallGraph &CG, CallGraphSCC &SCC) {
  Module &M = CG.getModule();
  if (M.getFunction(CORO_DEVIRT_TRIGGER_FN))
    return;

  LLVMContext &C = M.getContext();
  auto *FnTy = FunctionType::get(Type::getVoidTy(C), Type::getInt8PtrTy(C),
                                 /*isVarArg=*/false);
  Function *DevirtFn =
      Function::Create(FnTy, GlobalValue::LinkageTypes::PrivateLinkage,
                       CORO_DEVIRT_TRIGGER_FN, &M);
  DevirtFn->addFnAttr(Attribute::AlwaysInline);
  auto *Entry = BasicBlock::Create(C, "entry", DevirtFn);
  ReturnInst::Create(C, Entry);

  auto *Node = CG.getOrInsertFunction(DevirtFn);

  SmallVector<CallGraphNode *, 8> Nodes(SCC.begin(), SCC.end());
  Nodes.push_back(Node);
  SCC.initialize(Nodes);
}

/// Replace a call to llvm.coro.prepare.retcon.
static void replacePrepare(CallInst *Prepare, LazyCallGraph &CG,
                           LazyCallGraph::SCC &C) {
  auto CastFn = Prepare->getArgOperand(0); // as an i8*
  auto Fn = CastFn->stripPointerCasts();   // as its original type

  // Attempt to peephole this pattern:
  //    %0 = bitcast [[TYPE]] @some_function to i8*
  //    %1 = call @llvm.coro.prepare.retcon(i8* %0)
  //    %2 = bitcast %1 to [[TYPE]]
  // ==>
  //    %2 = @some_function
  for (auto UI = Prepare->use_begin(), UE = Prepare->use_end(); UI != UE;) {
    // Look for bitcasts back to the original function type.
    auto *Cast = dyn_cast<BitCastInst>((UI++)->getUser());
    if (!Cast || Cast->getType() != Fn->getType())
      continue;

    // Replace and remove the cast.
    Cast->replaceAllUsesWith(Fn);
    Cast->eraseFromParent();
  }
  // Replace any remaining uses with the function as an i8*.
  // This can never directly be a callee, so we don't need to update CG.
  Prepare->replaceAllUsesWith(CastFn);
  Prepare->eraseFromParent();

  // Kill dead bitcasts.
  while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
    if (!Cast->use_empty())
      break;
    CastFn = Cast->getOperand(0);
    Cast->eraseFromParent();
  }
}

/// Replace a call to llvm.coro.prepare.retcon.
static void replacePrepare(CallInst *Prepare, CallGraph &CG) {
  auto CastFn = Prepare->getArgOperand(0); // as an i8*
  auto Fn = CastFn->stripPointerCasts();   // as its original type

  // Find call graph nodes for the preparation.
  CallGraphNode *PrepareUserNode = nullptr, *FnNode = nullptr;
  if (auto ConcreteFn = dyn_cast<Function>(Fn)) {
    PrepareUserNode = CG[Prepare->getFunction()];
    FnNode = CG[ConcreteFn];
  }

  // Attempt to peephole this pattern:
  //    %0 = bitcast [[TYPE]] @some_function to i8*
  //    %1 = call @llvm.coro.prepare.retcon(i8* %0)
  //    %2 = bitcast %1 to [[TYPE]]
  // ==>
  //    %2 = @some_function
  for (auto UI = Prepare->use_begin(), UE = Prepare->use_end(); UI != UE;) {
    // Look for bitcasts back to the original function type.
    auto *Cast = dyn_cast<BitCastInst>((UI++)->getUser());
    if (!Cast || Cast->getType() != Fn->getType())
      continue;

    // Check whether the replacement will introduce new direct calls.
    // If so, we'll need to update the call graph.
    if (PrepareUserNode) {
      for (auto &Use : Cast->uses()) {
        if (auto *CB = dyn_cast<CallBase>(Use.getUser())) {
          if (!CB->isCallee(&Use))
            continue;
          PrepareUserNode->removeCallEdgeFor(*CB);
          PrepareUserNode->addCalledFunction(CB, FnNode);
        }
      }
    }

    // Replace and remove the cast.
    Cast->replaceAllUsesWith(Fn);
    Cast->eraseFromParent();
  }

  // Replace any remaining uses with the function as an i8*.
  // This can never directly be a callee, so we don't need to update CG.
  Prepare->replaceAllUsesWith(CastFn);
  Prepare->eraseFromParent();

  // Kill dead bitcasts.
  while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
    if (!Cast->use_empty())
      break;
    CastFn = Cast->getOperand(0);
    Cast->eraseFromParent();
  }
}

static bool replaceAllPrepares(Function *PrepareFn, LazyCallGraph &CG,
                               LazyCallGraph::SCC &C) {
  bool Changed = false;
  for (auto PI = PrepareFn->use_begin(), PE = PrepareFn->use_end(); PI != PE;) {
    // Intrinsics can only be used in calls.
    auto *Prepare = cast<CallInst>((PI++)->getUser());
    replacePrepare(Prepare, CG, C);
    Changed = true;
  }

  return Changed;
}
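
// For illustration only (hypothetical IR): a frontend-emitted prepare barrier
// typically looks like
//    %0 = bitcast [[TYPE]] @g to i8*
//    %1 = call i8* @llvm.coro.prepare.retcon(i8* %0)
//    %2 = bitcast i8* %1 to [[TYPE]]
//    call ... %2(...)
// Removing the barrier exposes a direct call to @g, which IPO may then
// operate on once @g itself has been split.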

/// Remove calls to llvm.coro.prepare.retcon, a barrier meant to prevent
/// IPO from operating on calls to a retcon coroutine before it's been
/// split. This is only safe to do after we've split all retcon
/// coroutines in the module. We can do this in this pass because this
/// pass does promise to split all retcon coroutines (as opposed to
/// switch coroutines, which are lowered in multiple stages).
static bool replaceAllPrepares(Function *PrepareFn, CallGraph &CG) {
  bool Changed = false;
  for (auto PI = PrepareFn->use_begin(), PE = PrepareFn->use_end(); PI != PE;) {
    // Intrinsics can only be used in calls.
    auto *Prepare = cast<CallInst>((PI++)->getUser());
    replacePrepare(Prepare, CG);
    Changed = true;
  }

  return Changed;
}

static bool declaresCoroSplitIntrinsics(const Module &M) {
  return coro::declaresIntrinsics(M, {"llvm.coro.begin",
                                      "llvm.coro.prepare.retcon",
                                      "llvm.coro.prepare.async"});
}

static void addPrepareFunction(const Module &M,
                               SmallVectorImpl<Function *> &Fns,
                               StringRef Name) {
  auto *PrepareFn = M.getFunction(Name);
  if (PrepareFn && !PrepareFn->use_empty())
    Fns.push_back(PrepareFn);
}

PreservedAnalyses CoroSplitPass::run(LazyCallGraph::SCC &C,
                                     CGSCCAnalysisManager &AM,
                                     LazyCallGraph &CG, CGSCCUpdateResult &UR) {
  // NB: One invariant of a valid LazyCallGraph::SCC is that it must contain a
  // non-zero number of nodes, so we assume that here and grab the first
  // node's function's module.
  Module &M = *C.begin()->getFunction().getParent();
  auto &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();

  if (!declaresCoroSplitIntrinsics(M))
    return PreservedAnalyses::all();

  // Check for uses of llvm.coro.prepare.retcon/async.
  SmallVector<Function *, 2> PrepareFns;
  addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
  addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");

  // Find coroutines for processing.
  SmallVector<LazyCallGraph::Node *, 4> Coroutines;
  for (LazyCallGraph::Node &N : C)
    if (N.getFunction().hasFnAttribute(CORO_PRESPLIT_ATTR))
      Coroutines.push_back(&N);

  if (Coroutines.empty() && PrepareFns.empty())
    return PreservedAnalyses::all();

  if (Coroutines.empty()) {
    for (auto *PrepareFn : PrepareFns) {
      replaceAllPrepares(PrepareFn, CG, C);
    }
  }

  // Split all the coroutines.
  for (LazyCallGraph::Node *N : Coroutines) {
    Function &F = N->getFunction();
    Attribute Attr = F.getFnAttribute(CORO_PRESPLIT_ATTR);
    StringRef Value = Attr.getValueAsString();
    LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F.getName()
                      << "' state: " << Value << "\n");
    if (Value == UNPREPARED_FOR_SPLIT) {
      // Enqueue a second iteration of the CGSCC pipeline on this SCC.
      UR.CWorklist.insert(&C);
      F.addFnAttr(CORO_PRESPLIT_ATTR, PREPARED_FOR_SPLIT);
      continue;
    }
    F.removeFnAttr(CORO_PRESPLIT_ATTR);

    SmallVector<Function *, 4> Clones;
    const coro::Shape Shape = splitCoroutine(F, Clones, ReuseFrameSlot);
    updateCallGraphAfterCoroutineSplit(*N, Shape, Clones, C, CG, AM, UR, FAM);

    if ((Shape.ABI == coro::ABI::Async || Shape.ABI == coro::ABI::Retcon ||
         Shape.ABI == coro::ABI::RetconOnce) &&
        !Shape.CoroSuspends.empty()) {
      // Run the CGSCC pipeline on the newly split functions.
      // All clones will be in the same RefSCC, so choose a random clone.
      UR.RCWorklist.insert(CG.lookupRefSCC(CG.get(*Clones[0])));
    }
  }

  if (!PrepareFns.empty()) {
    for (auto *PrepareFn : PrepareFns) {
      replaceAllPrepares(PrepareFn, CG, C);
    }
  }

  return PreservedAnalyses::none();
}

namespace {

// We present a coroutine to LLVM as an ordinary function with suspension
// points marked up with intrinsics. We let the optimizer party on the
// coroutine as a single function for as long as possible.
// Shortly before the coroutine is eligible to be inlined into its callers,
// we split up the coroutine into parts corresponding to the initial, resume
// and destroy invocations of the coroutine, add them to the current SCC and
// restart the IPO pipeline to optimize the coroutine subfunctions we
// extracted before proceeding to the caller of the coroutine.
struct CoroSplitLegacy : public CallGraphSCCPass {
  static char ID; // Pass identification, replacement for typeid

  CoroSplitLegacy(bool ReuseFrameSlot = false)
      : CallGraphSCCPass(ID), ReuseFrameSlot(ReuseFrameSlot) {
    initializeCoroSplitLegacyPass(*PassRegistry::getPassRegistry());
  }

  bool Run = false;
  bool ReuseFrameSlot;

  // A coroutine is identified by the presence of the coro.begin intrinsic;
  // if we don't have any, this pass has nothing to do.
  bool doInitialization(CallGraph &CG) override {
    Run = declaresCoroSplitIntrinsics(CG.getModule());
    return CallGraphSCCPass::doInitialization(CG);
  }

  bool runOnSCC(CallGraphSCC &SCC) override {
    if (!Run)
      return false;

    // Check for uses of llvm.coro.prepare.retcon/async.
    SmallVector<Function *, 2> PrepareFns;
    auto &M = SCC.getCallGraph().getModule();
    addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
    addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");

    // Find coroutines for processing.
    SmallVector<Function *, 4> Coroutines;
    for (CallGraphNode *CGN : SCC)
      if (auto *F = CGN->getFunction())
        if (F->hasFnAttribute(CORO_PRESPLIT_ATTR))
          Coroutines.push_back(F);

    if (Coroutines.empty() && PrepareFns.empty())
      return false;

    CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();

    if (Coroutines.empty()) {
      bool Changed = false;
      for (auto *PrepareFn : PrepareFns)
        Changed |= replaceAllPrepares(PrepareFn, CG);
      return Changed;
    }

    createDevirtTriggerFunc(CG, SCC);

    // Split all the coroutines.
    for (Function *F : Coroutines) {
      Attribute Attr = F->getFnAttribute(CORO_PRESPLIT_ATTR);
      StringRef Value = Attr.getValueAsString();
      LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F->getName()
                        << "' state: " << Value << "\n");
      // Async lowering marks coroutines to trigger a restart of the pipeline
      // after it has split them.
      if (Value == ASYNC_RESTART_AFTER_SPLIT) {
        F->removeFnAttr(CORO_PRESPLIT_ATTR);
        continue;
      }
      if (Value == UNPREPARED_FOR_SPLIT) {
        prepareForSplit(*F, CG);
        continue;
      }
      F->removeFnAttr(CORO_PRESPLIT_ATTR);

      SmallVector<Function *, 4> Clones;
      const coro::Shape Shape = splitCoroutine(*F, Clones, ReuseFrameSlot);
      updateCallGraphAfterCoroutineSplit(*F, Shape, Clones, CG, SCC);
      if (Shape.ABI == coro::ABI::Async) {
        // Restart SCC passes: mark the function for the CoroElide pass, which
        // will devirtualize the trigger call and cause a restart of the SCC
        // pipeline.
        prepareForSplit(*F, CG, true /*MarkForAsyncRestart*/);
      }
    }

    for (auto *PrepareFn : PrepareFns)
      replaceAllPrepares(PrepareFn, CG);

    return true;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    CallGraphSCCPass::getAnalysisUsage(AU);
  }

  StringRef getPassName() const override { return "Coroutine Splitting"; }
};

} // end anonymous namespace

char CoroSplitLegacy::ID = 0;

INITIALIZE_PASS_BEGIN(
    CoroSplitLegacy, "coro-split",
    "Split coroutine into a set of functions driving its state machine", false,
    false)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(
    CoroSplitLegacy, "coro-split",
    "Split coroutine into a set of functions driving its state machine", false,
    false)

Pass *llvm::createCoroSplitLegacyPass(bool ReuseFrameSlot) {
  return new CoroSplitLegacy(ReuseFrameSlot);
}
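
// For illustration only (not part of the pass's interface): the split can be
// exercised in isolation from `opt`, e.g.
//    opt -enable-coroutines -coro-split -S input.ll   ; legacy pass manager
//    opt -passes=coro-split -S input.ll               ; new pass manager
// In a full optimization pipeline the pass runs within the CGSCC walk so that
// the extracted subfunctions are optimized before their callers.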