//===- CoroSplit.cpp - Converts a coroutine into a state machine ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This pass builds the coroutine frame and outlines resume and destroy parts
// of the coroutine into separate functions.
//
// We present a coroutine to LLVM as an ordinary function with suspension
// points marked up with intrinsics. We let the optimizer party on the coroutine
// as a single function for as long as possible. Shortly before the coroutine is
// eligible to be inlined into its callers, we split up the coroutine into parts
// corresponding to the initial, resume and destroy invocations of the coroutine,
// add them to the current SCC and restart the IPO pipeline to optimize the
// coroutine subfunctions we extracted before proceeding to the caller of the
// coroutine.
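//
// For illustration, under the switch-lowering ABI a coroutine f is split into
// the ramp function f plus f.resume, f.destroy and f.cleanup clones that all
// operate on a shared f.Frame object; the exact set of clones depends on the
// ABI (see CoroCloner::Kind below).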
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Coroutines/CoroSplit.h"
#include "CoroInstr.h"
#include "CoroInternal.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/CallGraphUpdater.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <iterator>

using namespace llvm;

#define DEBUG_TYPE "coro-split"

namespace {

/// A little helper class for building the coroutine clone functions.
class CoroCloner {
public:
  enum class Kind {
    /// The shared resume function for a switch lowering.
    SwitchResume,

    /// The shared unwind function for a switch lowering.
    SwitchUnwind,

    /// The shared cleanup function for a switch lowering.
    SwitchCleanup,

    /// An individual continuation function.
    Continuation,

    /// An async resume function.
    Async,
  };

private:
  Function &OrigF;
  Function *NewF;
  const Twine &Suffix;
  coro::Shape &Shape;
  Kind FKind;
  ValueToValueMapTy VMap;
  IRBuilder<> Builder;
  Value *NewFramePtr = nullptr;

  /// The active suspend instruction; meaningful only for continuation and async
  /// ABIs.
  AnyCoroSuspendInst *ActiveSuspend = nullptr;

public:
  /// Create a cloner for a switch lowering.
  CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
             Kind FKind)
    : OrigF(OrigF), NewF(nullptr), Suffix(Suffix), Shape(Shape),
      FKind(FKind), Builder(OrigF.getContext()) {
    assert(Shape.ABI == coro::ABI::Switch);
  }

  /// Create a cloner for a continuation lowering.
  CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
             Function *NewF, AnyCoroSuspendInst *ActiveSuspend)
      : OrigF(OrigF), NewF(NewF), Suffix(Suffix), Shape(Shape),
        FKind(Shape.ABI == coro::ABI::Async ? Kind::Async : Kind::Continuation),
        Builder(OrigF.getContext()), ActiveSuspend(ActiveSuspend) {
    assert(Shape.ABI == coro::ABI::Retcon ||
           Shape.ABI == coro::ABI::RetconOnce || Shape.ABI == coro::ABI::Async);
    assert(NewF && "need existing function for continuation");
    assert(ActiveSuspend && "need active suspend point for continuation");
  }

  Function *getFunction() const {
    assert(NewF != nullptr && "declaration not yet set");
    return NewF;
  }

  void create();

private:
  bool isSwitchDestroyFunction() {
    switch (FKind) {
    case Kind::Async:
    case Kind::Continuation:
    case Kind::SwitchResume:
      return false;
    case Kind::SwitchUnwind:
    case Kind::SwitchCleanup:
      return true;
    }
    llvm_unreachable("Unknown CoroCloner::Kind enum");
  }

  void replaceEntryBlock();
  Value *deriveNewFramePointer();
  void replaceRetconOrAsyncSuspendUses();
  void replaceCoroSuspends();
  void replaceCoroEnds();
  void replaceSwiftErrorOps();
  void salvageDebugInfo();
  void handleFinalSuspend();
};

} // end anonymous namespace

static void maybeFreeRetconStorage(IRBuilder<> &Builder,
                                   const coro::Shape &Shape, Value *FramePtr,
                                   CallGraph *CG) {
  assert(Shape.ABI == coro::ABI::Retcon ||
         Shape.ABI == coro::ABI::RetconOnce);
  if (Shape.RetconLowering.IsFrameInlineInStorage)
    return;

  Shape.emitDealloc(Builder, FramePtr, CG);
}

/// Replace an llvm.coro.end.async.
/// Will inline the must tail call function call if there is one.
/// \returns true if cleanup of the coro.end block is needed, false otherwise.
static bool replaceCoroEndAsync(AnyCoroEndInst *End) {
  IRBuilder<> Builder(End);

  auto *EndAsync = dyn_cast<CoroAsyncEndInst>(End);
  if (!EndAsync) {
    Builder.CreateRetVoid();
    return true /*needs cleanup of coro.end block*/;
  }

  auto *MustTailCallFunc = EndAsync->getMustTailCallFunction();
  if (!MustTailCallFunc) {
    Builder.CreateRetVoid();
    return true /*needs cleanup of coro.end block*/;
  }

  // Move the must tail call from the predecessor block into the end block.
  auto *CoroEndBlock = End->getParent();
  auto *MustTailCallFuncBlock = CoroEndBlock->getSinglePredecessor();
  assert(MustTailCallFuncBlock && "Must have a single predecessor block");
  auto It = MustTailCallFuncBlock->getTerminator()->getIterator();
  auto *MustTailCall = cast<CallInst>(&*std::prev(It));
  CoroEndBlock->getInstList().splice(
      End->getIterator(), MustTailCallFuncBlock->getInstList(), MustTailCall);

  // Insert the return instruction.
  Builder.SetInsertPoint(End);
  Builder.CreateRetVoid();
  InlineFunctionInfo FnInfo;

  // Remove the rest of the block, by splitting it into an unreachable block.
  auto *BB = End->getParent();
  BB->splitBasicBlock(End);
  BB->getTerminator()->eraseFromParent();

  auto InlineRes = InlineFunction(*MustTailCall, FnInfo);
  assert(InlineRes.isSuccess() && "Expected inlining to succeed");
  (void)InlineRes;

  // We have cleaned up the coro.end block above.
  return false;
}

/// Replace a non-unwind call to llvm.coro.end.
static void replaceFallthroughCoroEnd(AnyCoroEndInst *End,
                                      const coro::Shape &Shape, Value *FramePtr,
                                      bool InResume, CallGraph *CG) {
  // Start inserting right before the coro.end.
  IRBuilder<> Builder(End);

  // Create the return instruction.
  switch (Shape.ABI) {
  // The cloned functions in switch-lowering always return void.
  case coro::ABI::Switch:
    // coro.end doesn't immediately end the coroutine in the main function
    // in this lowering, because we need to deallocate the coroutine.
    if (!InResume)
      return;
    Builder.CreateRetVoid();
    break;

  // In async lowering this returns.
  case coro::ABI::Async: {
    bool CoroEndBlockNeedsCleanup = replaceCoroEndAsync(End);
    if (!CoroEndBlockNeedsCleanup)
      return;
    break;
  }

  // In unique continuation lowering, the continuations always return void.
  // But we may have implicitly allocated storage.
  case coro::ABI::RetconOnce:
    maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
    Builder.CreateRetVoid();
    break;

  // In non-unique continuation lowering, we signal completion by returning
  // a null continuation.
  case coro::ABI::Retcon: {
    maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
    auto RetTy = Shape.getResumeFunctionType()->getReturnType();
    auto RetStructTy = dyn_cast<StructType>(RetTy);
    PointerType *ContinuationTy =
      cast<PointerType>(RetStructTy ? RetStructTy->getElementType(0) : RetTy);

    Value *ReturnValue = ConstantPointerNull::get(ContinuationTy);
    if (RetStructTy) {
      ReturnValue = Builder.CreateInsertValue(UndefValue::get(RetStructTy),
                                              ReturnValue, 0);
    }
    Builder.CreateRet(ReturnValue);
    break;
  }
  }

  // Remove the rest of the block, by splitting it into an unreachable block.
  auto *BB = End->getParent();
  BB->splitBasicBlock(End);
  BB->getTerminator()->eraseFromParent();
}

/// Replace an unwind call to llvm.coro.end.
static void replaceUnwindCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
                                 Value *FramePtr, bool InResume,
                                 CallGraph *CG) {
  IRBuilder<> Builder(End);

  switch (Shape.ABI) {
  // In switch-lowering, this does nothing in the main function.
  case coro::ABI::Switch:
    if (!InResume)
      return;
    break;
  // In async lowering this does nothing.
  case coro::ABI::Async:
    break;
  // In continuation-lowering, this frees the continuation storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
    break;
  }

  // If coro.end has an associated bundle, add cleanupret instruction.
  if (auto Bundle = End->getOperandBundle(LLVMContext::OB_funclet)) {
    auto *FromPad = cast<CleanupPadInst>(Bundle->Inputs[0]);
    auto *CleanupRet = Builder.CreateCleanupRet(FromPad, nullptr);
    End->getParent()->splitBasicBlock(End);
    CleanupRet->getParent()->getTerminator()->eraseFromParent();
  }
}

static void replaceCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
                           Value *FramePtr, bool InResume, CallGraph *CG) {
  if (End->isUnwind())
    replaceUnwindCoroEnd(End, Shape, FramePtr, InResume, CG);
  else
    replaceFallthroughCoroEnd(End, Shape, FramePtr, InResume, CG);

  auto &Context = End->getContext();
  End->replaceAllUsesWith(InResume ? ConstantInt::getTrue(Context)
                                   : ConstantInt::getFalse(Context));
  End->eraseFromParent();
}

// Create an entry block for a resume function with a switch that will jump to
// suspend points.
static void createResumeEntryBlock(Function &F, coro::Shape &Shape) {
  assert(Shape.ABI == coro::ABI::Switch);
  LLVMContext &C = F.getContext();

  // resume.entry:
  //  %index.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32 0,
  //  i32 2
  //  %index = load i32, i32* %index.addr
  //  switch i32 %index, label %unreachable [
  //    i32 0, label %resume.0
  //    i32 1, label %resume.1
  //    ...
  //  ]

  auto *NewEntry = BasicBlock::Create(C, "resume.entry", &F);
  auto *UnreachBB = BasicBlock::Create(C, "unreachable", &F);

  IRBuilder<> Builder(NewEntry);
  auto *FramePtr = Shape.FramePtr;
  auto *FrameTy = Shape.FrameTy;
  auto *GepIndex = Builder.CreateStructGEP(
      FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
  auto *Index = Builder.CreateLoad(Shape.getIndexType(), GepIndex, "index");
  auto *Switch =
      Builder.CreateSwitch(Index, UnreachBB, Shape.CoroSuspends.size());
  Shape.SwitchLowering.ResumeSwitch = Switch;

  size_t SuspendIndex = 0;
  for (auto *AnyS : Shape.CoroSuspends) {
    auto *S = cast<CoroSuspendInst>(AnyS);
    ConstantInt *IndexVal = Shape.getIndex(SuspendIndex);

    // Replace CoroSave with a store to Index:
    //    %index.addr = getelementptr %f.frame... (index field number)
    //    store i32 0, i32* %index.addr1
    auto *Save = S->getCoroSave();
    Builder.SetInsertPoint(Save);
    if (S->isFinal()) {
      // Final suspend point is represented by storing zero in ResumeFnAddr.
      auto *GepIndex = Builder.CreateStructGEP(FrameTy, FramePtr,
                                 coro::Shape::SwitchFieldIndex::Resume,
                                  "ResumeFn.addr");
      auto *NullPtr = ConstantPointerNull::get(cast<PointerType>(
          cast<PointerType>(GepIndex->getType())->getElementType()));
      Builder.CreateStore(NullPtr, GepIndex);
    } else {
      auto *GepIndex = Builder.CreateStructGEP(
          FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
      Builder.CreateStore(IndexVal, GepIndex);
    }
    Save->replaceAllUsesWith(ConstantTokenNone::get(C));
    Save->eraseFromParent();

    // Split block before and after coro.suspend and add a jump from an entry
    // switch:
    //
    //  whateverBB:
    //    whatever
    //    %0 = call i8 @llvm.coro.suspend(token none, i1 false)
    //    switch i8 %0, label %suspend[i8 0, label %resume
    //                                 i8 1, label %cleanup]
    // becomes:
    //
    //  whateverBB:
    //     whatever
    //     br label %resume.0.landing
    //
    //  resume.0: ; <--- jump from the switch in the resume.entry
    //     %0 = tail call i8 @llvm.coro.suspend(token none, i1 false)
    //     br label %resume.0.landing
    //
    //  resume.0.landing:
    //     %1 = phi i8[-1, %whateverBB], [%0, %resume.0]
    //     switch i8 %1, label %suspend [i8 0, label %resume
    //                                   i8 1, label %cleanup]

    auto *SuspendBB = S->getParent();
    auto *ResumeBB =
        SuspendBB->splitBasicBlock(S, "resume." + Twine(SuspendIndex));
    auto *LandingBB = ResumeBB->splitBasicBlock(
        S->getNextNode(), ResumeBB->getName() + Twine(".landing"));
    Switch->addCase(IndexVal, ResumeBB);

    cast<BranchInst>(SuspendBB->getTerminator())->setSuccessor(0, LandingBB);
    auto *PN = PHINode::Create(Builder.getInt8Ty(), 2, "", &LandingBB->front());
    S->replaceAllUsesWith(PN);
    PN->addIncoming(Builder.getInt8(-1), SuspendBB);
    PN->addIncoming(S, ResumeBB);

    ++SuspendIndex;
  }

  Builder.SetInsertPoint(UnreachBB);
  Builder.CreateUnreachable();

  Shape.SwitchLowering.ResumeEntryBlock = NewEntry;
}

// Rewrite final suspend point handling. We do not use a suspend index to
// represent the final suspend point. Instead we zero-out ResumeFnAddr in the
// coroutine frame, since it is undefined behavior to resume a coroutine
// suspended at the final suspend point. Thus, in the resume function, we can
// simply remove the last case (when coro::Shape is built, the final suspend
// point (if present) is always the last element of the CoroSuspends array).
// In the destroy function, we add a code sequence to check if ResumeFnAddr
// is null, and if so, jump to the appropriate label to handle cleanup from the
// final suspend point.
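//
// For illustration (field index and label names are schematic), the check
// inserted on the destroy path looks roughly like:
//   %ResumeFn.addr = getelementptr %f.Frame, %f.Frame* %FramePtr, i32 0, i32 0
//   %ResumeFn = load void (%f.Frame*)*, void (%f.Frame*)** %ResumeFn.addr
//   %is.final = icmp eq void (%f.Frame*)* %ResumeFn, null
//   br i1 %is.final, label %final.cleanup, label %Switch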
void CoroCloner::handleFinalSuspend() {
  assert(Shape.ABI == coro::ABI::Switch &&
         Shape.SwitchLowering.HasFinalSuspend);
  auto *Switch = cast<SwitchInst>(VMap[Shape.SwitchLowering.ResumeSwitch]);
  auto FinalCaseIt = std::prev(Switch->case_end());
  BasicBlock *ResumeBB = FinalCaseIt->getCaseSuccessor();
  Switch->removeCase(FinalCaseIt);
  if (isSwitchDestroyFunction()) {
    BasicBlock *OldSwitchBB = Switch->getParent();
    auto *NewSwitchBB = OldSwitchBB->splitBasicBlock(Switch, "Switch");
    Builder.SetInsertPoint(OldSwitchBB->getTerminator());
    auto *GepIndex = Builder.CreateStructGEP(Shape.FrameTy, NewFramePtr,
                                       coro::Shape::SwitchFieldIndex::Resume,
                                             "ResumeFn.addr");
    auto *Load = Builder.CreateLoad(Shape.getSwitchResumePointerType(),
                                    GepIndex);
    auto *Cond = Builder.CreateIsNull(Load);
    Builder.CreateCondBr(Cond, ResumeBB, NewSwitchBB);
    OldSwitchBB->getTerminator()->eraseFromParent();
  }
}

static FunctionType *
getFunctionTypeFromAsyncSuspend(AnyCoroSuspendInst *Suspend) {
  auto *AsyncSuspend = cast<CoroSuspendAsyncInst>(Suspend);
  auto *StructTy = cast<StructType>(AsyncSuspend->getType());
  auto &Context = Suspend->getParent()->getParent()->getContext();
  auto *VoidTy = Type::getVoidTy(Context);
  return FunctionType::get(VoidTy, StructTy->elements(), false);
}

static Function *createCloneDeclaration(Function &OrigF, coro::Shape &Shape,
                                        const Twine &Suffix,
                                        Module::iterator InsertBefore,
                                        AnyCoroSuspendInst *ActiveSuspend) {
  Module *M = OrigF.getParent();
  auto *FnTy = (Shape.ABI != coro::ABI::Async)
                   ? Shape.getResumeFunctionType()
                   : getFunctionTypeFromAsyncSuspend(ActiveSuspend);

  Function *NewF =
      Function::Create(FnTy, GlobalValue::LinkageTypes::InternalLinkage,
                       OrigF.getName() + Suffix);
  NewF->addParamAttr(0, Attribute::NonNull);

  // For the async lowering ABI we can't guarantee that the context argument is
  // not accessed via a different pointer not based on the argument.
  if (Shape.ABI != coro::ABI::Async)
    NewF->addParamAttr(0, Attribute::NoAlias);

  M->getFunctionList().insert(InsertBefore, NewF);

  return NewF;
}

/// Replace uses of the active llvm.coro.suspend.retcon/async call with the
/// arguments to the continuation function.
///
/// This assumes that the builder has a meaningful insertion point.
void CoroCloner::replaceRetconOrAsyncSuspendUses() {
  assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
         Shape.ABI == coro::ABI::Async);

  auto NewS = VMap[ActiveSuspend];
  if (NewS->use_empty()) return;

  // Copy out all the continuation arguments after the buffer pointer into
  // an easily-indexed data structure for convenience.
  SmallVector<Value*, 8> Args;
  // The async ABI includes all arguments -- including the first argument.
  bool IsAsyncABI = Shape.ABI == coro::ABI::Async;
  for (auto I = IsAsyncABI ? NewF->arg_begin() : std::next(NewF->arg_begin()),
            E = NewF->arg_end();
       I != E; ++I)
    Args.push_back(&*I);

  // If the suspend returns a single scalar value, we can just do a simple
  // replacement.
  if (!isa<StructType>(NewS->getType())) {
    assert(Args.size() == 1);
    NewS->replaceAllUsesWith(Args.front());
    return;
  }

  // Try to peephole extracts of an aggregate return.
  for (auto UI = NewS->use_begin(), UE = NewS->use_end(); UI != UE; ) {
    auto EVI = dyn_cast<ExtractValueInst>((UI++)->getUser());
    if (!EVI || EVI->getNumIndices() != 1)
      continue;

    EVI->replaceAllUsesWith(Args[EVI->getIndices().front()]);
    EVI->eraseFromParent();
  }

  // If we have no remaining uses, we're done.
  if (NewS->use_empty()) return;

  // Otherwise, we need to create an aggregate.
  Value *Agg = UndefValue::get(NewS->getType());
  for (size_t I = 0, E = Args.size(); I != E; ++I)
    Agg = Builder.CreateInsertValue(Agg, Args[I], I);

  NewS->replaceAllUsesWith(Agg);
}

void CoroCloner::replaceCoroSuspends() {
  Value *SuspendResult;

  switch (Shape.ABI) {
  // In switch lowering, replace coro.suspend with the appropriate value
  // for the type of function we're extracting.
  // Replacing coro.suspend with (0) will result in control flow proceeding to
  // a resume label associated with a suspend point; replacing it with (1) will
  // result in control flow proceeding to a cleanup label associated with this
  // suspend point.
  case coro::ABI::Switch:
    SuspendResult = Builder.getInt8(isSwitchDestroyFunction() ? 1 : 0);
    break;

  // In async lowering there are no uses of the result.
  case coro::ABI::Async:
    return;

  // In returned-continuation lowering, the arguments from earlier
  // continuations are theoretically arbitrary, and they should have been
  // spilled.
  case coro::ABI::RetconOnce:
  case coro::ABI::Retcon:
    return;
  }

  for (AnyCoroSuspendInst *CS : Shape.CoroSuspends) {
    // The active suspend was handled earlier.
    if (CS == ActiveSuspend) continue;

    auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[CS]);
    MappedCS->replaceAllUsesWith(SuspendResult);
    MappedCS->eraseFromParent();
  }
}

void CoroCloner::replaceCoroEnds() {
  for (AnyCoroEndInst *CE : Shape.CoroEnds) {
    // We use a null call graph because there's no call graph node for
    // the cloned function yet.  We'll just be rebuilding that later.
    auto *NewCE = cast<AnyCoroEndInst>(VMap[CE]);
    replaceCoroEnd(NewCE, Shape, NewFramePtr, /*in resume*/ true, nullptr);
  }
}

static void replaceSwiftErrorOps(Function &F, coro::Shape &Shape,
                                 ValueToValueMapTy *VMap) {
  if (Shape.ABI == coro::ABI::Async && Shape.CoroSuspends.empty())
    return;
  Value *CachedSlot = nullptr;
  auto getSwiftErrorSlot = [&](Type *ValueTy) -> Value * {
    if (CachedSlot) {
      assert(CachedSlot->getType()->getPointerElementType() == ValueTy &&
             "multiple swifterror slots in function with different types");
      return CachedSlot;
    }

    // Check if the function has a swifterror argument.
    for (auto &Arg : F.args()) {
      if (Arg.isSwiftError()) {
        CachedSlot = &Arg;
        assert(Arg.getType()->getPointerElementType() == ValueTy &&
               "swifterror argument does not have expected type");
        return &Arg;
      }
    }

    // Create a swifterror alloca.
    IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
    auto Alloca = Builder.CreateAlloca(ValueTy);
    Alloca->setSwiftError(true);

    CachedSlot = Alloca;
    return Alloca;
  };

  for (CallInst *Op : Shape.SwiftErrorOps) {
    auto MappedOp = VMap ? cast<CallInst>((*VMap)[Op]) : Op;
    IRBuilder<> Builder(MappedOp);

    // If there are no arguments, this is a 'get' operation.
    Value *MappedResult;
    if (Op->getNumArgOperands() == 0) {
      auto ValueTy = Op->getType();
      auto Slot = getSwiftErrorSlot(ValueTy);
      MappedResult = Builder.CreateLoad(ValueTy, Slot);
    } else {
      assert(Op->getNumArgOperands() == 1);
      auto Value = MappedOp->getArgOperand(0);
      auto ValueTy = Value->getType();
      auto Slot = getSwiftErrorSlot(ValueTy);
      Builder.CreateStore(Value, Slot);
      MappedResult = Slot;
    }

    MappedOp->replaceAllUsesWith(MappedResult);
    MappedOp->eraseFromParent();
  }

  // If we're updating the original function, we've invalidated SwiftErrorOps.
  if (VMap == nullptr) {
    Shape.SwiftErrorOps.clear();
  }
}

void CoroCloner::replaceSwiftErrorOps() {
  ::replaceSwiftErrorOps(*NewF, Shape, &VMap);
}

void CoroCloner::salvageDebugInfo() {
  SmallVector<DbgVariableIntrinsic *, 8> Worklist;
  SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;
  for (auto &BB : *NewF)
    for (auto &I : BB)
      if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I))
        Worklist.push_back(DVI);
  for (DbgVariableIntrinsic *DVI : Worklist)
    coro::salvageDebugInfo(DbgPtrAllocaCache, DVI, Shape.ReuseFrameSlot);

  // Remove all salvaged dbg.declare intrinsics that became
  // either unreachable or stale due to the CoroSplit transformation.
  auto IsUnreachableBlock = [&](BasicBlock *BB) {
    return BB->hasNPredecessors(0) && BB != &NewF->getEntryBlock();
  };
  for (DbgVariableIntrinsic *DVI : Worklist) {
    if (IsUnreachableBlock(DVI->getParent()))
      DVI->eraseFromParent();
    else if (dyn_cast_or_null<AllocaInst>(DVI->getVariableLocationOp(0))) {
      // Count all non-debuginfo uses in reachable blocks.
      unsigned Uses = 0;
      for (auto *User : DVI->getVariableLocationOp(0)->users())
        if (auto *I = dyn_cast<Instruction>(User))
          if (!isa<AllocaInst>(I) && !IsUnreachableBlock(I->getParent()))
            ++Uses;
      if (!Uses)
        DVI->eraseFromParent();
    }
  }
}

void CoroCloner::replaceEntryBlock() {
  // In the original function, the AllocaSpillBlock is a block immediately
  // following the allocation of the frame object which defines GEPs for
  // all the allocas that have been moved into the frame, and it ends by
  // branching to the original beginning of the coroutine.  Make this
  // the entry block of the cloned function.
  auto *Entry = cast<BasicBlock>(VMap[Shape.AllocaSpillBlock]);
  auto *OldEntry = &NewF->getEntryBlock();
  Entry->setName("entry" + Suffix);
  Entry->moveBefore(OldEntry);
  Entry->getTerminator()->eraseFromParent();

  // Clear all predecessors of the new entry block.  There should be
  // exactly one predecessor, which we created when splitting out
  // AllocaSpillBlock to begin with.
  assert(Entry->hasOneUse());
  auto BranchToEntry = cast<BranchInst>(Entry->user_back());
  assert(BranchToEntry->isUnconditional());
  Builder.SetInsertPoint(BranchToEntry);
  Builder.CreateUnreachable();
  BranchToEntry->eraseFromParent();

  // Branch from the entry to the appropriate place.
  Builder.SetInsertPoint(Entry);
  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    // In switch-lowering, we built a resume-entry block in the original
    // function.  Make the entry block branch to this.
    auto *SwitchBB =
      cast<BasicBlock>(VMap[Shape.SwitchLowering.ResumeEntryBlock]);
    Builder.CreateBr(SwitchBB);
    break;
  }
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    // In continuation ABIs, we want to branch to immediately after the
    // active suspend point.  Earlier phases will have put the suspend in its
    // own basic block, so just thread our jump directly to its successor.
    assert((Shape.ABI == coro::ABI::Async &&
            isa<CoroSuspendAsyncInst>(ActiveSuspend)) ||
           ((Shape.ABI == coro::ABI::Retcon ||
             Shape.ABI == coro::ABI::RetconOnce) &&
            isa<CoroSuspendRetconInst>(ActiveSuspend)));
    auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[ActiveSuspend]);
    auto Branch = cast<BranchInst>(MappedCS->getNextNode());
    assert(Branch->isUnconditional());
    Builder.CreateBr(Branch->getSuccessor(0));
    break;
  }
  }

  // Any static alloca that's still being used but not reachable from the new
  // entry needs to be moved to the new entry.
  Function *F = OldEntry->getParent();
  DominatorTree DT{*F};
  for (auto IT = inst_begin(F), End = inst_end(F); IT != End;) {
    Instruction &I = *IT++;
    auto *Alloca = dyn_cast<AllocaInst>(&I);
    if (!Alloca || I.use_empty())
      continue;
    if (DT.isReachableFromEntry(I.getParent()) ||
        !isa<ConstantInt>(Alloca->getArraySize()))
      continue;
    I.moveBefore(*Entry, Entry->getFirstInsertionPt());
  }
}

/// Derive the value of the new frame pointer.
Value *CoroCloner::deriveNewFramePointer() {
  // Builder should be inserting to the front of the new entry block.

  switch (Shape.ABI) {
  // In switch-lowering, the argument is the frame pointer.
  case coro::ABI::Switch:
    return &*NewF->arg_begin();
  // In async-lowering, one of the arguments is an async context as determined
  // by the `llvm.coro.id.async` intrinsic. We can retrieve the async context of
  // the resume function from the async context projection function associated
  // with the active suspend. The frame is located as a tail to the async
  // context header.
  case coro::ABI::Async: {
    auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
    auto *CalleeContext =
        NewF->getArg(ActiveAsyncSuspend->getStorageArgumentIndex());
    auto *FramePtrTy = Shape.FrameTy->getPointerTo();
    auto *ProjectionFunc =
        ActiveAsyncSuspend->getAsyncContextProjectionFunction();
    auto DbgLoc =
        cast<CoroSuspendAsyncInst>(VMap[ActiveSuspend])->getDebugLoc();
    // Calling i8* (i8*)
    auto *CallerContext = Builder.CreateCall(
        cast<FunctionType>(ProjectionFunc->getType()->getPointerElementType()),
        ProjectionFunc, CalleeContext);
    CallerContext->setCallingConv(ProjectionFunc->getCallingConv());
    CallerContext->setDebugLoc(DbgLoc);
    // The frame is located after the async_context header.
    auto &Context = Builder.getContext();
    auto *FramePtrAddr = Builder.CreateConstInBoundsGEP1_32(
        Type::getInt8Ty(Context), CallerContext,
        Shape.AsyncLowering.FrameOffset, "async.ctx.frameptr");
    // Inline the projection function.
    InlineFunctionInfo InlineInfo;
    auto InlineRes = InlineFunction(*CallerContext, InlineInfo);
    assert(InlineRes.isSuccess());
    (void)InlineRes;
    return Builder.CreateBitCast(FramePtrAddr, FramePtrTy);
  }
  // In continuation-lowering, the argument is the opaque storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    Argument *NewStorage = &*NewF->arg_begin();
    auto FramePtrTy = Shape.FrameTy->getPointerTo();

    // If the storage is inline, just bitcast the storage to the frame type.
    if (Shape.RetconLowering.IsFrameInlineInStorage)
      return Builder.CreateBitCast(NewStorage, FramePtrTy);

    // Otherwise, load the real frame from the opaque storage.
    auto FramePtrPtr =
      Builder.CreateBitCast(NewStorage, FramePtrTy->getPointerTo());
    return Builder.CreateLoad(FramePtrTy, FramePtrPtr);
  }
  }
  llvm_unreachable("bad ABI");
}

static void addFramePointerAttrs(AttributeList &Attrs, LLVMContext &Context,
                                 unsigned ParamIndex,
                                 uint64_t Size, Align Alignment) {
  AttrBuilder ParamAttrs;
  ParamAttrs.addAttribute(Attribute::NonNull);
  ParamAttrs.addAttribute(Attribute::NoAlias);
  ParamAttrs.addAlignmentAttr(Alignment);
  ParamAttrs.addDereferenceableAttr(Size);
  Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
}

/// Clone the body of the original function into a resume function of
/// some sort.
void CoroCloner::create() {
  // Create the new function if we don't already have one.
  if (!NewF) {
    NewF = createCloneDeclaration(OrigF, Shape, Suffix,
                                  OrigF.getParent()->end(), ActiveSuspend);
  }

  // Replace all args with undefs. The buildCoroutineFrame algorithm has
  // already rewritten accesses to the args that occur after suspend points
  // with loads and stores to/from the coroutine frame.
  for (Argument &A : OrigF.args())
    VMap[&A] = UndefValue::get(A.getType());

  SmallVector<ReturnInst *, 4> Returns;

  // Ignore attempts to change certain attributes of the function.
  // TODO: maybe there should be a way to suppress this during cloning?
  auto savedVisibility = NewF->getVisibility();
  auto savedUnnamedAddr = NewF->getUnnamedAddr();
  auto savedDLLStorageClass = NewF->getDLLStorageClass();

  // NewF's linkage (which CloneFunctionInto does *not* change) might not
  // be compatible with the visibility of OrigF (which it *does* change),
  // so protect against that.
  auto savedLinkage = NewF->getLinkage();
  NewF->setLinkage(llvm::GlobalValue::ExternalLinkage);

  CloneFunctionInto(NewF, &OrigF, VMap,
                    CloneFunctionChangeType::LocalChangesOnly, Returns);

  auto &Context = NewF->getContext();

  // For async functions / continuations, adjust the scope line of the
  // clone to the line number of the suspend point. The scope line is
  // associated with all pre-prologue instructions. This avoids a jump
  // in the linetable from the function declaration to the suspend point.
  if (DISubprogram *SP = NewF->getSubprogram()) {
    assert(SP != OrigF.getSubprogram() && SP->isDistinct());
    if (ActiveSuspend)
      if (auto DL = ActiveSuspend->getDebugLoc())
        SP->setScopeLine(DL->getLine());
    // Update the linkage name to reflect the modified symbol name. It
    // is necessary to update the linkage name in Swift, since the
    // mangling changes for resume functions. It might also be the
    // right thing to do in C++, but due to a limitation in LLVM's
    // AsmPrinter we can only do this if the function doesn't have an
    // abstract specification, since the DWARF backend expects the
    // abstract specification to contain the linkage name and asserts
    // that they are identical.
    if (!SP->getDeclaration() && SP->getUnit() &&
        SP->getUnit()->getSourceLanguage() == dwarf::DW_LANG_Swift)
      SP->replaceLinkageName(MDString::get(Context, NewF->getName()));
  }

  NewF->setLinkage(savedLinkage);
  NewF->setVisibility(savedVisibility);
  NewF->setUnnamedAddr(savedUnnamedAddr);
  NewF->setDLLStorageClass(savedDLLStorageClass);

  // Replace the attributes of the new function:
  auto OrigAttrs = NewF->getAttributes();
  auto NewAttrs = AttributeList();

  switch (Shape.ABI) {
  case coro::ABI::Switch:
    // Bootstrap attributes by copying function attributes from the
    // original function.  This should include optimization settings and so on.
    NewAttrs = NewAttrs.addAttributes(Context, AttributeList::FunctionIndex,
                                      OrigAttrs.getFnAttributes());

    addFramePointerAttrs(NewAttrs, Context, 0,
                         Shape.FrameSize, Shape.FrameAlign);
    break;
  case coro::ABI::Async: {
    // Transfer the original function's attributes.
    auto FnAttrs = OrigF.getAttributes().getFnAttributes();
    NewAttrs =
        NewAttrs.addAttributes(Context, AttributeList::FunctionIndex, FnAttrs);
    break;
  }
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    // If we have a continuation prototype, just use its attributes,
    // full-stop.
    NewAttrs = Shape.RetconLowering.ResumePrototype->getAttributes();

    addFramePointerAttrs(NewAttrs, Context, 0,
                         Shape.getRetconCoroId()->getStorageSize(),
                         Shape.getRetconCoroId()->getStorageAlignment());
    break;
  }

  switch (Shape.ABI) {
  // In these ABIs, the cloned functions always return 'void', and the
  // existing return sites are meaningless.  Note that for unique
  // continuations, this includes the returns associated with suspends;
  // this is fine because we can't suspend twice.
  case coro::ABI::Switch:
  case coro::ABI::RetconOnce:
    // Remove old returns.
    for (ReturnInst *Return : Returns)
      changeToUnreachable(Return, /*UseLLVMTrap=*/false);
    break;

  // With multi-suspend continuations, we'll already have eliminated the
  // original returns and inserted returns before all the suspend points,
  // so we want to leave any returns in place.
  case coro::ABI::Retcon:
    break;
  // Async lowering will insert musttail call functions at all suspend points
  // followed by a return.
  // Don't change returns to unreachable because that will trip up the verifier.
  // These returns should be unreachable from the clone.
  case coro::ABI::Async:
    break;
  }

  NewF->setAttributes(NewAttrs);
  NewF->setCallingConv(Shape.getResumeFunctionCC());

  // Set up the new entry block.
  replaceEntryBlock();

  Builder.SetInsertPoint(&NewF->getEntryBlock().front());
  NewFramePtr = deriveNewFramePointer();

  // Remap frame pointer.
  Value *OldFramePtr = VMap[Shape.FramePtr];
  NewFramePtr->takeName(OldFramePtr);
  OldFramePtr->replaceAllUsesWith(NewFramePtr);

  // Remap vFrame pointer.
  auto *NewVFrame = Builder.CreateBitCast(
      NewFramePtr, Type::getInt8PtrTy(Builder.getContext()), "vFrame");
  Value *OldVFrame = cast<Value>(VMap[Shape.CoroBegin]);
  OldVFrame->replaceAllUsesWith(NewVFrame);

  switch (Shape.ABI) {
  case coro::ABI::Switch:
    // Rewrite final suspend handling as it is not done via the switch (this
    // allows removing the final case from the switch, since it is undefined
    // behavior to resume the coroutine suspended at the final suspend point).
    if (Shape.SwitchLowering.HasFinalSuspend)
      handleFinalSuspend();
    break;
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    // Replace uses of the active suspend with the corresponding
    // continuation-function arguments.
    assert(ActiveSuspend != nullptr &&
           "no active suspend when lowering a continuation-style coroutine");
    replaceRetconOrAsyncSuspendUses();
    break;
  }

  // Handle suspends.
  replaceCoroSuspends();

  // Handle swifterror.
  replaceSwiftErrorOps();

  // Remove coro.end intrinsics.
  replaceCoroEnds();

  // Salvage debug info that points into the coroutine frame.
  salvageDebugInfo();

  // Eliminate coro.free from the clones, replacing it with 'null' in cleanup,
  // to suppress deallocation code.
  if (Shape.ABI == coro::ABI::Switch)
    coro::replaceCoroFree(cast<CoroIdInst>(VMap[Shape.CoroBegin->getId()]),
                          /*Elide=*/ FKind == CoroCloner::Kind::SwitchCleanup);
}

// Create a resume clone by cloning the body of the original function, setting
// a new entry block and replacing coro.suspend with an appropriate value to
// force resume or cleanup at every suspend point.
static Function *createClone(Function &F, const Twine &Suffix,
                             coro::Shape &Shape, CoroCloner::Kind FKind) {
  CoroCloner Cloner(F, Suffix, Shape, FKind);
  Cloner.create();
  return Cloner.getFunction();
}

/// Remove calls to llvm.coro.end in the original function.
static void removeCoroEnds(const coro::Shape &Shape, CallGraph *CG) {
  for (auto End : Shape.CoroEnds) {
    replaceCoroEnd(End, Shape, Shape.FramePtr, /*in resume*/ false, CG);
  }
}

static void updateAsyncFuncPointerContextSize(coro::Shape &Shape) {
  assert(Shape.ABI == coro::ABI::Async);

  auto *FuncPtrStruct = cast<ConstantStruct>(
      Shape.AsyncLowering.AsyncFuncPointer->getInitializer());
  auto *OrigRelativeFunOffset = FuncPtrStruct->getOperand(0);
  auto *OrigContextSize = FuncPtrStruct->getOperand(1);
  auto *NewContextSize = ConstantInt::get(OrigContextSize->getType(),
                                          Shape.AsyncLowering.ContextSize);
  auto *NewFuncPtrStruct = ConstantStruct::get(
      FuncPtrStruct->getType(), OrigRelativeFunOffset, NewContextSize);

  Shape.AsyncLowering.AsyncFuncPointer->setInitializer(NewFuncPtrStruct);
}

static void replaceFrameSize(coro::Shape &Shape) {
  if (Shape.ABI == coro::ABI::Async)
    updateAsyncFuncPointerContextSize(Shape);

  if (Shape.CoroSizes.empty())
    return;

  // In the same function all coro.sizes should have the same result type.
  auto *SizeIntrin = Shape.CoroSizes.back();
  Module *M = SizeIntrin->getModule();
  const DataLayout &DL = M->getDataLayout();
  auto Size = DL.getTypeAllocSize(Shape.FrameTy);
  auto *SizeConstant = ConstantInt::get(SizeIntrin->getType(), Size);

  for (CoroSizeInst *CS : Shape.CoroSizes) {
    CS->replaceAllUsesWith(SizeConstant);
    CS->eraseFromParent();
  }
}

// Create a global constant array containing pointers to functions provided and
// set Info parameter of CoroBegin to point at this constant. Example:
//
//   @f.resumers = internal constant [2 x void(%f.frame*)*]
//                    [void(%f.frame*)* @f.resume, void(%f.frame*)* @f.destroy]
//   define void @f() {
//     ...
//     call i8* @llvm.coro.begin(i8* null, i32 0, i8* null,
//                    i8* bitcast([2 x void(%f.frame*)*] * @f.resumers to i8*))
//
// Assumes that all the functions have the same signature.
static void setCoroInfo(Function &F, coro::Shape &Shape,
                        ArrayRef<Function *> Fns) {
  // This only works under the switch-lowering ABI because coro elision
  // only works on the switch-lowering ABI.
  assert(Shape.ABI == coro::ABI::Switch);

  SmallVector<Constant *, 4> Args(Fns.begin(), Fns.end());
  assert(!Args.empty());
  Function *Part = *Fns.begin();
  Module *M = Part->getParent();
  auto *ArrTy = ArrayType::get(Part->getType(), Args.size());

  auto *ConstVal = ConstantArray::get(ArrTy, Args);
  auto *GV = new GlobalVariable(*M, ConstVal->getType(), /*isConstant=*/true,
                                GlobalVariable::PrivateLinkage, ConstVal,
                                F.getName() + Twine(".resumers"));

  // Update coro.begin instruction to refer to this constant.
  LLVMContext &C = F.getContext();
  auto *BC = ConstantExpr::getPointerCast(GV, Type::getInt8PtrTy(C));
  Shape.getSwitchCoroId()->setInfo(BC);
}

// Store addresses of Resume/Destroy/Cleanup functions in the coroutine frame.
static void updateCoroFrame(coro::Shape &Shape, Function *ResumeFn,
                            Function *DestroyFn, Function *CleanupFn) {
  assert(Shape.ABI == coro::ABI::Switch);

  IRBuilder<> Builder(Shape.FramePtr->getNextNode());
  auto *ResumeAddr = Builder.CreateStructGEP(
      Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Resume,
      "resume.addr");
  Builder.CreateStore(ResumeFn, ResumeAddr);

  Value *DestroyOrCleanupFn = DestroyFn;

  CoroIdInst *CoroId = Shape.getSwitchCoroId();
  if (CoroAllocInst *CA = CoroId->getCoroAlloc()) {
    // If there is a CoroAlloc and it returns false (meaning we elide the
    // allocation), use CleanupFn instead of DestroyFn.
    DestroyOrCleanupFn = Builder.CreateSelect(CA, DestroyFn, CleanupFn);
  }

  auto *DestroyAddr = Builder.CreateStructGEP(
      Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Destroy,
      "destroy.addr");
  Builder.CreateStore(DestroyOrCleanupFn, DestroyAddr);
}

static void postSplitCleanup(Function &F) {
  removeUnreachableBlocks(F);

  // For now, we do a mandatory verification step because we don't
  // entirely trust this pass.  Note that we don't want to add a verifier
  // pass to FPM below because it will also verify all the global data.
  if (verifyFunction(F, &errs()))
    report_fatal_error("Broken function");

  legacy::FunctionPassManager FPM(F.getParent());

  FPM.add(createSCCPPass());
  FPM.add(createCFGSimplificationPass());
  FPM.add(createEarlyCSEPass());
  FPM.add(createCFGSimplificationPass());

  FPM.doInitialization();
  FPM.run(F);
  FPM.doFinalization();
}

// Assuming we arrived at the block NewBlock from Prev instruction, store
// PHI's incoming values in the ResolvedValues map.
static void
scanPHIsAndUpdateValueMap(Instruction *Prev, BasicBlock *NewBlock,
                          DenseMap<Value *, Value *> &ResolvedValues) {
  auto *PrevBB = Prev->getParent();
  for (PHINode &PN : NewBlock->phis()) {
    auto V = PN.getIncomingValueForBlock(PrevBB);
    // See if we already resolved it.
    auto VI = ResolvedValues.find(V);
    if (VI != ResolvedValues.end())
      V = VI->second;
    // Remember the value.
    ResolvedValues[&PN] = V;
  }
}

// Replace a sequence of branches leading to a ret with a clone of that ret
// instruction. A suspend point is represented by a switch; track the PHI
// values and select the correct case successor when possible.
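//
// For example (labels are illustrative), given the tail
//   call fastcc void %resume.fn(i8* %hdl)
//   br label %exit
// exit:
//   ret void
// the 'br label %exit' is replaced with a clone of the 'ret void', leaving
// the call immediately followed by a return.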
static bool simplifyTerminatorLeadingToRet(Instruction *InitialInst) {
  DenseMap<Value *, Value *> ResolvedValues;
  BasicBlock *UnconditionalSucc = nullptr;

  Instruction *I = InitialInst;
  while (I->isTerminator() ||
         (isa<CmpInst>(I) && I->getNextNode()->isTerminator())) {
    if (isa<ReturnInst>(I)) {
      if (I != InitialInst) {
        // If InitialInst is an unconditional branch,
        // remove PHI values that come from basic block of InitialInst
        if (UnconditionalSucc)
          UnconditionalSucc->removePredecessor(InitialInst->getParent(), true);
        ReplaceInstWithInst(InitialInst, I->clone());
      }
      return true;
    }
    if (auto *BR = dyn_cast<BranchInst>(I)) {
      if (BR->isUnconditional()) {
        BasicBlock *BB = BR->getSuccessor(0);
        if (I == InitialInst)
          UnconditionalSucc = BB;
        scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
        I = BB->getFirstNonPHIOrDbgOrLifetime();
        continue;
      }
    } else if (auto *CondCmp = dyn_cast<CmpInst>(I)) {
      auto *BR = dyn_cast<BranchInst>(I->getNextNode());
      if (BR && BR->isConditional() && CondCmp == BR->getCondition()) {
        // If the case count of a suspended switch instruction is reduced to
        // one, it is simplified to a CmpInst by llvm::ConstantFoldTerminator,
        // and the comparison looks like: %cond = icmp eq i8 %V, constant.
        ConstantInt *CondConst = dyn_cast<ConstantInt>(CondCmp->getOperand(1));
        if (CondConst && CondCmp->getPredicate() == CmpInst::ICMP_EQ) {
          Value *V = CondCmp->getOperand(0);
          auto it = ResolvedValues.find(V);
          if (it != ResolvedValues.end())
            V = it->second;

          if (ConstantInt *Cond0 = dyn_cast<ConstantInt>(V)) {
            BasicBlock *BB = Cond0->equalsInt(CondConst->getZExtValue())
                                 ? BR->getSuccessor(0)
                                 : BR->getSuccessor(1);
            scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
            I = BB->getFirstNonPHIOrDbgOrLifetime();
            continue;
          }
        }
      }
    } else if (auto *SI = dyn_cast<SwitchInst>(I)) {
      Value *V = SI->getCondition();
      auto it = ResolvedValues.find(V);
      if (it != ResolvedValues.end())
        V = it->second;
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
        BasicBlock *BB = SI->findCaseValue(Cond)->getCaseSuccessor();
        scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
        I = BB->getFirstNonPHIOrDbgOrLifetime();
        continue;
      }
    }
    return false;
  }
  return false;
}

// Check whether CI obeys the rules of the musttail attribute.
static bool shouldBeMustTail(const CallInst &CI, const Function &F) {
  if (CI.isInlineAsm())
    return false;

  // Match prototypes and calling conventions of resume function.
  FunctionType *CalleeTy = CI.getFunctionType();
  if (!CalleeTy->getReturnType()->isVoidTy() || (CalleeTy->getNumParams() != 1))
    return false;

  Type *CalleeParmTy = CalleeTy->getParamType(0);
  if (!CalleeParmTy->isPointerTy() ||
      (CalleeParmTy->getPointerAddressSpace() != 0))
    return false;

  if (CI.getCallingConv() != F.getCallingConv())
    return false;

  // CI should not have any ABI-impacting function attributes.
  static const Attribute::AttrKind ABIAttrs[] = {
      Attribute::StructRet,    Attribute::ByVal,     Attribute::InAlloca,
      Attribute::Preallocated, Attribute::InReg,     Attribute::Returned,
      Attribute::SwiftSelf,    Attribute::SwiftError};
  AttributeList Attrs = CI.getAttributes();
  for (auto AK : ABIAttrs)
    if (Attrs.hasParamAttribute(0, AK))
      return false;

  return true;
}

// Add musttail to any resume instructions that are immediately followed by a
// suspend (i.e. ret). We do this even in -O0 to support guaranteed tail calls
// for symmetrical coroutine control transfer (C++ Coroutines TS extension).
// This transformation is done only in the resume part of the coroutine that
// has an identical signature and calling convention to the coro.resume call.
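//
// For example (names are illustrative), a call immediately followed by a
// return, such as
//   call fastcc void %resume.fn(i8* %hdl)
//   ret void
// becomes
//   musttail call fastcc void %resume.fn(i8* %hdl)
//   ret void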
static void addMustTailToCoroResumes(Function &F) {
  bool changed = false;

  // Collect potential resume instructions.
  SmallVector<CallInst *, 4> Resumes;
  for (auto &I : instructions(F))
    if (auto *Call = dyn_cast<CallInst>(&I))
      if (shouldBeMustTail(*Call, F))
        Resumes.push_back(Call);

  // Set musttail on those that are followed by a ret instruction.
  for (CallInst *Call : Resumes)
    if (simplifyTerminatorLeadingToRet(Call->getNextNode())) {
      Call->setTailCallKind(CallInst::TCK_MustTail);
      changed = true;
    }

  if (changed)
    removeUnreachableBlocks(F);
}

// Coroutine has no suspend points. Remove heap allocation for the coroutine
// frame if possible.
static void handleNoSuspendCoroutine(coro::Shape &Shape) {
  auto *CoroBegin = Shape.CoroBegin;
  auto *CoroId = CoroBegin->getId();
  auto *AllocInst = CoroId->getCoroAlloc();
  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    auto SwitchId = cast<CoroIdInst>(CoroId);
    coro::replaceCoroFree(SwitchId, /*Elide=*/AllocInst != nullptr);
    if (AllocInst) {
      IRBuilder<> Builder(AllocInst);
      auto *Frame = Builder.CreateAlloca(Shape.FrameTy);
      Frame->setAlignment(Shape.FrameAlign);
      auto *VFrame = Builder.CreateBitCast(Frame, Builder.getInt8PtrTy());
      AllocInst->replaceAllUsesWith(Builder.getFalse());
      AllocInst->eraseFromParent();
      CoroBegin->replaceAllUsesWith(VFrame);
    } else {
      CoroBegin->replaceAllUsesWith(CoroBegin->getMem());
    }

    break;
  }
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    CoroBegin->replaceAllUsesWith(UndefValue::get(CoroBegin->getType()));
    break;
  }

  CoroBegin->eraseFromParent();
}

// SimplifySuspendPoint needs to check that there are no calls between
// coro.save and coro.suspend, since any of the calls may potentially resume
// the coroutine and if that is the case we cannot eliminate the suspend point.
hasCallsInBlockBetween(Instruction * From,Instruction * To)1308 static bool hasCallsInBlockBetween(Instruction *From, Instruction *To) {
1309   for (Instruction *I = From; I != To; I = I->getNextNode()) {
1310     // Assume that no intrinsic can resume the coroutine.
1311     if (isa<IntrinsicInst>(I))
1312       continue;
1313 
1314     if (isa<CallBase>(I))
1315       return true;
1316   }
1317   return false;
1318 }
1319 
1320 static bool hasCallsInBlocksBetween(BasicBlock *SaveBB, BasicBlock *ResDesBB) {
1321   SmallPtrSet<BasicBlock *, 8> Set;
1322   SmallVector<BasicBlock *, 8> Worklist;
1323 
1324   Set.insert(SaveBB);
1325   Worklist.push_back(ResDesBB);
1326 
1327   // Accumulate all blocks between SaveBB and ResDesBB. Because CoroSaveIntr
1328   // returns a token consumed by the suspend instruction, all blocks in between
1329   // will eventually have to hit SaveBB when walking backwards from ResDesBB.
1330   while (!Worklist.empty()) {
1331     auto *BB = Worklist.pop_back_val();
1332     Set.insert(BB);
1333     for (auto *Pred : predecessors(BB))
1334       if (Set.count(Pred) == 0)
1335         Worklist.push_back(Pred);
1336   }
1337 
1338   // SaveBB and ResDesBB are checked separately in hasCallsBetween.
1339   Set.erase(SaveBB);
1340   Set.erase(ResDesBB);
1341 
1342   for (auto *BB : Set)
1343     if (hasCallsInBlockBetween(BB->getFirstNonPHI(), nullptr))
1344       return true;
1345 
1346   return false;
1347 }
1348 
1349 static bool hasCallsBetween(Instruction *Save, Instruction *ResumeOrDestroy) {
1350   auto *SaveBB = Save->getParent();
1351   auto *ResumeOrDestroyBB = ResumeOrDestroy->getParent();
1352 
1353   if (SaveBB == ResumeOrDestroyBB)
1354     return hasCallsInBlockBetween(Save->getNextNode(), ResumeOrDestroy);
1355 
1356   // Any calls from Save to the end of the block?
1357   if (hasCallsInBlockBetween(Save->getNextNode(), nullptr))
1358     return true;
1359 
1360   // Any calls from the beginning of the block up to ResumeOrDestroy?
1361   if (hasCallsInBlockBetween(ResumeOrDestroyBB->getFirstNonPHI(),
1362                              ResumeOrDestroy))
1363     return true;
1364 
1365   // Any calls in all of the blocks between SaveBB and ResumeOrDestroyBB?
1366   if (hasCallsInBlocksBetween(SaveBB, ResumeOrDestroyBB))
1367     return true;
1368 
1369   return false;
1370 }
1371 
1372 // If a SuspendIntrin is preceded by a Resume or Destroy, we can eliminate the
1373 // suspend point and replace it with normal control flow.
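//
// The pattern being simplified looks roughly like this (sketch; in real IR
// the callee goes through llvm.coro.subfn.addr plus a bitcast):
//    %save = call token @llvm.coro.save(i8* %hdl)
//    %fn   = call i8* @llvm.coro.subfn.addr(i8* %hdl, i8 0)  ; 0=resume, 1=destroy
//    call void %fn(i8* %hdl)
//    %susp = call i8 @llvm.coro.suspend(token %save, i1 false)
// When the resumed/destroyed handle is this coroutine's own frame and no
// intervening call can resume it, the suspend is replaced with the constant
// index selecting the resume (or cleanup) path.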
1374 static bool simplifySuspendPoint(CoroSuspendInst *Suspend,
1375                                  CoroBeginInst *CoroBegin) {
1376   Instruction *Prev = Suspend->getPrevNode();
1377   if (!Prev) {
1378     auto *Pred = Suspend->getParent()->getSinglePredecessor();
1379     if (!Pred)
1380       return false;
1381     Prev = Pred->getTerminator();
1382   }
1383 
1384   CallBase *CB = dyn_cast<CallBase>(Prev);
1385   if (!CB)
1386     return false;
1387 
1388   auto *Callee = CB->getCalledOperand()->stripPointerCasts();
1389 
1390   // See if the callsite is for resumption or destruction of the coroutine.
1391   auto *SubFn = dyn_cast<CoroSubFnInst>(Callee);
1392   if (!SubFn)
1393     return false;
1394 
1395   // It does not refer to the current coroutine; we cannot do anything with it.
1396   if (SubFn->getFrame() != CoroBegin)
1397     return false;
1398 
1399   // See if the transformation is safe. Specifically, see if there are any
1400   // calls in between Save and CallInstr. They can potentially resume the
1401   // coroutine, rendering this optimization unsafe.
1402   auto *Save = Suspend->getCoroSave();
1403   if (hasCallsBetween(Save, CB))
1404     return false;
1405 
1406   // Replace llvm.coro.suspend with the value that results in resumption over
1407   // the resume or cleanup path.
1408   Suspend->replaceAllUsesWith(SubFn->getRawIndex());
1409   Suspend->eraseFromParent();
1410   Save->eraseFromParent();
1411 
1412   // No longer need a call to coro.resume or coro.destroy.
1413   if (auto *Invoke = dyn_cast<InvokeInst>(CB)) {
1414     BranchInst::Create(Invoke->getNormalDest(), Invoke);
1415   }
1416 
1417   // Grab the CalledValue from CB before erasing the CallInstr.
1418   auto *CalledValue = CB->getCalledOperand();
1419   CB->eraseFromParent();
1420 
1421   // If it has no more users, remove it. Usually it is a bitcast of SubFn.
1422   if (CalledValue != SubFn && CalledValue->user_empty())
1423     if (auto *I = dyn_cast<Instruction>(CalledValue))
1424       I->eraseFromParent();
1425 
1426   // Now we are good to remove SubFn.
1427   if (SubFn->user_empty())
1428     SubFn->eraseFromParent();
1429 
1430   return true;
1431 }
1432 
1433 // Remove suspend points that are simplified.
1434 static void simplifySuspendPoints(coro::Shape &Shape) {
1435   // Currently, the only simplification we do is switch-lowering-specific.
1436   if (Shape.ABI != coro::ABI::Switch)
1437     return;
1438 
1439   auto &S = Shape.CoroSuspends;
1440   size_t I = 0, N = S.size();
1441   if (N == 0)
1442     return;
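  // Compact the vector in place: suspend points that are simplified away are
  // swapped to the end and trimmed off by the resize below, so each remaining
  // entry is visited exactly once.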
1443   while (true) {
1444     auto SI = cast<CoroSuspendInst>(S[I]);
1445     // Leave final.suspend to handleFinalSuspend since it is undefined behavior
1446     // to resume a coroutine suspended at the final suspend point.
1447     if (!SI->isFinal() && simplifySuspendPoint(SI, Shape.CoroBegin)) {
1448       if (--N == I)
1449         break;
1450       std::swap(S[I], S[N]);
1451       continue;
1452     }
1453     if (++I == N)
1454       break;
1455   }
1456   S.resize(N);
1457 }
1458 
1459 static void splitSwitchCoroutine(Function &F, coro::Shape &Shape,
1460                                  SmallVectorImpl<Function *> &Clones) {
1461   assert(Shape.ABI == coro::ABI::Switch);
1462 
1463   createResumeEntryBlock(F, Shape);
1464   auto ResumeClone = createClone(F, ".resume", Shape,
1465                                  CoroCloner::Kind::SwitchResume);
1466   auto DestroyClone = createClone(F, ".destroy", Shape,
1467                                   CoroCloner::Kind::SwitchUnwind);
1468   auto CleanupClone = createClone(F, ".cleanup", Shape,
1469                                   CoroCloner::Kind::SwitchCleanup);
1470 
1471   postSplitCleanup(*ResumeClone);
1472   postSplitCleanup(*DestroyClone);
1473   postSplitCleanup(*CleanupClone);
1474 
1475   addMustTailToCoroResumes(*ResumeClone);
1476 
1477   // Store the resume/destroy/cleanup function addresses in the coroutine frame.
1478   updateCoroFrame(Shape, ResumeClone, DestroyClone, CleanupClone);
1479 
1480   assert(Clones.empty());
1481   Clones.push_back(ResumeClone);
1482   Clones.push_back(DestroyClone);
1483   Clones.push_back(CleanupClone);
1484 
1485   // Create a constant array referring to the resume/destroy/cleanup functions
1486   // pointed to by the last argument of @llvm.coro.info, so that the CoroElide
1487   // pass can determine the correct function to call.
1488   setCoroInfo(F, Shape, Clones);
1489 }
1490 
1491 static void replaceAsyncResumeFunction(CoroSuspendAsyncInst *Suspend,
1492                                        Value *Continuation) {
1493   auto *ResumeIntrinsic = Suspend->getResumeFunction();
1494   auto &Context = Suspend->getParent()->getParent()->getContext();
1495   auto *Int8PtrTy = Type::getInt8PtrTy(Context);
1496 
1497   IRBuilder<> Builder(ResumeIntrinsic);
1498   auto *Val = Builder.CreateBitOrPointerCast(Continuation, Int8PtrTy);
1499   ResumeIntrinsic->replaceAllUsesWith(Val);
1500   ResumeIntrinsic->eraseFromParent();
1501   Suspend->setOperand(CoroSuspendAsyncInst::ResumeFunctionArg,
1502                       UndefValue::get(Int8PtrTy));
1503 }
1504 
1505 /// Coerce the arguments in \p FnArgs according to \p FnTy into \p CallArgs.
1506 static void coerceArguments(IRBuilder<> &Builder, FunctionType *FnTy,
1507                             ArrayRef<Value *> FnArgs,
1508                             SmallVectorImpl<Value *> &CallArgs) {
1509   size_t ArgIdx = 0;
1510   for (auto paramTy : FnTy->params()) {
1511     assert(ArgIdx < FnArgs.size());
1512     if (paramTy != FnArgs[ArgIdx]->getType())
1513       CallArgs.push_back(
1514           Builder.CreateBitOrPointerCast(FnArgs[ArgIdx], paramTy));
1515     else
1516       CallArgs.push_back(FnArgs[ArgIdx]);
1517     ++ArgIdx;
1518   }
1519 }
1520 
1521 CallInst *coro::createMustTailCall(DebugLoc Loc, Function *MustTailCallFn,
1522                                    ArrayRef<Value *> Arguments,
1523                                    IRBuilder<> &Builder) {
1524   auto *FnTy =
1525       cast<FunctionType>(MustTailCallFn->getType()->getPointerElementType());
1526   // Coerce the arguments; LLVM optimizations seem to ignore the types in
1527   // vararg functions and throw away casts in optimized mode.
1528   SmallVector<Value *, 8> CallArgs;
1529   coerceArguments(Builder, FnTy, Arguments, CallArgs);
1530 
1531   auto *TailCall = Builder.CreateCall(FnTy, MustTailCallFn, CallArgs);
1532   TailCall->setTailCallKind(CallInst::TCK_MustTail);
1533   TailCall->setDebugLoc(Loc);
1534   TailCall->setCallingConv(MustTailCallFn->getCallingConv());
1535   return TailCall;
1536 }
1537 
1538 static void splitAsyncCoroutine(Function &F, coro::Shape &Shape,
1539                                 SmallVectorImpl<Function *> &Clones) {
1540   assert(Shape.ABI == coro::ABI::Async);
1541   assert(Clones.empty());
1542   // Reset various things that the optimizer might have decided it
1543   // "knows" about the coroutine function due to not seeing a return.
1544   F.removeFnAttr(Attribute::NoReturn);
1545   F.removeAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
1546   F.removeAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
1547 
1548   auto &Context = F.getContext();
1549   auto *Int8PtrTy = Type::getInt8PtrTy(Context);
1550 
1551   auto *Id = cast<CoroIdAsyncInst>(Shape.CoroBegin->getId());
1552   IRBuilder<> Builder(Id);
1553 
1554   auto *FramePtr = Id->getStorage();
1555   FramePtr = Builder.CreateBitOrPointerCast(FramePtr, Int8PtrTy);
1556   FramePtr = Builder.CreateConstInBoundsGEP1_32(
1557       Type::getInt8Ty(Context), FramePtr, Shape.AsyncLowering.FrameOffset,
1558       "async.ctx.frameptr");
1559 
1560   // Map all uses of llvm.coro.begin to the allocated frame pointer.
1561   {
1562     // Make sure we don't invalidate Shape.FramePtr.
1563     TrackingVH<Instruction> Handle(Shape.FramePtr);
1564     Shape.CoroBegin->replaceAllUsesWith(FramePtr);
1565     Shape.FramePtr = Handle.getValPtr();
1566   }
1567 
1568   // Create all the functions in order after the main function.
1569   auto NextF = std::next(F.getIterator());
1570 
1571   // Create a continuation function for each of the suspend points.
1572   Clones.reserve(Shape.CoroSuspends.size());
1573   for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
1574     auto *Suspend = cast<CoroSuspendAsyncInst>(Shape.CoroSuspends[Idx]);
1575 
1576     // Create the clone declaration.
1577     auto *Continuation = createCloneDeclaration(
1578         F, Shape, ".resume." + Twine(Idx), NextF, Suspend);
1579     Clones.push_back(Continuation);
1580 
1581     // Insert a branch to a new return block immediately before the suspend
1582     // point.
1583     auto *SuspendBB = Suspend->getParent();
1584     auto *NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
1585     auto *Branch = cast<BranchInst>(SuspendBB->getTerminator());
1586 
1587     // Place the new return block right before the split-off suspend block.
1588     auto *ReturnBB =
1589         BasicBlock::Create(F.getContext(), "coro.return", &F, NewSuspendBB);
1590     Branch->setSuccessor(0, ReturnBB);
1591 
1592     IRBuilder<> Builder(ReturnBB);
1593 
1594     // Insert the call to the tail call function and inline it.
1595     auto *Fn = Suspend->getMustTailCallFunction();
1596     SmallVector<Value *, 8> Args(Suspend->args());
1597     auto FnArgs = ArrayRef<Value *>(Args).drop_front(
1598         CoroSuspendAsyncInst::MustTailCallFuncArg + 1);
1599     auto *TailCall =
1600         coro::createMustTailCall(Suspend->getDebugLoc(), Fn, FnArgs, Builder);
1601     Builder.CreateRetVoid();
1602     InlineFunctionInfo FnInfo;
1603     auto InlineRes = InlineFunction(*TailCall, FnInfo);
1604     assert(InlineRes.isSuccess() && "Expected inlining to succeed");
1605     (void)InlineRes;
1606 
1607     // Replace the llvm.coro.async.resume intrinsic call.
1608     replaceAsyncResumeFunction(Suspend, Continuation);
1609   }
1610 
1611   assert(Clones.size() == Shape.CoroSuspends.size());
1612   for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
1613     auto *Suspend = Shape.CoroSuspends[Idx];
1614     auto *Clone = Clones[Idx];
1615 
1616     CoroCloner(F, "resume." + Twine(Idx), Shape, Clone, Suspend).create();
1617   }
1618 }
1619 
1620 static void splitRetconCoroutine(Function &F, coro::Shape &Shape,
1621                                  SmallVectorImpl<Function *> &Clones) {
1622   assert(Shape.ABI == coro::ABI::Retcon ||
1623          Shape.ABI == coro::ABI::RetconOnce);
1624   assert(Clones.empty());
1625 
1626   // Reset various things that the optimizer might have decided it
1627   // "knows" about the coroutine function due to not seeing a return.
1628   F.removeFnAttr(Attribute::NoReturn);
1629   F.removeAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
1630   F.removeAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
1631 
1632   // Allocate the frame.
1633   auto *Id = cast<AnyCoroIdRetconInst>(Shape.CoroBegin->getId());
1634   Value *RawFramePtr;
1635   if (Shape.RetconLowering.IsFrameInlineInStorage) {
1636     RawFramePtr = Id->getStorage();
1637   } else {
1638     IRBuilder<> Builder(Id);
1639 
1640     // Determine the size of the frame.
1641     const DataLayout &DL = F.getParent()->getDataLayout();
1642     auto Size = DL.getTypeAllocSize(Shape.FrameTy);
1643 
1644     // Allocate.  We don't need to update the call graph node because we're
1645     // going to recompute it from scratch after splitting.
1646     // FIXME: pass the required alignment
1647     RawFramePtr = Shape.emitAlloc(Builder, Builder.getInt64(Size), nullptr);
1648     RawFramePtr =
1649       Builder.CreateBitCast(RawFramePtr, Shape.CoroBegin->getType());
1650 
1651     // Stash the allocated frame pointer in the continuation storage.
1652     auto Dest = Builder.CreateBitCast(Id->getStorage(),
1653                                       RawFramePtr->getType()->getPointerTo());
1654     Builder.CreateStore(RawFramePtr, Dest);
1655   }
1656 
1657   // Map all uses of llvm.coro.begin to the allocated frame pointer.
1658   {
1659     // Make sure we don't invalidate Shape.FramePtr.
1660     TrackingVH<Instruction> Handle(Shape.FramePtr);
1661     Shape.CoroBegin->replaceAllUsesWith(RawFramePtr);
1662     Shape.FramePtr = Handle.getValPtr();
1663   }
1664 
1665   // Create a unique return block.
1666   BasicBlock *ReturnBB = nullptr;
1667   SmallVector<PHINode *, 4> ReturnPHIs;
1668 
1669   // Create all the functions in order after the main function.
1670   auto NextF = std::next(F.getIterator());
1671 
1672   // Create a continuation function for each of the suspend points.
1673   Clones.reserve(Shape.CoroSuspends.size());
1674   for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
1675     auto Suspend = cast<CoroSuspendRetconInst>(Shape.CoroSuspends[i]);
1676 
1677     // Create the clone declaration.
1678     auto Continuation =
1679         createCloneDeclaration(F, Shape, ".resume." + Twine(i), NextF, nullptr);
1680     Clones.push_back(Continuation);
1681 
1682     // Insert a branch to the unified return block immediately before
1683     // the suspend point.
1684     auto SuspendBB = Suspend->getParent();
1685     auto NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
1686     auto Branch = cast<BranchInst>(SuspendBB->getTerminator());
1687 
1688     // Create the unified return block.
1689     if (!ReturnBB) {
1690       // Place it before the first suspend.
1691       ReturnBB = BasicBlock::Create(F.getContext(), "coro.return", &F,
1692                                     NewSuspendBB);
1693       Shape.RetconLowering.ReturnBlock = ReturnBB;
1694 
1695       IRBuilder<> Builder(ReturnBB);
1696 
1697       // Create PHIs for all the return values.
1698       assert(ReturnPHIs.empty());
1699 
1700       // First, the continuation.
1701       ReturnPHIs.push_back(Builder.CreatePHI(Continuation->getType(),
1702                                              Shape.CoroSuspends.size()));
1703 
1704       // Next, all the directly-yielded values.
1705       for (auto ResultTy : Shape.getRetconResultTypes())
1706         ReturnPHIs.push_back(Builder.CreatePHI(ResultTy,
1707                                                Shape.CoroSuspends.size()));
1708 
1709       // Build the return value.
1710       auto RetTy = F.getReturnType();
1711 
1712       // Cast the continuation value if necessary.
1713       // We can't rely on the types matching up because that type would
1714       // have to be infinite.
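      // (A fully precise continuation type would mention itself: the
      // continuation returns a pointer to a function of the same shape, and
      // so on, so the value is carried as an opaque pointer and cast here.)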
1715       auto CastedContinuationTy =
1716         (ReturnPHIs.size() == 1 ? RetTy : RetTy->getStructElementType(0));
1717       auto *CastedContinuation =
1718         Builder.CreateBitCast(ReturnPHIs[0], CastedContinuationTy);
1719 
1720       Value *RetV;
1721       if (ReturnPHIs.size() == 1) {
1722         RetV = CastedContinuation;
1723       } else {
1724         RetV = UndefValue::get(RetTy);
1725         RetV = Builder.CreateInsertValue(RetV, CastedContinuation, 0);
1726         for (size_t I = 1, E = ReturnPHIs.size(); I != E; ++I)
1727           RetV = Builder.CreateInsertValue(RetV, ReturnPHIs[I], I);
1728       }
1729 
1730       Builder.CreateRet(RetV);
1731     }
1732 
1733     // Branch to the return block.
1734     Branch->setSuccessor(0, ReturnBB);
1735     ReturnPHIs[0]->addIncoming(Continuation, SuspendBB);
1736     size_t NextPHIIndex = 1;
1737     for (auto &VUse : Suspend->value_operands())
1738       ReturnPHIs[NextPHIIndex++]->addIncoming(&*VUse, SuspendBB);
1739     assert(NextPHIIndex == ReturnPHIs.size());
1740   }
1741 
1742   assert(Clones.size() == Shape.CoroSuspends.size());
1743   for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
1744     auto Suspend = Shape.CoroSuspends[i];
1745     auto Clone = Clones[i];
1746 
1747     CoroCloner(F, "resume." + Twine(i), Shape, Clone, Suspend).create();
1748   }
1749 }
1750 
1751 namespace {
1752   class PrettyStackTraceFunction : public PrettyStackTraceEntry {
1753     Function &F;
1754   public:
1755     PrettyStackTraceFunction(Function &F) : F(F) {}
1756     void print(raw_ostream &OS) const override {
1757       OS << "While splitting coroutine ";
1758       F.printAsOperand(OS, /*print type*/ false, F.getParent());
1759       OS << "\n";
1760     }
1761   };
1762 }
1763 
1764 static coro::Shape splitCoroutine(Function &F,
1765                                   SmallVectorImpl<Function *> &Clones,
1766                                   bool ReuseFrameSlot) {
1767   PrettyStackTraceFunction prettyStackTrace(F);
1768 
1769   // The suspend-crossing algorithm in buildCoroutineFrame gets tripped
1770   // up by uses in unreachable blocks, so remove them as a first pass.
1771   removeUnreachableBlocks(F);
1772 
1773   coro::Shape Shape(F, ReuseFrameSlot);
1774   if (!Shape.CoroBegin)
1775     return Shape;
1776 
1777   simplifySuspendPoints(Shape);
1778   buildCoroutineFrame(F, Shape);
1779   replaceFrameSize(Shape);
1780 
1781   // If there are no suspend points, no split is required; just remove
1782   // the allocation and deallocation blocks, they are not needed.
1783   if (Shape.CoroSuspends.empty()) {
1784     handleNoSuspendCoroutine(Shape);
1785   } else {
1786     switch (Shape.ABI) {
1787     case coro::ABI::Switch:
1788       splitSwitchCoroutine(F, Shape, Clones);
1789       break;
1790     case coro::ABI::Async:
1791       splitAsyncCoroutine(F, Shape, Clones);
1792       break;
1793     case coro::ABI::Retcon:
1794     case coro::ABI::RetconOnce:
1795       splitRetconCoroutine(F, Shape, Clones);
1796       break;
1797     }
1798   }
1799 
1800   // Replace all the swifterror operations in the original function.
1801   // This invalidates SwiftErrorOps in the Shape.
1802   replaceSwiftErrorOps(F, Shape, nullptr);
1803 
1804   return Shape;
1805 }
1806 
1807 static void
1808 updateCallGraphAfterCoroutineSplit(Function &F, const coro::Shape &Shape,
1809                                    const SmallVectorImpl<Function *> &Clones,
1810                                    CallGraph &CG, CallGraphSCC &SCC) {
1811   if (!Shape.CoroBegin)
1812     return;
1813 
1814   removeCoroEnds(Shape, &CG);
1815   postSplitCleanup(F);
1816 
1817   // Update call graph and add the functions we created to the SCC.
1818   coro::updateCallGraph(F, Clones, CG, SCC);
1819 }
1820 
1821 static void updateCallGraphAfterCoroutineSplit(
1822     LazyCallGraph::Node &N, const coro::Shape &Shape,
1823     const SmallVectorImpl<Function *> &Clones, LazyCallGraph::SCC &C,
1824     LazyCallGraph &CG, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
1825     FunctionAnalysisManager &FAM) {
1826   if (!Shape.CoroBegin)
1827     return;
1828 
1829   for (llvm::AnyCoroEndInst *End : Shape.CoroEnds) {
1830     auto &Context = End->getContext();
1831     End->replaceAllUsesWith(ConstantInt::getFalse(Context));
1832     End->eraseFromParent();
1833   }
1834 
1835   if (!Clones.empty()) {
1836     switch (Shape.ABI) {
1837     case coro::ABI::Switch:
1838       // Each clone in the Switch lowering is independent of the other clones.
1839       // Let the LazyCallGraph know about each one separately.
1840       for (Function *Clone : Clones)
1841         CG.addSplitFunction(N.getFunction(), *Clone);
1842       break;
1843     case coro::ABI::Async:
1844     case coro::ABI::Retcon:
1845     case coro::ABI::RetconOnce:
1846       // Each clone in the Async/Retcon lowering references the other clones.
1847       // Let the LazyCallGraph know about all of them at once.
1848       if (!Clones.empty())
1849         CG.addSplitRefRecursiveFunctions(N.getFunction(), Clones);
1850       break;
1851     }
1852 
1853     // Let the CGSCC infra handle the changes to the original function.
1854     updateCGAndAnalysisManagerForCGSCCPass(CG, C, N, AM, UR, FAM);
1855   }
1856 
1857   // Do some cleanup and let the CGSCC infra see if we've cleaned up any edges
1858   // to the split functions.
1859   postSplitCleanup(N.getFunction());
1860   updateCGAndAnalysisManagerForFunctionPass(CG, C, N, AM, UR, FAM);
1861 }
1862 
1863 // When we see the coroutine for the first time, we insert an indirect call to
1864 // a devirt trigger function and mark the coroutine as now being ready for
1865 // split.
1866 // Async lowering uses this after it has split the function to restart the
1867 // pipeline.
1868 static void prepareForSplit(Function &F, CallGraph &CG,
1869                             bool MarkForAsyncRestart = false) {
1870   Module &M = *F.getParent();
1871   LLVMContext &Context = F.getContext();
1872 #ifndef NDEBUG
1873   Function *DevirtFn = M.getFunction(CORO_DEVIRT_TRIGGER_FN);
1874   assert(DevirtFn && "coro.devirt.trigger function not found");
1875 #endif
1876 
1877   F.addFnAttr(CORO_PRESPLIT_ATTR, MarkForAsyncRestart
1878                                       ? ASYNC_RESTART_AFTER_SPLIT
1879                                       : PREPARED_FOR_SPLIT);
1880 
1881   // Insert an indirect call sequence that will be devirtualized by CoroElide
1882   // pass:
1883   //    %0 = call i8* @llvm.coro.subfn.addr(i8* null, i8 -1)
1884   //    %1 = bitcast i8* %0 to void(i8*)*
1885   //    call void %1(i8* null)
1886   coro::LowererBase Lowerer(M);
1887   Instruction *InsertPt =
1888       MarkForAsyncRestart ? F.getEntryBlock().getFirstNonPHIOrDbgOrLifetime()
1889                           : F.getEntryBlock().getTerminator();
1890   auto *Null = ConstantPointerNull::get(Type::getInt8PtrTy(Context));
1891   auto *DevirtFnAddr =
1892       Lowerer.makeSubFnCall(Null, CoroSubFnInst::RestartTrigger, InsertPt);
1893   FunctionType *FnTy = FunctionType::get(Type::getVoidTy(Context),
1894                                          {Type::getInt8PtrTy(Context)}, false);
1895   auto *IndirectCall = CallInst::Create(FnTy, DevirtFnAddr, Null, "", InsertPt);
1896 
1897   // Update the call graph with the indirect call we just added.
1898   CG[&F]->addCalledFunction(IndirectCall, CG.getCallsExternalNode());
1899 }
1900 
1901 // Make sure that there is a devirtualization trigger function that the
1902 // coro-split pass uses to force a restart of the CGSCC pipeline. If the devirt
1903 // trigger function is not found, we will create one and add it to the current
1904 // SCC.
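//
// The trigger function created below looks roughly like this (sketch; the
// actual symbol name comes from CORO_DEVIRT_TRIGGER_FN):
//    define private void @coro.devirt.trigger(i8*) alwaysinline {
//    entry:
//      ret void
//    }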
1905 static void createDevirtTriggerFunc(CallGraph &CG, CallGraphSCC &SCC) {
1906   Module &M = CG.getModule();
1907   if (M.getFunction(CORO_DEVIRT_TRIGGER_FN))
1908     return;
1909 
1910   LLVMContext &C = M.getContext();
1911   auto *FnTy = FunctionType::get(Type::getVoidTy(C), Type::getInt8PtrTy(C),
1912                                  /*isVarArg=*/false);
1913   Function *DevirtFn =
1914       Function::Create(FnTy, GlobalValue::LinkageTypes::PrivateLinkage,
1915                        CORO_DEVIRT_TRIGGER_FN, &M);
1916   DevirtFn->addFnAttr(Attribute::AlwaysInline);
1917   auto *Entry = BasicBlock::Create(C, "entry", DevirtFn);
1918   ReturnInst::Create(C, Entry);
1919 
1920   auto *Node = CG.getOrInsertFunction(DevirtFn);
1921 
1922   SmallVector<CallGraphNode *, 8> Nodes(SCC.begin(), SCC.end());
1923   Nodes.push_back(Node);
1924   SCC.initialize(Nodes);
1925 }
1926 
1927 /// Replace a call to llvm.coro.prepare.retcon.
1928 static void replacePrepare(CallInst *Prepare, LazyCallGraph &CG,
1929                            LazyCallGraph::SCC &C) {
1930   auto CastFn = Prepare->getArgOperand(0); // as an i8*
1931   auto Fn = CastFn->stripPointerCasts();   // as its original type
1932 
1933   // Attempt to peephole this pattern:
1934   //    %0 = bitcast [[TYPE]] @some_function to i8*
1935   //    %1 = call @llvm.coro.prepare.retcon(i8* %0)
1936   //    %2 = bitcast %1 to [[TYPE]]
1937   // ==>
1938   //    %2 = @some_function
1939   for (auto UI = Prepare->use_begin(), UE = Prepare->use_end(); UI != UE;) {
1940     // Look for bitcasts back to the original function type.
1941     auto *Cast = dyn_cast<BitCastInst>((UI++)->getUser());
1942     if (!Cast || Cast->getType() != Fn->getType())
1943       continue;
1944 
1945     // Replace and remove the cast.
1946     Cast->replaceAllUsesWith(Fn);
1947     Cast->eraseFromParent();
1948   }
1949 
1950   // Replace any remaining uses with the function as an i8*.
1951   // This can never directly be a callee, so we don't need to update CG.
1952   Prepare->replaceAllUsesWith(CastFn);
1953   Prepare->eraseFromParent();
1954 
1955   // Kill dead bitcasts.
1956   while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
1957     if (!Cast->use_empty())
1958       break;
1959     CastFn = Cast->getOperand(0);
1960     Cast->eraseFromParent();
1961   }
1962 }
1963 /// Replace a call to llvm.coro.prepare.retcon.
1964 static void replacePrepare(CallInst *Prepare, CallGraph &CG) {
1965   auto CastFn = Prepare->getArgOperand(0); // as an i8*
1966   auto Fn = CastFn->stripPointerCasts(); // as its original type
1967 
1968   // Find call graph nodes for the preparation.
1969   CallGraphNode *PrepareUserNode = nullptr, *FnNode = nullptr;
1970   if (auto ConcreteFn = dyn_cast<Function>(Fn)) {
1971     PrepareUserNode = CG[Prepare->getFunction()];
1972     FnNode = CG[ConcreteFn];
1973   }
1974 
1975   // Attempt to peephole this pattern:
1976   //    %0 = bitcast [[TYPE]] @some_function to i8*
1977   //    %1 = call @llvm.coro.prepare.retcon(i8* %0)
1978   //    %2 = bitcast %1 to [[TYPE]]
1979   // ==>
1980   //    %2 = @some_function
1981   for (auto UI = Prepare->use_begin(), UE = Prepare->use_end();
1982          UI != UE; ) {
1983     // Look for bitcasts back to the original function type.
1984     auto *Cast = dyn_cast<BitCastInst>((UI++)->getUser());
1985     if (!Cast || Cast->getType() != Fn->getType()) continue;
1986 
1987     // Check whether the replacement will introduce new direct calls.
1988     // If so, we'll need to update the call graph.
1989     if (PrepareUserNode) {
1990       for (auto &Use : Cast->uses()) {
1991         if (auto *CB = dyn_cast<CallBase>(Use.getUser())) {
1992           if (!CB->isCallee(&Use))
1993             continue;
1994           PrepareUserNode->removeCallEdgeFor(*CB);
1995           PrepareUserNode->addCalledFunction(CB, FnNode);
1996         }
1997       }
1998     }
1999 
2000     // Replace and remove the cast.
2001     Cast->replaceAllUsesWith(Fn);
2002     Cast->eraseFromParent();
2003   }
2004 
2005   // Replace any remaining uses with the function as an i8*.
2006   // This can never directly be a callee, so we don't need to update CG.
2007   Prepare->replaceAllUsesWith(CastFn);
2008   Prepare->eraseFromParent();
2009 
2010   // Kill dead bitcasts.
2011   while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
2012     if (!Cast->use_empty()) break;
2013     CastFn = Cast->getOperand(0);
2014     Cast->eraseFromParent();
2015   }
2016 }
2017 
2018 static bool replaceAllPrepares(Function *PrepareFn, LazyCallGraph &CG,
2019                                LazyCallGraph::SCC &C) {
2020   bool Changed = false;
2021   for (auto PI = PrepareFn->use_begin(), PE = PrepareFn->use_end(); PI != PE;) {
2022     // Intrinsics can only be used in calls.
2023     auto *Prepare = cast<CallInst>((PI++)->getUser());
2024     replacePrepare(Prepare, CG, C);
2025     Changed = true;
2026   }
2027 
2028   return Changed;
2029 }
2030 
2031 /// Remove calls to llvm.coro.prepare.retcon, a barrier meant to prevent
2032 /// IPO from operating on calls to a retcon coroutine before it's been
2033 /// split.  This is only safe to do after we've split all retcon
2034 /// coroutines in the module.  We can do this in this pass because
2035 /// this pass does promise to split all retcon coroutines (as opposed to
2036 /// switch coroutines, which are lowered in multiple stages).
2037 static bool replaceAllPrepares(Function *PrepareFn, CallGraph &CG) {
2038   bool Changed = false;
2039   for (auto PI = PrepareFn->use_begin(), PE = PrepareFn->use_end();
2040          PI != PE; ) {
2041     // Intrinsics can only be used in calls.
2042     auto *Prepare = cast<CallInst>((PI++)->getUser());
2043     replacePrepare(Prepare, CG);
2044     Changed = true;
2045   }
2046 
2047   return Changed;
2048 }
2049 
2050 static bool declaresCoroSplitIntrinsics(const Module &M) {
2051   return coro::declaresIntrinsics(M, {"llvm.coro.begin",
2052                                       "llvm.coro.prepare.retcon",
2053                                       "llvm.coro.prepare.async"});
2054 }
2055 
2056 static void addPrepareFunction(const Module &M,
2057                                SmallVectorImpl<Function *> &Fns,
2058                                StringRef Name) {
2059   auto *PrepareFn = M.getFunction(Name);
2060   if (PrepareFn && !PrepareFn->use_empty())
2061     Fns.push_back(PrepareFn);
2062 }
2063 
2064 PreservedAnalyses CoroSplitPass::run(LazyCallGraph::SCC &C,
2065                                      CGSCCAnalysisManager &AM,
2066                                      LazyCallGraph &CG, CGSCCUpdateResult &UR) {
2067   // NB: One invariant of a valid LazyCallGraph::SCC is that it must contain a
2068   //     non-zero number of nodes, so we assume that here and grab the first
2069   //     node's function's module.
2070   Module &M = *C.begin()->getFunction().getParent();
2071   auto &FAM =
2072       AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
2073 
2074   if (!declaresCoroSplitIntrinsics(M))
2075     return PreservedAnalyses::all();
2076 
2077   // Check for uses of llvm.coro.prepare.retcon/async.
2078   SmallVector<Function *, 2> PrepareFns;
2079   addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
2080   addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");
2081 
2082   // Find coroutines for processing.
2083   SmallVector<LazyCallGraph::Node *, 4> Coroutines;
2084   for (LazyCallGraph::Node &N : C)
2085     if (N.getFunction().hasFnAttribute(CORO_PRESPLIT_ATTR))
2086       Coroutines.push_back(&N);
2087 
2088   if (Coroutines.empty() && PrepareFns.empty())
2089     return PreservedAnalyses::all();
2090 
2091   if (Coroutines.empty()) {
2092     for (auto *PrepareFn : PrepareFns) {
2093       replaceAllPrepares(PrepareFn, CG, C);
2094     }
2095   }
2096 
2097   // Split all the coroutines.
2098   for (LazyCallGraph::Node *N : Coroutines) {
2099     Function &F = N->getFunction();
2100     Attribute Attr = F.getFnAttribute(CORO_PRESPLIT_ATTR);
2101     StringRef Value = Attr.getValueAsString();
2102     LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F.getName()
2103                       << "' state: " << Value << "\n");
2104     if (Value == UNPREPARED_FOR_SPLIT) {
2105       // Enqueue a second iteration of the CGSCC pipeline on this SCC.
2106       UR.CWorklist.insert(&C);
2107       F.addFnAttr(CORO_PRESPLIT_ATTR, PREPARED_FOR_SPLIT);
2108       continue;
2109     }
2110     F.removeFnAttr(CORO_PRESPLIT_ATTR);
2111 
2112     SmallVector<Function *, 4> Clones;
2113     const coro::Shape Shape = splitCoroutine(F, Clones, ReuseFrameSlot);
2114     updateCallGraphAfterCoroutineSplit(*N, Shape, Clones, C, CG, AM, UR, FAM);
2115 
2116     if ((Shape.ABI == coro::ABI::Async || Shape.ABI == coro::ABI::Retcon ||
2117          Shape.ABI == coro::ABI::RetconOnce) &&
2118         !Shape.CoroSuspends.empty()) {
2119       // Run the CGSCC pipeline on the newly split functions.
2120       // All clones will be in the same RefSCC, so choose a random clone.
2121       UR.RCWorklist.insert(CG.lookupRefSCC(CG.get(*Clones[0])));
2122     }
2123   }
2124 
2125   if (!PrepareFns.empty()) {
2126     for (auto *PrepareFn : PrepareFns) {
2127       replaceAllPrepares(PrepareFn, CG, C);
2128     }
2129   }
2130 
2131   return PreservedAnalyses::none();
2132 }
2133 
2134 namespace {
2135 
2136 // We present a coroutine to LLVM as an ordinary function with suspension
2137 // points marked up with intrinsics. We let the optimizer party on the coroutine
2138 // as a single function for as long as possible. Shortly before the coroutine is
2139 // eligible to be inlined into its callers, we split up the coroutine into parts
2140 // corresponding to initial, resume and destroy invocations of the coroutine,
2141 // add them to the current SCC and restart the IPO pipeline to optimize the
2142 // coroutine subfunctions we extracted before proceeding to the caller of the
2143 // coroutine.
2144 struct CoroSplitLegacy : public CallGraphSCCPass {
2145   static char ID; // Pass identification, replacement for typeid
2146 
2147   CoroSplitLegacy(bool ReuseFrameSlot = false)
2148       : CallGraphSCCPass(ID), ReuseFrameSlot(ReuseFrameSlot) {
2149     initializeCoroSplitLegacyPass(*PassRegistry::getPassRegistry());
2150   }
2151 
2152   bool Run = false;
2153   bool ReuseFrameSlot;
2154 
2155   // A coroutine is identified by the presence of the coro.begin intrinsic; if
2156   // we don't have any, this pass has nothing to do.
2157   bool doInitialization(CallGraph &CG) override {
2158     Run = declaresCoroSplitIntrinsics(CG.getModule());
2159     return CallGraphSCCPass::doInitialization(CG);
2160   }
2161 
2162   bool runOnSCC(CallGraphSCC &SCC) override {
2163     if (!Run)
2164       return false;
2165 
2166     // Check for uses of llvm.coro.prepare.retcon/async.
2167     SmallVector<Function *, 2> PrepareFns;
2168     auto &M = SCC.getCallGraph().getModule();
2169     addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
2170     addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");
2171 
2172     // Find coroutines for processing.
2173     SmallVector<Function *, 4> Coroutines;
2174     for (CallGraphNode *CGN : SCC)
2175       if (auto *F = CGN->getFunction())
2176         if (F->hasFnAttribute(CORO_PRESPLIT_ATTR))
2177           Coroutines.push_back(F);
2178 
2179     if (Coroutines.empty() && PrepareFns.empty())
2180       return false;
2181 
2182     CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
2183 
2184     if (Coroutines.empty()) {
2185       bool Changed = false;
2186       for (auto *PrepareFn : PrepareFns)
2187         Changed |= replaceAllPrepares(PrepareFn, CG);
2188       return Changed;
2189     }
2190 
2191     createDevirtTriggerFunc(CG, SCC);
2192 
2193     // Split all the coroutines.
2194     for (Function *F : Coroutines) {
2195       Attribute Attr = F->getFnAttribute(CORO_PRESPLIT_ATTR);
2196       StringRef Value = Attr.getValueAsString();
2197       LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F->getName()
2198                         << "' state: " << Value << "\n");
2199       // Async lowering marks coroutines to trigger a restart of the pipeline
2200       // after it has split them.
2201       if (Value == ASYNC_RESTART_AFTER_SPLIT) {
2202         F->removeFnAttr(CORO_PRESPLIT_ATTR);
2203         continue;
2204       }
2205       if (Value == UNPREPARED_FOR_SPLIT) {
2206         prepareForSplit(*F, CG);
2207         continue;
2208       }
2209       F->removeFnAttr(CORO_PRESPLIT_ATTR);
2210 
2211       SmallVector<Function *, 4> Clones;
2212       const coro::Shape Shape = splitCoroutine(*F, Clones, ReuseFrameSlot);
2213       updateCallGraphAfterCoroutineSplit(*F, Shape, Clones, CG, SCC);
2214       if (Shape.ABI == coro::ABI::Async) {
2215         // Restart SCC passes.
2216         // Mark the function for the CoroElide pass. It will devirtualize,
2217         // causing a restart of the SCC pipeline.
2218         prepareForSplit(*F, CG, true /*MarkForAsyncRestart*/);
2219       }
2220     }
2221 
2222     for (auto *PrepareFn : PrepareFns)
2223       replaceAllPrepares(PrepareFn, CG);
2224 
2225     return true;
2226   }
2227 
2228   void getAnalysisUsage(AnalysisUsage &AU) const override {
2229     CallGraphSCCPass::getAnalysisUsage(AU);
2230   }
2231 
2232   StringRef getPassName() const override { return "Coroutine Splitting"; }
2233 };
2234 
2235 } // end anonymous namespace
2236 
2237 char CoroSplitLegacy::ID = 0;
2238 
2239 INITIALIZE_PASS_BEGIN(
2240     CoroSplitLegacy, "coro-split",
2241     "Split coroutine into a set of functions driving its state machine", false,
2242     false)
2243 INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
2244 INITIALIZE_PASS_END(
2245     CoroSplitLegacy, "coro-split",
2246     "Split coroutine into a set of functions driving its state machine", false,
2247     false)
2248 
2249 Pass *llvm::createCoroSplitLegacyPass(bool ReuseFrameSlot) {
2250   return new CoroSplitLegacy(ReuseFrameSlot);
2251 }
2252