1 //===- CoroSplit.cpp - Converts a coroutine into a state machine ----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 // This pass builds the coroutine frame and outlines resume and destroy parts
9 // of the coroutine into separate functions.
10 //
11 // We present a coroutine to LLVM as an ordinary function with suspension
12 // points marked up with intrinsics. We let the optimizer party on the coroutine
13 // as a single function for as long as possible. Shortly before the coroutine is
14 // eligible to be inlined into its callers, we split up the coroutine into parts
15 // corresponding to the initial, resume and destroy invocations of the coroutine,
16 // add them to the current SCC and restart the IPO pipeline to optimize the
17 // coroutine subfunctions we extracted before proceeding to the caller of the
18 // coroutine.
19 //===----------------------------------------------------------------------===//
20 
21 #include "llvm/Transforms/Coroutines/CoroSplit.h"
22 #include "CoroInstr.h"
23 #include "CoroInternal.h"
24 #include "llvm/ADT/DenseMap.h"
25 #include "llvm/ADT/SmallPtrSet.h"
26 #include "llvm/ADT/SmallVector.h"
27 #include "llvm/ADT/StringRef.h"
28 #include "llvm/ADT/Twine.h"
29 #include "llvm/Analysis/CFG.h"
30 #include "llvm/Analysis/CallGraph.h"
31 #include "llvm/Analysis/CallGraphSCCPass.h"
32 #include "llvm/Analysis/LazyCallGraph.h"
33 #include "llvm/IR/Argument.h"
34 #include "llvm/IR/Attributes.h"
35 #include "llvm/IR/BasicBlock.h"
36 #include "llvm/IR/CFG.h"
37 #include "llvm/IR/CallingConv.h"
38 #include "llvm/IR/Constants.h"
39 #include "llvm/IR/DataLayout.h"
40 #include "llvm/IR/DerivedTypes.h"
41 #include "llvm/IR/Dominators.h"
42 #include "llvm/IR/Function.h"
43 #include "llvm/IR/GlobalValue.h"
44 #include "llvm/IR/GlobalVariable.h"
45 #include "llvm/IR/IRBuilder.h"
46 #include "llvm/IR/InstIterator.h"
47 #include "llvm/IR/InstrTypes.h"
48 #include "llvm/IR/Instruction.h"
49 #include "llvm/IR/Instructions.h"
50 #include "llvm/IR/IntrinsicInst.h"
51 #include "llvm/IR/LLVMContext.h"
52 #include "llvm/IR/LegacyPassManager.h"
53 #include "llvm/IR/Module.h"
54 #include "llvm/IR/Type.h"
55 #include "llvm/IR/Value.h"
56 #include "llvm/IR/Verifier.h"
57 #include "llvm/InitializePasses.h"
58 #include "llvm/Pass.h"
59 #include "llvm/Support/Casting.h"
60 #include "llvm/Support/Debug.h"
61 #include "llvm/Support/PrettyStackTrace.h"
62 #include "llvm/Support/raw_ostream.h"
63 #include "llvm/Transforms/Scalar.h"
64 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
65 #include "llvm/Transforms/Utils/CallGraphUpdater.h"
66 #include "llvm/Transforms/Utils/Cloning.h"
67 #include "llvm/Transforms/Utils/Local.h"
68 #include "llvm/Transforms/Utils/ValueMapper.h"
69 #include <cassert>
70 #include <cstddef>
71 #include <cstdint>
72 #include <initializer_list>
73 #include <iterator>
74 
75 using namespace llvm;
76 
77 #define DEBUG_TYPE "coro-split"
78 
79 namespace {
80 
81 /// A little helper class for building the coroutine clones.
82 class CoroCloner {
83 public:
84   enum class Kind {
85     /// The shared resume function for a switch lowering.
86     SwitchResume,
87 
88     /// The shared unwind function for a switch lowering.
89     SwitchUnwind,
90 
91     /// The shared cleanup function for a switch lowering.
92     SwitchCleanup,
93 
94     /// An individual continuation function.
95     Continuation,
96 
97     /// An async resume function.
98     Async,
99   };
100 
101 private:
102   Function &OrigF;
103   Function *NewF;
104   const Twine &Suffix;
105   coro::Shape &Shape;
106   Kind FKind;
107   ValueToValueMapTy VMap;
108   IRBuilder<> Builder;
109   Value *NewFramePtr = nullptr;
110 
111   /// The active suspend instruction; meaningful only for continuation and async
112   /// ABIs.
113   AnyCoroSuspendInst *ActiveSuspend = nullptr;
114 
115 public:
116   /// Create a cloner for a switch lowering.
117   CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
118              Kind FKind)
119     : OrigF(OrigF), NewF(nullptr), Suffix(Suffix), Shape(Shape),
120       FKind(FKind), Builder(OrigF.getContext()) {
121     assert(Shape.ABI == coro::ABI::Switch);
122   }
123 
124   /// Create a cloner for a continuation lowering.
125   CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
126              Function *NewF, AnyCoroSuspendInst *ActiveSuspend)
127       : OrigF(OrigF), NewF(NewF), Suffix(Suffix), Shape(Shape),
128         FKind(Shape.ABI == coro::ABI::Async ? Kind::Async : Kind::Continuation),
129         Builder(OrigF.getContext()), ActiveSuspend(ActiveSuspend) {
130     assert(Shape.ABI == coro::ABI::Retcon ||
131            Shape.ABI == coro::ABI::RetconOnce || Shape.ABI == coro::ABI::Async);
132     assert(NewF && "need existing function for continuation");
133     assert(ActiveSuspend && "need active suspend point for continuation");
134   }
135 
136   Function *getFunction() const {
137     assert(NewF != nullptr && "declaration not yet set");
138     return NewF;
139   }
140 
141   void create();
142 
143 private:
144   bool isSwitchDestroyFunction() {
145     switch (FKind) {
146     case Kind::Async:
147     case Kind::Continuation:
148     case Kind::SwitchResume:
149       return false;
150     case Kind::SwitchUnwind:
151     case Kind::SwitchCleanup:
152       return true;
153     }
154     llvm_unreachable("Unknown CoroCloner::Kind enum");
155   }
156 
157   void replaceEntryBlock();
158   Value *deriveNewFramePointer();
159   void replaceRetconOrAsyncSuspendUses();
160   void replaceCoroSuspends();
161   void replaceCoroEnds();
162   void replaceSwiftErrorOps();
163   void salvageDebugInfo();
164   void handleFinalSuspend();
165 };
166 
167 } // end anonymous namespace
168 
169 static void maybeFreeRetconStorage(IRBuilder<> &Builder,
170                                    const coro::Shape &Shape, Value *FramePtr,
171                                    CallGraph *CG) {
172   assert(Shape.ABI == coro::ABI::Retcon ||
173          Shape.ABI == coro::ABI::RetconOnce);
174   if (Shape.RetconLowering.IsFrameInlineInStorage)
175     return;
176 
177   Shape.emitDealloc(Builder, FramePtr, CG);
178 }
179 
180 /// Replace an llvm.coro.end.async.
181 /// Will inline the musttail call, if there is one.
182 /// \returns true if cleanup of the coro.end block is needed, false otherwise.
183 static bool replaceCoroEndAsync(AnyCoroEndInst *End) {
184   IRBuilder<> Builder(End);
185 
186   auto *EndAsync = dyn_cast<CoroAsyncEndInst>(End);
187   if (!EndAsync) {
188     Builder.CreateRetVoid();
189     return true /*needs cleanup of coro.end block*/;
190   }
191 
192   auto *MustTailCallFunc = EndAsync->getMustTailCallFunction();
193   if (!MustTailCallFunc) {
194     Builder.CreateRetVoid();
195     return true /*needs cleanup of coro.end block*/;
196   }
197 
198   // Move the must tail call from the predecessor block into the end block.
199   auto *CoroEndBlock = End->getParent();
200   auto *MustTailCallFuncBlock = CoroEndBlock->getSinglePredecessor();
201   assert(MustTailCallFuncBlock && "Must have a single predecessor block");
202   auto It = MustTailCallFuncBlock->getTerminator()->getIterator();
203   auto *MustTailCall = cast<CallInst>(&*std::prev(It));
204   CoroEndBlock->getInstList().splice(
205       End->getIterator(), MustTailCallFuncBlock->getInstList(), MustTailCall);
206 
207   // Insert the return instruction.
208   Builder.SetInsertPoint(End);
209   Builder.CreateRetVoid();
210   InlineFunctionInfo FnInfo;
211 
212   // Remove the rest of the block, by splitting it into an unreachable block.
213   auto *BB = End->getParent();
214   BB->splitBasicBlock(End);
215   BB->getTerminator()->eraseFromParent();
216 
217   auto InlineRes = InlineFunction(*MustTailCall, FnInfo);
218   assert(InlineRes.isSuccess() && "Expected inlining to succeed");
219   (void)InlineRes;
220 
221   // We have cleaned up the coro.end block above.
222   return false;
223 }
224 
225 /// Replace a non-unwind call to llvm.coro.end.
226 static void replaceFallthroughCoroEnd(AnyCoroEndInst *End,
227                                       const coro::Shape &Shape, Value *FramePtr,
228                                       bool InResume, CallGraph *CG) {
229   // Start inserting right before the coro.end.
230   IRBuilder<> Builder(End);
231 
232   // Create the return instruction.
233   switch (Shape.ABI) {
234   // The cloned functions in switch-lowering always return void.
235   case coro::ABI::Switch:
236     // coro.end doesn't immediately end the coroutine in the main function
237     // in this lowering, because we need to deallocate the coroutine.
238     if (!InResume)
239       return;
240     Builder.CreateRetVoid();
241     break;
242 
243   // In async lowering this returns.
244   case coro::ABI::Async: {
245     bool CoroEndBlockNeedsCleanup = replaceCoroEndAsync(End);
246     if (!CoroEndBlockNeedsCleanup)
247       return;
248     break;
249   }
250 
251   // In unique continuation lowering, the continuations always return void.
252   // But we may have implicitly allocated storage.
253   case coro::ABI::RetconOnce:
254     maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
255     Builder.CreateRetVoid();
256     break;
257 
258   // In non-unique continuation lowering, we signal completion by returning
259   // a null continuation.
260   case coro::ABI::Retcon: {
261     maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
262     auto RetTy = Shape.getResumeFunctionType()->getReturnType();
263     auto RetStructTy = dyn_cast<StructType>(RetTy);
264     PointerType *ContinuationTy =
265       cast<PointerType>(RetStructTy ? RetStructTy->getElementType(0) : RetTy);
266 
267     Value *ReturnValue = ConstantPointerNull::get(ContinuationTy);
268     if (RetStructTy) {
269       ReturnValue = Builder.CreateInsertValue(UndefValue::get(RetStructTy),
270                                               ReturnValue, 0);
271     }
272     Builder.CreateRet(ReturnValue);
273     break;
274   }
275   }
276 
277   // Remove the rest of the block, by splitting it into an unreachable block.
278   auto *BB = End->getParent();
279   BB->splitBasicBlock(End);
280   BB->getTerminator()->eraseFromParent();
281 }
282 
283 /// Replace an unwind call to llvm.coro.end.
284 static void replaceUnwindCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
285                                  Value *FramePtr, bool InResume,
286                                  CallGraph *CG) {
287   IRBuilder<> Builder(End);
288 
289   switch (Shape.ABI) {
290   // In switch-lowering, this does nothing in the main function.
291   case coro::ABI::Switch:
292     if (!InResume)
293       return;
294     break;
295   // In async lowering this does nothing.
296   case coro::ABI::Async:
297     break;
298   // In continuation-lowering, this frees the continuation storage.
299   case coro::ABI::Retcon:
300   case coro::ABI::RetconOnce:
301     maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
302     break;
303   }
304 
305   // If coro.end has an associated bundle, add a cleanupret instruction.
306   if (auto Bundle = End->getOperandBundle(LLVMContext::OB_funclet)) {
307     auto *FromPad = cast<CleanupPadInst>(Bundle->Inputs[0]);
308     auto *CleanupRet = Builder.CreateCleanupRet(FromPad, nullptr);
309     End->getParent()->splitBasicBlock(End);
310     CleanupRet->getParent()->getTerminator()->eraseFromParent();
311   }
312 }
313 
314 static void replaceCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
315                            Value *FramePtr, bool InResume, CallGraph *CG) {
316   if (End->isUnwind())
317     replaceUnwindCoroEnd(End, Shape, FramePtr, InResume, CG);
318   else
319     replaceFallthroughCoroEnd(End, Shape, FramePtr, InResume, CG);
320 
321   auto &Context = End->getContext();
322   End->replaceAllUsesWith(InResume ? ConstantInt::getTrue(Context)
323                                    : ConstantInt::getFalse(Context));
324   End->eraseFromParent();
325 }
326 
327 // Create an entry block for a resume function with a switch that will jump to
328 // suspend points.
329 static void createResumeEntryBlock(Function &F, coro::Shape &Shape) {
330   assert(Shape.ABI == coro::ABI::Switch);
331   LLVMContext &C = F.getContext();
332 
333   // resume.entry:
334   //  %index.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32 0,
335   //  i32 2
336   //  %index = load i32, i32* %index.addr
337   //  switch i32 %index, label %unreachable [
338   //    i32 0, label %resume.0
339   //    i32 1, label %resume.1
340   //    ...
341   //  ]
342 
343   auto *NewEntry = BasicBlock::Create(C, "resume.entry", &F);
344   auto *UnreachBB = BasicBlock::Create(C, "unreachable", &F);
345 
346   IRBuilder<> Builder(NewEntry);
347   auto *FramePtr = Shape.FramePtr;
348   auto *FrameTy = Shape.FrameTy;
349   auto *GepIndex = Builder.CreateStructGEP(
350       FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
351   auto *Index = Builder.CreateLoad(Shape.getIndexType(), GepIndex, "index");
352   auto *Switch =
353       Builder.CreateSwitch(Index, UnreachBB, Shape.CoroSuspends.size());
354   Shape.SwitchLowering.ResumeSwitch = Switch;
355 
356   size_t SuspendIndex = 0;
357   for (auto *AnyS : Shape.CoroSuspends) {
358     auto *S = cast<CoroSuspendInst>(AnyS);
359     ConstantInt *IndexVal = Shape.getIndex(SuspendIndex);
360 
361     // Replace CoroSave with a store to Index:
362     //    %index.addr = getelementptr %f.frame... (index field number)
363     //    store i32 0, i32* %index.addr1
364     auto *Save = S->getCoroSave();
365     Builder.SetInsertPoint(Save);
366     if (S->isFinal()) {
367       // Final suspend point is represented by storing zero in ResumeFnAddr.
368       auto *GepIndex = Builder.CreateStructGEP(FrameTy, FramePtr,
369                                  coro::Shape::SwitchFieldIndex::Resume,
370                                   "ResumeFn.addr");
371       auto *NullPtr = ConstantPointerNull::get(cast<PointerType>(
372           FrameTy->getTypeAtIndex(coro::Shape::SwitchFieldIndex::Resume)));
373       Builder.CreateStore(NullPtr, GepIndex);
374     } else {
375       auto *GepIndex = Builder.CreateStructGEP(
376           FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
377       Builder.CreateStore(IndexVal, GepIndex);
378     }
379     Save->replaceAllUsesWith(ConstantTokenNone::get(C));
380     Save->eraseFromParent();
381 
382     // Split block before and after coro.suspend and add a jump from an entry
383     // switch:
384     //
385     //  whateverBB:
386     //    whatever
387     //    %0 = call i8 @llvm.coro.suspend(token none, i1 false)
388     //    switch i8 %0, label %suspend[i8 0, label %resume
389     //                                 i8 1, label %cleanup]
390     // becomes:
391     //
392     //  whateverBB:
393     //     whatever
394     //     br label %resume.0.landing
395     //
396     //  resume.0: ; <--- jump from the switch in the resume.entry
397     //     %0 = tail call i8 @llvm.coro.suspend(token none, i1 false)
398     //     br label %resume.0.landing
399     //
400     //  resume.0.landing:
401     //     %1 = phi i8[-1, %whateverBB], [%0, %resume.0]
402     //     switch i8 % 1, label %suspend [i8 0, label %resume
403     //                                    i8 1, label %cleanup]
404 
405     auto *SuspendBB = S->getParent();
406     auto *ResumeBB =
407         SuspendBB->splitBasicBlock(S, "resume." + Twine(SuspendIndex));
408     auto *LandingBB = ResumeBB->splitBasicBlock(
409         S->getNextNode(), ResumeBB->getName() + Twine(".landing"));
410     Switch->addCase(IndexVal, ResumeBB);
411 
412     cast<BranchInst>(SuspendBB->getTerminator())->setSuccessor(0, LandingBB);
413     auto *PN = PHINode::Create(Builder.getInt8Ty(), 2, "", &LandingBB->front());
414     S->replaceAllUsesWith(PN);
415     PN->addIncoming(Builder.getInt8(-1), SuspendBB);
416     PN->addIncoming(S, ResumeBB);
417 
418     ++SuspendIndex;
419   }
420 
421   Builder.SetInsertPoint(UnreachBB);
422   Builder.CreateUnreachable();
423 
424   Shape.SwitchLowering.ResumeEntryBlock = NewEntry;
425 }
426 
427 
428 // Rewrite final suspend point handling. We do not use suspend index to
429 // represent the final suspend point. Instead we zero-out ResumeFnAddr in the
430 // coroutine frame, since it is undefined behavior to resume a coroutine
431 // suspended at the final suspend point. Thus, in the resume function, we can
432 // simply remove the last case (when coro::Shape is built, the final suspend
433 // point (if present) is always the last element of CoroSuspends array).
434 // In the destroy function, we add a code sequence to check if ResumeFnAddress
435 // is Null, and if so, jump to the appropriate label to handle cleanup from the
436 // final suspend point.
437 void CoroCloner::handleFinalSuspend() {
438   assert(Shape.ABI == coro::ABI::Switch &&
439          Shape.SwitchLowering.HasFinalSuspend);
440   auto *Switch = cast<SwitchInst>(VMap[Shape.SwitchLowering.ResumeSwitch]);
441   auto FinalCaseIt = std::prev(Switch->case_end());
442   BasicBlock *ResumeBB = FinalCaseIt->getCaseSuccessor();
443   Switch->removeCase(FinalCaseIt);
444   if (isSwitchDestroyFunction()) {
445     BasicBlock *OldSwitchBB = Switch->getParent();
446     auto *NewSwitchBB = OldSwitchBB->splitBasicBlock(Switch, "Switch");
447     Builder.SetInsertPoint(OldSwitchBB->getTerminator());
448     auto *GepIndex = Builder.CreateStructGEP(Shape.FrameTy, NewFramePtr,
449                                        coro::Shape::SwitchFieldIndex::Resume,
450                                              "ResumeFn.addr");
451     auto *Load = Builder.CreateLoad(Shape.getSwitchResumePointerType(),
452                                     GepIndex);
453     auto *Cond = Builder.CreateIsNull(Load);
454     Builder.CreateCondBr(Cond, ResumeBB, NewSwitchBB);
455     OldSwitchBB->getTerminator()->eraseFromParent();
456   }
457 }
458 
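// Build the signature of an async resume function from the result type of the
// corresponding llvm.coro.suspend.async: it returns void and takes the
// elements of the suspend's aggregate result as parameters.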
459 static FunctionType *
460 getFunctionTypeFromAsyncSuspend(AnyCoroSuspendInst *Suspend) {
461   auto *AsyncSuspend = cast<CoroSuspendAsyncInst>(Suspend);
462   auto *StructTy = cast<StructType>(AsyncSuspend->getType());
463   auto &Context = Suspend->getParent()->getParent()->getContext();
464   auto *VoidTy = Type::getVoidTy(Context);
465   return FunctionType::get(VoidTy, StructTy->elements(), false);
466 }
467 
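// Create the declaration for a resume/destroy/cleanup clone (or an async
// resume function) with internal linkage and insert it into the module.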
468 static Function *createCloneDeclaration(Function &OrigF, coro::Shape &Shape,
469                                         const Twine &Suffix,
470                                         Module::iterator InsertBefore,
471                                         AnyCoroSuspendInst *ActiveSuspend) {
472   Module *M = OrigF.getParent();
473   auto *FnTy = (Shape.ABI != coro::ABI::Async)
474                    ? Shape.getResumeFunctionType()
475                    : getFunctionTypeFromAsyncSuspend(ActiveSuspend);
476 
477   Function *NewF =
478       Function::Create(FnTy, GlobalValue::LinkageTypes::InternalLinkage,
479                        OrigF.getName() + Suffix);
480   if (Shape.ABI != coro::ABI::Async)
481     NewF->addParamAttr(0, Attribute::NonNull);
482 
483   // For the async lowering ABI we can't guarantee that the context argument is
484   // not accessed via a different pointer not based on the argument.
485   if (Shape.ABI != coro::ABI::Async)
486     NewF->addParamAttr(0, Attribute::NoAlias);
487 
488   M->getFunctionList().insert(InsertBefore, NewF);
489 
490   return NewF;
491 }
492 
493 /// Replace uses of the active llvm.coro.suspend.retcon/async call with the
494 /// arguments to the continuation function.
495 ///
496 /// This assumes that the builder has a meaningful insertion point.
497 void CoroCloner::replaceRetconOrAsyncSuspendUses() {
498   assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
499          Shape.ABI == coro::ABI::Async);
500 
501   auto NewS = VMap[ActiveSuspend];
502   if (NewS->use_empty()) return;
503 
504   // Copy out all the continuation arguments after the buffer pointer into
505   // an easily-indexed data structure for convenience.
506   SmallVector<Value*, 8> Args;
507   // The async ABI includes all arguments -- including the first argument.
508   bool IsAsyncABI = Shape.ABI == coro::ABI::Async;
509   for (auto I = IsAsyncABI ? NewF->arg_begin() : std::next(NewF->arg_begin()),
510             E = NewF->arg_end();
511        I != E; ++I)
512     Args.push_back(&*I);
513 
514   // If the suspend returns a single scalar value, we can just do a simple
515   // replacement.
516   if (!isa<StructType>(NewS->getType())) {
517     assert(Args.size() == 1);
518     NewS->replaceAllUsesWith(Args.front());
519     return;
520   }
521 
522   // Try to peephole extracts of an aggregate return.
523   for (auto UI = NewS->use_begin(), UE = NewS->use_end(); UI != UE; ) {
524     auto EVI = dyn_cast<ExtractValueInst>((UI++)->getUser());
525     if (!EVI || EVI->getNumIndices() != 1)
526       continue;
527 
528     EVI->replaceAllUsesWith(Args[EVI->getIndices().front()]);
529     EVI->eraseFromParent();
530   }
531 
532   // If we have no remaining uses, we're done.
533   if (NewS->use_empty()) return;
534 
535   // Otherwise, we need to create an aggregate.
536   Value *Agg = UndefValue::get(NewS->getType());
537   for (size_t I = 0, E = Args.size(); I != E; ++I)
538     Agg = Builder.CreateInsertValue(Agg, Args[I], I);
539 
540   NewS->replaceAllUsesWith(Agg);
541 }
542 
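/// Replace the remaining llvm.coro.suspend intrinsics in the clone. Under the
/// switch ABI each suspend becomes a constant selecting the resume (0) or
/// cleanup (1) path; the other ABIs leave their suspends untouched here.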
543 void CoroCloner::replaceCoroSuspends() {
544   Value *SuspendResult;
545 
546   switch (Shape.ABI) {
547   // In switch lowering, replace coro.suspend with the appropriate value
548   // for the type of function we're extracting.
549   // Replacing coro.suspend with (0) will result in control flow proceeding to
550   // a resume label associated with a suspend point, replacing it with (1) will
551   // result in control flow proceeding to a cleanup label associated with this
552   // suspend point.
553   case coro::ABI::Switch:
554     SuspendResult = Builder.getInt8(isSwitchDestroyFunction() ? 1 : 0);
555     break;
556 
557   // In async lowering there are no uses of the result.
558   case coro::ABI::Async:
559     return;
560 
561   // In returned-continuation lowering, the arguments from earlier
562   // continuations are theoretically arbitrary, and they should have been
563   // spilled.
564   case coro::ABI::RetconOnce:
565   case coro::ABI::Retcon:
566     return;
567   }
568 
569   for (AnyCoroSuspendInst *CS : Shape.CoroSuspends) {
570     // The active suspend was handled earlier.
571     if (CS == ActiveSuspend) continue;
572 
573     auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[CS]);
574     MappedCS->replaceAllUsesWith(SuspendResult);
575     MappedCS->eraseFromParent();
576   }
577 }
578 
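/// Lower every llvm.coro.end that was cloned into the new function.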
579 void CoroCloner::replaceCoroEnds() {
580   for (AnyCoroEndInst *CE : Shape.CoroEnds) {
581     // We use a null call graph because there's no call graph node for
582     // the cloned function yet.  We'll just be rebuilding that later.
583     auto *NewCE = cast<AnyCoroEndInst>(VMap[CE]);
584     replaceCoroEnd(NewCE, Shape, NewFramePtr, /*in resume*/ true, nullptr);
585   }
586 }
587 
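// Rewrite the swifterror intrinsic calls recorded in the Shape as loads and
// stores through a swifterror slot: either an existing swifterror argument or
// a new swifterror alloca in the entry block.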
588 static void replaceSwiftErrorOps(Function &F, coro::Shape &Shape,
589                                  ValueToValueMapTy *VMap) {
590   if (Shape.ABI == coro::ABI::Async && Shape.CoroSuspends.empty())
591     return;
592   Value *CachedSlot = nullptr;
593   auto getSwiftErrorSlot = [&](Type *ValueTy) -> Value * {
594     if (CachedSlot) {
595       assert(CachedSlot->getType()->getPointerElementType() == ValueTy &&
596              "multiple swifterror slots in function with different types");
597       return CachedSlot;
598     }
599 
600     // Check if the function has a swifterror argument.
601     for (auto &Arg : F.args()) {
602       if (Arg.isSwiftError()) {
603         CachedSlot = &Arg;
604         assert(Arg.getType()->getPointerElementType() == ValueTy &&
605                "swifterror argument does not have expected type");
606         return &Arg;
607       }
608     }
609 
610     // Create a swifterror alloca.
611     IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
612     auto Alloca = Builder.CreateAlloca(ValueTy);
613     Alloca->setSwiftError(true);
614 
615     CachedSlot = Alloca;
616     return Alloca;
617   };
618 
619   for (CallInst *Op : Shape.SwiftErrorOps) {
620     auto MappedOp = VMap ? cast<CallInst>((*VMap)[Op]) : Op;
621     IRBuilder<> Builder(MappedOp);
622 
623     // If there are no arguments, this is a 'get' operation.
624     Value *MappedResult;
625     if (Op->getNumArgOperands() == 0) {
626       auto ValueTy = Op->getType();
627       auto Slot = getSwiftErrorSlot(ValueTy);
628       MappedResult = Builder.CreateLoad(ValueTy, Slot);
629     } else {
630       assert(Op->getNumArgOperands() == 1);
631       auto Value = MappedOp->getArgOperand(0);
632       auto ValueTy = Value->getType();
633       auto Slot = getSwiftErrorSlot(ValueTy);
634       Builder.CreateStore(Value, Slot);
635       MappedResult = Slot;
636     }
637 
638     MappedOp->replaceAllUsesWith(MappedResult);
639     MappedOp->eraseFromParent();
640   }
641 
642   // If we're updating the original function, we've invalidated SwiftErrorOps.
643   if (VMap == nullptr) {
644     Shape.SwiftErrorOps.clear();
645   }
646 }
647 
648 void CoroCloner::replaceSwiftErrorOps() {
649   ::replaceSwiftErrorOps(*NewF, Shape, &VMap);
650 }
651 
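/// Salvage debug intrinsics that refer to values which now live in the
/// coroutine frame, and drop the ones that became unreachable or stale after
/// the split.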
652 void CoroCloner::salvageDebugInfo() {
653   SmallVector<DbgVariableIntrinsic *, 8> Worklist;
654   SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;
655   for (auto &BB : *NewF)
656     for (auto &I : BB)
657       if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I))
658         Worklist.push_back(DVI);
659   for (DbgVariableIntrinsic *DVI : Worklist)
660     coro::salvageDebugInfo(DbgPtrAllocaCache, DVI, Shape.ReuseFrameSlot);
661 
662   // Remove all salvaged dbg.declare intrinsics that became
663   // either unreachable or stale due to the CoroSplit transformation.
664   DominatorTree DomTree(*NewF);
665   auto IsUnreachableBlock = [&](BasicBlock *BB) {
666     return !isPotentiallyReachable(&NewF->getEntryBlock(), BB, nullptr,
667                                    &DomTree);
668   };
669   for (DbgVariableIntrinsic *DVI : Worklist) {
670     if (IsUnreachableBlock(DVI->getParent()))
671       DVI->eraseFromParent();
672     else if (dyn_cast_or_null<AllocaInst>(DVI->getVariableLocationOp(0))) {
673       // Count all non-debuginfo uses in reachable blocks.
674       unsigned Uses = 0;
675       for (auto *User : DVI->getVariableLocationOp(0)->users())
676         if (auto *I = dyn_cast<Instruction>(User))
677           if (!isa<AllocaInst>(I) && !IsUnreachableBlock(I->getParent()))
678             ++Uses;
679       if (!Uses)
680         DVI->eraseFromParent();
681     }
682   }
683 }
684 
685 void CoroCloner::replaceEntryBlock() {
686   // In the original function, the AllocaSpillBlock is a block immediately
687   // following the allocation of the frame object which defines GEPs for
688   // all the allocas that have been moved into the frame, and it ends by
689   // branching to the original beginning of the coroutine.  Make this
690   // the entry block of the cloned function.
691   auto *Entry = cast<BasicBlock>(VMap[Shape.AllocaSpillBlock]);
692   auto *OldEntry = &NewF->getEntryBlock();
693   Entry->setName("entry" + Suffix);
694   Entry->moveBefore(OldEntry);
695   Entry->getTerminator()->eraseFromParent();
696 
697   // Clear all predecessors of the new entry block.  There should be
698   // exactly one predecessor, which we created when splitting out
699   // AllocaSpillBlock to begin with.
700   assert(Entry->hasOneUse());
701   auto BranchToEntry = cast<BranchInst>(Entry->user_back());
702   assert(BranchToEntry->isUnconditional());
703   Builder.SetInsertPoint(BranchToEntry);
704   Builder.CreateUnreachable();
705   BranchToEntry->eraseFromParent();
706 
707   // Branch from the entry to the appropriate place.
708   Builder.SetInsertPoint(Entry);
709   switch (Shape.ABI) {
710   case coro::ABI::Switch: {
711     // In switch-lowering, we built a resume-entry block in the original
712     // function.  Make the entry block branch to this.
713     auto *SwitchBB =
714       cast<BasicBlock>(VMap[Shape.SwitchLowering.ResumeEntryBlock]);
715     Builder.CreateBr(SwitchBB);
716     break;
717   }
718   case coro::ABI::Async:
719   case coro::ABI::Retcon:
720   case coro::ABI::RetconOnce: {
721     // In continuation ABIs, we want to branch to immediately after the
722     // active suspend point.  Earlier phases will have put the suspend in its
723     // own basic block, so just thread our jump directly to its successor.
724     assert((Shape.ABI == coro::ABI::Async &&
725             isa<CoroSuspendAsyncInst>(ActiveSuspend)) ||
726            ((Shape.ABI == coro::ABI::Retcon ||
727              Shape.ABI == coro::ABI::RetconOnce) &&
728             isa<CoroSuspendRetconInst>(ActiveSuspend)));
729     auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[ActiveSuspend]);
730     auto Branch = cast<BranchInst>(MappedCS->getNextNode());
731     assert(Branch->isUnconditional());
732     Builder.CreateBr(Branch->getSuccessor(0));
733     break;
734   }
735   }
736 
737   // Any static alloca that's still being used but not reachable from the new
738   // entry needs to be moved to the new entry.
739   Function *F = OldEntry->getParent();
740   DominatorTree DT{*F};
741   for (auto IT = inst_begin(F), End = inst_end(F); IT != End;) {
742     Instruction &I = *IT++;
743     auto *Alloca = dyn_cast<AllocaInst>(&I);
744     if (!Alloca || I.use_empty())
745       continue;
746     if (DT.isReachableFromEntry(I.getParent()) ||
747         !isa<ConstantInt>(Alloca->getArraySize()))
748       continue;
749     I.moveBefore(*Entry, Entry->getFirstInsertionPt());
750   }
751 }
752 
753 /// Derive the value of the new frame pointer.
754 Value *CoroCloner::deriveNewFramePointer() {
755   // Builder should be inserting to the front of the new entry block.
756 
757   switch (Shape.ABI) {
758   // In switch-lowering, the argument is the frame pointer.
759   case coro::ABI::Switch:
760     return &*NewF->arg_begin();
761   // In async-lowering, one of the arguments is an async context as determined
762   // by the `llvm.coro.id.async` intrinsic. We can retrieve the async context of
763   // the resume function from the async context projection function associated
764   // with the active suspend. The frame is located as a tail to the async
765   // context header.
766   case coro::ABI::Async: {
767     auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
768     auto ContextIdx = ActiveAsyncSuspend->getStorageArgumentIndex() & 0xff;
769     auto *CalleeContext = NewF->getArg(ContextIdx);
770     auto *FramePtrTy = Shape.FrameTy->getPointerTo();
771     auto *ProjectionFunc =
772         ActiveAsyncSuspend->getAsyncContextProjectionFunction();
773     auto DbgLoc =
774         cast<CoroSuspendAsyncInst>(VMap[ActiveSuspend])->getDebugLoc();
775     // Calling i8* (i8*)
776     auto *CallerContext = Builder.CreateCall(
777         cast<FunctionType>(ProjectionFunc->getType()->getPointerElementType()),
778         ProjectionFunc, CalleeContext);
779     CallerContext->setCallingConv(ProjectionFunc->getCallingConv());
780     CallerContext->setDebugLoc(DbgLoc);
781     // The frame is located after the async_context header.
782     auto &Context = Builder.getContext();
783     auto *FramePtrAddr = Builder.CreateConstInBoundsGEP1_32(
784         Type::getInt8Ty(Context), CallerContext,
785         Shape.AsyncLowering.FrameOffset, "async.ctx.frameptr");
786     // Inline the projection function.
787     InlineFunctionInfo InlineInfo;
788     auto InlineRes = InlineFunction(*CallerContext, InlineInfo);
789     assert(InlineRes.isSuccess());
790     (void)InlineRes;
791     return Builder.CreateBitCast(FramePtrAddr, FramePtrTy);
792   }
793   // In continuation-lowering, the argument is the opaque storage.
794   case coro::ABI::Retcon:
795   case coro::ABI::RetconOnce: {
796     Argument *NewStorage = &*NewF->arg_begin();
797     auto FramePtrTy = Shape.FrameTy->getPointerTo();
798 
799     // If the storage is inline, just bitcast the storage to the frame type.
800     if (Shape.RetconLowering.IsFrameInlineInStorage)
801       return Builder.CreateBitCast(NewStorage, FramePtrTy);
802 
803     // Otherwise, load the real frame from the opaque storage.
804     auto FramePtrPtr =
805       Builder.CreateBitCast(NewStorage, FramePtrTy->getPointerTo());
806     return Builder.CreateLoad(FramePtrTy, FramePtrPtr);
807   }
808   }
809   llvm_unreachable("bad ABI");
810 }
811 
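// Mark the frame-pointer parameter as nonnull, noalias, aligned and
// dereferenceable up to the frame size.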
812 static void addFramePointerAttrs(AttributeList &Attrs, LLVMContext &Context,
813                                  unsigned ParamIndex,
814                                  uint64_t Size, Align Alignment) {
815   AttrBuilder ParamAttrs;
816   ParamAttrs.addAttribute(Attribute::NonNull);
817   ParamAttrs.addAttribute(Attribute::NoAlias);
818   ParamAttrs.addAlignmentAttr(Alignment);
819   ParamAttrs.addDereferenceableAttr(Size);
820   Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
821 }
822 
823 static void addAsyncContextAttrs(AttributeList &Attrs, LLVMContext &Context,
824                                  unsigned ParamIndex) {
825   AttrBuilder ParamAttrs;
826   ParamAttrs.addAttribute(Attribute::SwiftAsync);
827   Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
828 }
829 
830 static void addSwiftSelfAttrs(AttributeList &Attrs, LLVMContext &Context,
831                               unsigned ParamIndex) {
832   AttrBuilder ParamAttrs;
833   ParamAttrs.addAttribute(Attribute::SwiftSelf);
834   Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
835 }
836 
837 /// Clone the body of the original function into a resume function of
838 /// some sort.
839 void CoroCloner::create() {
840   // Create the new function if we don't already have one.
841   if (!NewF) {
842     NewF = createCloneDeclaration(OrigF, Shape, Suffix,
843                                   OrigF.getParent()->end(), ActiveSuspend);
844   }
845 
846   // Replace all args with undefs. The buildCoroutineFrame algorithm has already
847   // rewritten accesses to the args that occur after suspend points with loads
848   // and stores to/from the coroutine frame.
849   for (Argument &A : OrigF.args())
850     VMap[&A] = UndefValue::get(A.getType());
851 
852   SmallVector<ReturnInst *, 4> Returns;
853 
854   // Ignore attempts to change certain attributes of the function.
855   // TODO: maybe there should be a way to suppress this during cloning?
856   auto savedVisibility = NewF->getVisibility();
857   auto savedUnnamedAddr = NewF->getUnnamedAddr();
858   auto savedDLLStorageClass = NewF->getDLLStorageClass();
859 
860   // NewF's linkage (which CloneFunctionInto does *not* change) might not
861   // be compatible with the visibility of OrigF (which it *does* change),
862   // so protect against that.
863   auto savedLinkage = NewF->getLinkage();
864   NewF->setLinkage(llvm::GlobalValue::ExternalLinkage);
865 
866   CloneFunctionInto(NewF, &OrigF, VMap,
867                     CloneFunctionChangeType::LocalChangesOnly, Returns);
868 
869   auto &Context = NewF->getContext();
870 
871   // For async functions / continuations, adjust the scope line of the
872   // clone to the line number of the suspend point. However, only
873   // adjust the scope line when the files are the same. This ensures
874   // line number and file name belong together. The scope line is
875   // associated with all pre-prologue instructions. This avoids a jump
876   // in the linetable from the function declaration to the suspend point.
877   if (DISubprogram *SP = NewF->getSubprogram()) {
878     assert(SP != OrigF.getSubprogram() && SP->isDistinct());
879     if (ActiveSuspend)
880       if (auto DL = ActiveSuspend->getDebugLoc())
881         if (SP->getFile() == DL->getFile())
882           SP->setScopeLine(DL->getLine());
883     // Update the linkage name to reflect the modified symbol name. It
884     // is necessary to update the linkage name in Swift, since the
885     // mangling changes for resume functions. It might also be the
886     // right thing to do in C++, but due to a limitation in LLVM's
887     // AsmPrinter we can only do this if the function doesn't have an
888     // abstract specification, since the DWARF backend expects the
889     // abstract specification to contain the linkage name and asserts
890     // that they are identical.
891     if (!SP->getDeclaration() && SP->getUnit() &&
892         SP->getUnit()->getSourceLanguage() == dwarf::DW_LANG_Swift)
893       SP->replaceLinkageName(MDString::get(Context, NewF->getName()));
894   }
895 
896   NewF->setLinkage(savedLinkage);
897   NewF->setVisibility(savedVisibility);
898   NewF->setUnnamedAddr(savedUnnamedAddr);
899   NewF->setDLLStorageClass(savedDLLStorageClass);
900 
901   // Replace the attributes of the new function:
902   auto OrigAttrs = NewF->getAttributes();
903   auto NewAttrs = AttributeList();
904 
905   switch (Shape.ABI) {
906   case coro::ABI::Switch:
907     // Bootstrap attributes by copying function attributes from the
908     // original function.  This should include optimization settings and so on.
909     NewAttrs = NewAttrs.addAttributes(Context, AttributeList::FunctionIndex,
910                                       OrigAttrs.getFnAttributes());
911 
912     addFramePointerAttrs(NewAttrs, Context, 0,
913                          Shape.FrameSize, Shape.FrameAlign);
914     break;
915   case coro::ABI::Async: {
916     auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
917     if (OrigF.hasParamAttribute(Shape.AsyncLowering.ContextArgNo,
918                                 Attribute::SwiftAsync)) {
919       uint32_t ArgAttributeIndices =
920           ActiveAsyncSuspend->getStorageArgumentIndex();
921       auto ContextArgIndex = ArgAttributeIndices & 0xff;
922       addAsyncContextAttrs(NewAttrs, Context, ContextArgIndex);
923 
924       // `swiftasync` must precede `swiftself` so 0 is not a valid index for
925       // `swiftself`.
926       auto SwiftSelfIndex = ArgAttributeIndices >> 8;
927       if (SwiftSelfIndex)
928         addSwiftSelfAttrs(NewAttrs, Context, SwiftSelfIndex);
929     }
930 
931     // Transfer the original function's attributes.
932     auto FnAttrs = OrigF.getAttributes().getFnAttributes();
933     NewAttrs =
934         NewAttrs.addAttributes(Context, AttributeList::FunctionIndex, FnAttrs);
935     break;
936   }
937   case coro::ABI::Retcon:
938   case coro::ABI::RetconOnce:
939     // If we have a continuation prototype, just use its attributes,
940     // full-stop.
941     NewAttrs = Shape.RetconLowering.ResumePrototype->getAttributes();
942 
943     addFramePointerAttrs(NewAttrs, Context, 0,
944                          Shape.getRetconCoroId()->getStorageSize(),
945                          Shape.getRetconCoroId()->getStorageAlignment());
946     break;
947   }
948 
949   switch (Shape.ABI) {
950   // In these ABIs, the cloned functions always return 'void', and the
951   // existing return sites are meaningless.  Note that for unique
952   // continuations, this includes the returns associated with suspends;
953   // this is fine because we can't suspend twice.
954   case coro::ABI::Switch:
955   case coro::ABI::RetconOnce:
956     // Remove old returns.
957     for (ReturnInst *Return : Returns)
958       changeToUnreachable(Return);
959     break;
960 
961   // With multi-suspend continuations, we'll already have eliminated the
962   // original returns and inserted returns before all the suspend points,
963   // so we want to leave any returns in place.
964   case coro::ABI::Retcon:
965     break;
966   // Async lowering will insert musttail call functions at all suspend points
967   // followed by a return.
968   // Don't change returns to unreachable because that will trip up the verifier.
969   // These returns should be unreachable from the clone.
970   case coro::ABI::Async:
971     break;
972   }
973 
974   NewF->setAttributes(NewAttrs);
975   NewF->setCallingConv(Shape.getResumeFunctionCC());
976 
977   // Set up the new entry block.
978   replaceEntryBlock();
979 
980   Builder.SetInsertPoint(&NewF->getEntryBlock().front());
981   NewFramePtr = deriveNewFramePointer();
982 
983   // Remap frame pointer.
984   Value *OldFramePtr = VMap[Shape.FramePtr];
985   NewFramePtr->takeName(OldFramePtr);
986   OldFramePtr->replaceAllUsesWith(NewFramePtr);
987 
988   // Remap vFrame pointer.
989   auto *NewVFrame = Builder.CreateBitCast(
990       NewFramePtr, Type::getInt8PtrTy(Builder.getContext()), "vFrame");
991   Value *OldVFrame = cast<Value>(VMap[Shape.CoroBegin]);
992   OldVFrame->replaceAllUsesWith(NewVFrame);
993 
994   switch (Shape.ABI) {
995   case coro::ABI::Switch:
996     // Rewrite final suspend handling as it is not done via the switch (this
997     // allows us to remove the final case from the switch, since it is undefined
998     // behavior to resume a coroutine suspended at the final suspend point).
999     if (Shape.SwitchLowering.HasFinalSuspend)
1000       handleFinalSuspend();
1001     break;
1002   case coro::ABI::Async:
1003   case coro::ABI::Retcon:
1004   case coro::ABI::RetconOnce:
1005     // Replace uses of the active suspend with the corresponding
1006     // continuation-function arguments.
1007     assert(ActiveSuspend != nullptr &&
1008            "no active suspend when lowering a continuation-style coroutine");
1009     replaceRetconOrAsyncSuspendUses();
1010     break;
1011   }
1012 
1013   // Handle suspends.
1014   replaceCoroSuspends();
1015 
1016   // Handle swifterror.
1017   replaceSwiftErrorOps();
1018 
1019   // Remove coro.end intrinsics.
1020   replaceCoroEnds();
1021 
1022   // Salvage debug info that points into the coroutine frame.
1023   salvageDebugInfo();
1024 
1025   // Eliminate coro.free from the clones, replacing it with 'null' in cleanup,
1026   // to suppress deallocation code.
1027   if (Shape.ABI == coro::ABI::Switch)
1028     coro::replaceCoroFree(cast<CoroIdInst>(VMap[Shape.CoroBegin->getId()]),
1029                           /*Elide=*/ FKind == CoroCloner::Kind::SwitchCleanup);
1030 }
1031 
1032 // Create a resume clone by cloning the body of the original function, setting a
1033 // new entry block and replacing coro.suspend with an appropriate value to force
1034 // the resume or cleanup path at every suspend point.
1035 static Function *createClone(Function &F, const Twine &Suffix,
1036                              coro::Shape &Shape, CoroCloner::Kind FKind) {
1037   CoroCloner Cloner(F, Suffix, Shape, FKind);
1038   Cloner.create();
1039   return Cloner.getFunction();
1040 }
1041 
1042 /// Remove calls to llvm.coro.end in the original function.
1043 static void removeCoroEnds(const coro::Shape &Shape, CallGraph *CG) {
1044   for (auto End : Shape.CoroEnds) {
1045     replaceCoroEnd(End, Shape, Shape.FramePtr, /*in resume*/ false, CG);
1046   }
1047 }
1048 
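// Rewrite the async function pointer record so that its context-size field
// reflects the final size of the async context.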
1049 static void updateAsyncFuncPointerContextSize(coro::Shape &Shape) {
1050   assert(Shape.ABI == coro::ABI::Async);
1051 
1052   auto *FuncPtrStruct = cast<ConstantStruct>(
1053       Shape.AsyncLowering.AsyncFuncPointer->getInitializer());
1054   auto *OrigRelativeFunOffset = FuncPtrStruct->getOperand(0);
1055   auto *OrigContextSize = FuncPtrStruct->getOperand(1);
1056   auto *NewContextSize = ConstantInt::get(OrigContextSize->getType(),
1057                                           Shape.AsyncLowering.ContextSize);
1058   auto *NewFuncPtrStruct = ConstantStruct::get(
1059       FuncPtrStruct->getType(), OrigRelativeFunOffset, NewContextSize);
1060 
1061   Shape.AsyncLowering.AsyncFuncPointer->setInitializer(NewFuncPtrStruct);
1062 }
1063 
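// Replace all llvm.coro.size intrinsics with the allocated size of the frame
// type (and, for async coroutines, update the async function pointer's
// context size first).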
1064 static void replaceFrameSize(coro::Shape &Shape) {
1065   if (Shape.ABI == coro::ABI::Async)
1066     updateAsyncFuncPointerContextSize(Shape);
1067 
1068   if (Shape.CoroSizes.empty())
1069     return;
1070 
1071   // In the same function all coro.sizes should have the same result type.
1072   auto *SizeIntrin = Shape.CoroSizes.back();
1073   Module *M = SizeIntrin->getModule();
1074   const DataLayout &DL = M->getDataLayout();
1075   auto Size = DL.getTypeAllocSize(Shape.FrameTy);
1076   auto *SizeConstant = ConstantInt::get(SizeIntrin->getType(), Size);
1077 
1078   for (CoroSizeInst *CS : Shape.CoroSizes) {
1079     CS->replaceAllUsesWith(SizeConstant);
1080     CS->eraseFromParent();
1081   }
1082 }
1083 
1084 // Create a global constant array containing pointers to the functions provided
1085 // and set the Info parameter of CoroBegin to point at this constant. Example:
1086 //
1087 //   @f.resumers = internal constant [2 x void(%f.frame*)*]
1088 //                    [void(%f.frame*)* @f.resume, void(%f.frame*)* @f.destroy]
1089 //   define void @f() {
1090 //     ...
1091 //     call i8* @llvm.coro.begin(i8* null, i32 0, i8* null,
1092 //                    i8* bitcast([2 x void(%f.frame*)*] * @f.resumers to i8*))
1093 //
1094 // Assumes that all the functions have the same signature.
1095 static void setCoroInfo(Function &F, coro::Shape &Shape,
1096                         ArrayRef<Function *> Fns) {
1097   // This only works under the switch-lowering ABI because coro elision
1098   // only works on the switch-lowering ABI.
1099   assert(Shape.ABI == coro::ABI::Switch);
1100 
1101   SmallVector<Constant *, 4> Args(Fns.begin(), Fns.end());
1102   assert(!Args.empty());
1103   Function *Part = *Fns.begin();
1104   Module *M = Part->getParent();
1105   auto *ArrTy = ArrayType::get(Part->getType(), Args.size());
1106 
1107   auto *ConstVal = ConstantArray::get(ArrTy, Args);
1108   auto *GV = new GlobalVariable(*M, ConstVal->getType(), /*isConstant=*/true,
1109                                 GlobalVariable::PrivateLinkage, ConstVal,
1110                                 F.getName() + Twine(".resumers"));
1111 
1112   // Update coro.begin instruction to refer to this constant.
1113   LLVMContext &C = F.getContext();
1114   auto *BC = ConstantExpr::getPointerCast(GV, Type::getInt8PtrTy(C));
1115   Shape.getSwitchCoroId()->setInfo(BC);
1116 }
1117 
1118 // Store addresses of Resume/Destroy/Cleanup functions in the coroutine frame.
1119 static void updateCoroFrame(coro::Shape &Shape, Function *ResumeFn,
1120                             Function *DestroyFn, Function *CleanupFn) {
1121   assert(Shape.ABI == coro::ABI::Switch);
1122 
1123   IRBuilder<> Builder(Shape.FramePtr->getNextNode());
1124   auto *ResumeAddr = Builder.CreateStructGEP(
1125       Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Resume,
1126       "resume.addr");
1127   Builder.CreateStore(ResumeFn, ResumeAddr);
1128 
1129   Value *DestroyOrCleanupFn = DestroyFn;
1130 
1131   CoroIdInst *CoroId = Shape.getSwitchCoroId();
1132   if (CoroAllocInst *CA = CoroId->getCoroAlloc()) {
1133     // If there is a CoroAlloc and it returns false (meaning we elide the
1134     // allocation), use CleanupFn instead of DestroyFn.
1135     DestroyOrCleanupFn = Builder.CreateSelect(CA, DestroyFn, CleanupFn);
1136   }
1137 
1138   auto *DestroyAddr = Builder.CreateStructGEP(
1139       Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Destroy,
1140       "destroy.addr");
1141   Builder.CreateStore(DestroyOrCleanupFn, DestroyAddr);
1142 }
1143 
1144 static void postSplitCleanup(Function &F) {
1145   removeUnreachableBlocks(F);
1146 
1147   // For now, we do a mandatory verification step because we don't
1148   // entirely trust this pass.  Note that we don't want to add a verifier
1149   // pass to FPM below because it will also verify all the global data.
1150   if (verifyFunction(F, &errs()))
1151     report_fatal_error("Broken function");
1152 }
1153 
1154 // Assuming we arrived at the block NewBlock from the Prev instruction, store
1155 // the PHIs' incoming values in the ResolvedValues map.
1156 static void
1157 scanPHIsAndUpdateValueMap(Instruction *Prev, BasicBlock *NewBlock,
1158                           DenseMap<Value *, Value *> &ResolvedValues) {
1159   auto *PrevBB = Prev->getParent();
1160   for (PHINode &PN : NewBlock->phis()) {
1161     auto V = PN.getIncomingValueForBlock(PrevBB);
1162     // See if we already resolved it.
1163     auto VI = ResolvedValues.find(V);
1164     if (VI != ResolvedValues.end())
1165       V = VI->second;
1166     // Remember the value.
1167     ResolvedValues[&PN] = V;
1168   }
1169 }
1170 
1171 // Replace a sequence of branches leading to a ret with a clone of the ret
1172 // instruction. Because a suspend is represented by a switch, track the PHI
1173 // values and select the correct case successor when possible.
1174 static bool simplifyTerminatorLeadingToRet(Instruction *InitialInst) {
1175   DenseMap<Value *, Value *> ResolvedValues;
1176   BasicBlock *UnconditionalSucc = nullptr;
1177 
1178   Instruction *I = InitialInst;
1179   while (I->isTerminator() ||
1180          (isa<CmpInst>(I) && I->getNextNode()->isTerminator())) {
1181     if (isa<ReturnInst>(I)) {
1182       if (I != InitialInst) {
1183         // If InitialInst is an unconditional branch,
1184         // remove PHI values that come from basic block of InitialInst
1185         if (UnconditionalSucc)
1186           UnconditionalSucc->removePredecessor(InitialInst->getParent(), true);
1187         ReplaceInstWithInst(InitialInst, I->clone());
1188       }
1189       return true;
1190     }
1191     if (auto *BR = dyn_cast<BranchInst>(I)) {
1192       if (BR->isUnconditional()) {
1193         BasicBlock *BB = BR->getSuccessor(0);
1194         if (I == InitialInst)
1195           UnconditionalSucc = BB;
1196         scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
1197         I = BB->getFirstNonPHIOrDbgOrLifetime();
1198         continue;
1199       }
1200     } else if (auto *CondCmp = dyn_cast<CmpInst>(I)) {
1201       auto *BR = dyn_cast<BranchInst>(I->getNextNode());
1202       if (BR && BR->isConditional() && CondCmp == BR->getCondition()) {
1203         // If the number of cases of the suspended switch instruction is reduced
1204         // to 1, it is simplified to a CmpInst by llvm::ConstantFoldTerminator,
1205         // and the comparison looks like: %cond = icmp eq i8 %V, constant.
1206         ConstantInt *CondConst = dyn_cast<ConstantInt>(CondCmp->getOperand(1));
1207         if (CondConst && CondCmp->getPredicate() == CmpInst::ICMP_EQ) {
1208           Value *V = CondCmp->getOperand(0);
1209           auto it = ResolvedValues.find(V);
1210           if (it != ResolvedValues.end())
1211             V = it->second;
1212 
1213           if (ConstantInt *Cond0 = dyn_cast<ConstantInt>(V)) {
1214             BasicBlock *BB = Cond0->equalsInt(CondConst->getZExtValue())
1215                                  ? BR->getSuccessor(0)
1216                                  : BR->getSuccessor(1);
1217             scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
1218             I = BB->getFirstNonPHIOrDbgOrLifetime();
1219             continue;
1220           }
1221         }
1222       }
1223     } else if (auto *SI = dyn_cast<SwitchInst>(I)) {
1224       Value *V = SI->getCondition();
1225       auto it = ResolvedValues.find(V);
1226       if (it != ResolvedValues.end())
1227         V = it->second;
1228       if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
1229         BasicBlock *BB = SI->findCaseValue(Cond)->getCaseSuccessor();
1230         scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
1231         I = BB->getFirstNonPHIOrDbgOrLifetime();
1232         continue;
1233       }
1234     }
1235     return false;
1236   }
1237   return false;
1238 }
1239 
1240 // Check whether CI obeys the rules of the musttail attribute.
1241 static bool shouldBeMustTail(const CallInst &CI, const Function &F) {
1242   if (CI.isInlineAsm())
1243     return false;
1244 
1245   // Match prototypes and calling conventions of resume function.
1246   FunctionType *CalleeTy = CI.getFunctionType();
1247   if (!CalleeTy->getReturnType()->isVoidTy() || (CalleeTy->getNumParams() != 1))
1248     return false;
1249 
1250   Type *CalleeParmTy = CalleeTy->getParamType(0);
1251   if (!CalleeParmTy->isPointerTy() ||
1252       (CalleeParmTy->getPointerAddressSpace() != 0))
1253     return false;
1254 
1255   if (CI.getCallingConv() != F.getCallingConv())
1256     return false;
1257 
1258   // CI should not have any ABI-impacting function attributes.
1259   static const Attribute::AttrKind ABIAttrs[] = {
1260       Attribute::StructRet,    Attribute::ByVal,     Attribute::InAlloca,
1261       Attribute::Preallocated, Attribute::InReg,     Attribute::Returned,
1262       Attribute::SwiftSelf,    Attribute::SwiftError};
1263   AttributeList Attrs = CI.getAttributes();
1264   for (auto AK : ABIAttrs)
1265     if (Attrs.hasParamAttribute(0, AK))
1266       return false;
1267 
1268   return true;
1269 }
1270 
1271 // Add musttail to any resume instruction that is immediately followed by a
1272 // suspend (i.e. ret). We do this even in -O0 to support guaranteed tail calls
1273 // for symmetrical coroutine control transfer (C++ Coroutines TS extension).
1274 // This transformation is done only in the resume part of the coroutine, which
1275 // has an identical signature and calling convention to the coro.resume call.
1276 static void addMustTailToCoroResumes(Function &F) {
1277   bool changed = false;
1278 
1279   // Collect potential resume instructions.
1280   SmallVector<CallInst *, 4> Resumes;
1281   for (auto &I : instructions(F))
1282     if (auto *Call = dyn_cast<CallInst>(&I))
1283       if (shouldBeMustTail(*Call, F))
1284         Resumes.push_back(Call);
1285 
1286   // Set musttail on those that are followed by a ret instruction.
1287   for (CallInst *Call : Resumes)
1288     if (simplifyTerminatorLeadingToRet(Call->getNextNode())) {
1289       Call->setTailCallKind(CallInst::TCK_MustTail);
1290       changed = true;
1291     }
1292 
1293   if (changed)
1294     removeUnreachableBlocks(F);
1295 }
1296 
1297 // The coroutine has no suspend points. Remove the heap allocation for the
1298 // coroutine frame if possible.
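// For illustration only (simplified IR for the switch ABI with llvm.coro.alloc
// present; names are placeholders):
//   %need.alloc = call i1 @llvm.coro.alloc(token %id)
//   ...
//   %hdl = call i8* @llvm.coro.begin(token %id, i8* %mem)
// The dynamic allocation is elided: %need.alloc is folded to false, an alloca
// of the frame type is created instead, and %hdl is replaced by a cast of that
// alloca.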
1299 static void handleNoSuspendCoroutine(coro::Shape &Shape) {
1300   auto *CoroBegin = Shape.CoroBegin;
1301   auto *CoroId = CoroBegin->getId();
1302   auto *AllocInst = CoroId->getCoroAlloc();
1303   switch (Shape.ABI) {
1304   case coro::ABI::Switch: {
1305     auto SwitchId = cast<CoroIdInst>(CoroId);
1306     coro::replaceCoroFree(SwitchId, /*Elide=*/AllocInst != nullptr);
1307     if (AllocInst) {
1308       IRBuilder<> Builder(AllocInst);
1309       auto *Frame = Builder.CreateAlloca(Shape.FrameTy);
1310       Frame->setAlignment(Shape.FrameAlign);
1311       auto *VFrame = Builder.CreateBitCast(Frame, Builder.getInt8PtrTy());
1312       AllocInst->replaceAllUsesWith(Builder.getFalse());
1313       AllocInst->eraseFromParent();
1314       CoroBegin->replaceAllUsesWith(VFrame);
1315     } else {
1316       CoroBegin->replaceAllUsesWith(CoroBegin->getMem());
1317     }
1318 
1319     break;
1320   }
1321   case coro::ABI::Async:
1322   case coro::ABI::Retcon:
1323   case coro::ABI::RetconOnce:
1324     CoroBegin->replaceAllUsesWith(UndefValue::get(CoroBegin->getType()));
1325     break;
1326   }
1327 
1328   CoroBegin->eraseFromParent();
1329 }
1330 
1331 // SimplifySuspendPoint needs to check that there are no calls between
1332 // coro_save and coro_suspend, since any of those calls may potentially resume
1333 // the coroutine, in which case we cannot eliminate the suspend point.
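// Walks a single block from From up to (but not including) To; passing
// To == nullptr scans through the end of the block.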
1334 static bool hasCallsInBlockBetween(Instruction *From, Instruction *To) {
1335   for (Instruction *I = From; I != To; I = I->getNextNode()) {
1336     // Assume that no intrinsic can resume the coroutine.
1337     if (isa<IntrinsicInst>(I))
1338       continue;
1339 
1340     if (isa<CallBase>(I))
1341       return true;
1342   }
1343   return false;
1344 }
1345 
1346 static bool hasCallsInBlocksBetween(BasicBlock *SaveBB, BasicBlock *ResDesBB) {
1347   SmallPtrSet<BasicBlock *, 8> Set;
1348   SmallVector<BasicBlock *, 8> Worklist;
1349 
1350   Set.insert(SaveBB);
1351   Worklist.push_back(ResDesBB);
1352 
1353   // Accumulate all blocks between SaveBB and ResDesBB. Because CoroSaveIntr
1354   // returns a token consumed by the suspend instruction, all blocks in between
1355   // will have to eventually hit SaveBB when going backwards from ResDesBB.
1356   while (!Worklist.empty()) {
1357     auto *BB = Worklist.pop_back_val();
1358     Set.insert(BB);
1359     for (auto *Pred : predecessors(BB))
1360       if (Set.count(Pred) == 0)
1361         Worklist.push_back(Pred);
1362   }
1363 
1364   // SaveBB and ResDesBB are checked separately in hasCallsBetween.
1365   Set.erase(SaveBB);
1366   Set.erase(ResDesBB);
1367 
1368   for (auto *BB : Set)
1369     if (hasCallsInBlockBetween(BB->getFirstNonPHI(), nullptr))
1370       return true;
1371 
1372   return false;
1373 }
1374 
1375 static bool hasCallsBetween(Instruction *Save, Instruction *ResumeOrDestroy) {
1376   auto *SaveBB = Save->getParent();
1377   auto *ResumeOrDestroyBB = ResumeOrDestroy->getParent();
1378 
1379   if (SaveBB == ResumeOrDestroyBB)
1380     return hasCallsInBlockBetween(Save->getNextNode(), ResumeOrDestroy);
1381 
1382   // Any calls from Save to the end of the block?
1383   if (hasCallsInBlockBetween(Save->getNextNode(), nullptr))
1384     return true;
1385 
1386   // Any calls from the beginning of the block up to ResumeOrDestroy?
1387   if (hasCallsInBlockBetween(ResumeOrDestroyBB->getFirstNonPHI(),
1388                              ResumeOrDestroy))
1389     return true;
1390 
1391   // Any calls in all of the blocks between SaveBB and ResumeOrDestroyBB?
1392   if (hasCallsInBlocksBetween(SaveBB, ResumeOrDestroyBB))
1393     return true;
1394 
1395   return false;
1396 }
1397 
1398 // If a SuspendIntrin is preceded by Resume or Destroy, we can eliminate the
1399 // suspend point and replace it with normal control flow.
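// For illustration only (simplified IR; names are placeholders):
//   %save = call token @llvm.coro.save(i8* %hdl)
//   %addr = call i8* @llvm.coro.subfn.addr(i8* %hdl, i8 0)
//   %fn   = bitcast i8* %addr to void (i8*)*
//   call fastcc void %fn(i8* %hdl)
//   %sp   = call i8 @llvm.coro.suspend(token %save, i1 false)
// Here the same coroutine frame is resumed right before the suspend, so the
// suspend can be folded away: %sp is replaced by the sub-function index
// (0 for resume, 1 for destroy) and the resume/destroy call is deleted.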
1400 static bool simplifySuspendPoint(CoroSuspendInst *Suspend,
1401                                  CoroBeginInst *CoroBegin) {
1402   Instruction *Prev = Suspend->getPrevNode();
1403   if (!Prev) {
1404     auto *Pred = Suspend->getParent()->getSinglePredecessor();
1405     if (!Pred)
1406       return false;
1407     Prev = Pred->getTerminator();
1408   }
1409 
1410   CallBase *CB = dyn_cast<CallBase>(Prev);
1411   if (!CB)
1412     return false;
1413 
1414   auto *Callee = CB->getCalledOperand()->stripPointerCasts();
1415 
1416   // See if the callsite is for resumption or destruction of the coroutine.
1417   auto *SubFn = dyn_cast<CoroSubFnInst>(Callee);
1418   if (!SubFn)
1419     return false;
1420 
1421   // If it does not refer to the current coroutine, we cannot do anything with it.
1422   if (SubFn->getFrame() != CoroBegin)
1423     return false;
1424 
1425   // See if the transformation is safe. Specifically, see if there are any
1426   // calls in between Save and CallInstr. They can potentially resume the
1427   // coroutine, rendering this optimization unsafe.
1428   auto *Save = Suspend->getCoroSave();
1429   if (hasCallsBetween(Save, CB))
1430     return false;
1431 
1432   // Replace llvm.coro.suspend with the value that results in resumption over
1433   // the resume or cleanup path.
1434   Suspend->replaceAllUsesWith(SubFn->getRawIndex());
1435   Suspend->eraseFromParent();
1436   Save->eraseFromParent();
1437 
1438   // No longer need a call to coro.resume or coro.destroy.
1439   if (auto *Invoke = dyn_cast<InvokeInst>(CB)) {
1440     BranchInst::Create(Invoke->getNormalDest(), Invoke);
1441   }
1442 
1443   // Grab the CalledValue from CB before erasing the CallInstr.
1444   auto *CalledValue = CB->getCalledOperand();
1445   CB->eraseFromParent();
1446 
1447   // If it has no more users, remove it. Usually it is a bitcast of SubFn.
1448   if (CalledValue != SubFn && CalledValue->user_empty())
1449     if (auto *I = dyn_cast<Instruction>(CalledValue))
1450       I->eraseFromParent();
1451 
1452   // Now we are good to remove SubFn.
1453   if (SubFn->user_empty())
1454     SubFn->eraseFromParent();
1455 
1456   return true;
1457 }
1458 
1459 // Remove suspend points that are simplified.
1460 static void simplifySuspendPoints(coro::Shape &Shape) {
1461   // Currently, the only simplification we do is switch-lowering-specific.
1462   if (Shape.ABI != coro::ABI::Switch)
1463     return;
1464 
1465   auto &S = Shape.CoroSuspends;
1466   size_t I = 0, N = S.size();
1467   if (N == 0)
1468     return;
1469   while (true) {
1470     auto SI = cast<CoroSuspendInst>(S[I]);
1471     // Leave final.suspend to handleFinalSuspend since it is undefined behavior
1472     // to resume a coroutine suspended at the final suspend point.
1473     if (!SI->isFinal() && simplifySuspendPoint(SI, Shape.CoroBegin)) {
1474       if (--N == I)
1475         break;
1476       std::swap(S[I], S[N]);
1477       continue;
1478     }
1479     if (++I == N)
1480       break;
1481   }
1482   S.resize(N);
1483 }
1484 
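// Split a switch-lowered coroutine into the shared .resume, .destroy and
// .cleanup functions, store their addresses in the coroutine frame, and record
// them in Clones.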
1485 static void splitSwitchCoroutine(Function &F, coro::Shape &Shape,
1486                                  SmallVectorImpl<Function *> &Clones) {
1487   assert(Shape.ABI == coro::ABI::Switch);
1488 
1489   createResumeEntryBlock(F, Shape);
1490   auto ResumeClone = createClone(F, ".resume", Shape,
1491                                  CoroCloner::Kind::SwitchResume);
1492   auto DestroyClone = createClone(F, ".destroy", Shape,
1493                                   CoroCloner::Kind::SwitchUnwind);
1494   auto CleanupClone = createClone(F, ".cleanup", Shape,
1495                                   CoroCloner::Kind::SwitchCleanup);
1496 
1497   postSplitCleanup(*ResumeClone);
1498   postSplitCleanup(*DestroyClone);
1499   postSplitCleanup(*CleanupClone);
1500 
1501   addMustTailToCoroResumes(*ResumeClone);
1502 
1503   // Store addresses of the resume/destroy/cleanup functions in the coroutine frame.
1504   updateCoroFrame(Shape, ResumeClone, DestroyClone, CleanupClone);
1505 
1506   assert(Clones.empty());
1507   Clones.push_back(ResumeClone);
1508   Clones.push_back(DestroyClone);
1509   Clones.push_back(CleanupClone);
1510 
1511   // Create a constant array referring to the resume/destroy/cleanup functions,
1512   // pointed to by the last argument of @llvm.coro.info, so that the CoroElide
1513   // pass can determine the correct function to call.
1514   setCoroInfo(F, Shape, Clones);
1515 }
1516 
1517 static void replaceAsyncResumeFunction(CoroSuspendAsyncInst *Suspend,
1518                                        Value *Continuation) {
1519   auto *ResumeIntrinsic = Suspend->getResumeFunction();
1520   auto &Context = Suspend->getParent()->getParent()->getContext();
1521   auto *Int8PtrTy = Type::getInt8PtrTy(Context);
1522 
1523   IRBuilder<> Builder(ResumeIntrinsic);
1524   auto *Val = Builder.CreateBitOrPointerCast(Continuation, Int8PtrTy);
1525   ResumeIntrinsic->replaceAllUsesWith(Val);
1526   ResumeIntrinsic->eraseFromParent();
1527   Suspend->setOperand(CoroSuspendAsyncInst::ResumeFunctionArg,
1528                       UndefValue::get(Int8PtrTy));
1529 }
1530 
1531 /// Coerce the arguments in \p FnArgs according to \p FnTy in \p CallArgs.
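/// Arguments whose type already matches the corresponding parameter type are
/// forwarded unchanged; all others are given a bit-or-pointer cast at the
/// current insertion point of \p Builder.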
1532 static void coerceArguments(IRBuilder<> &Builder, FunctionType *FnTy,
1533                             ArrayRef<Value *> FnArgs,
1534                             SmallVectorImpl<Value *> &CallArgs) {
1535   size_t ArgIdx = 0;
1536   for (auto paramTy : FnTy->params()) {
1537     assert(ArgIdx < FnArgs.size());
1538     if (paramTy != FnArgs[ArgIdx]->getType())
1539       CallArgs.push_back(
1540           Builder.CreateBitOrPointerCast(FnArgs[ArgIdx], paramTy));
1541     else
1542       CallArgs.push_back(FnArgs[ArgIdx]);
1543     ++ArgIdx;
1544   }
1545 }
1546 
1547 CallInst *coro::createMustTailCall(DebugLoc Loc, Function *MustTailCallFn,
1548                                    ArrayRef<Value *> Arguments,
1549                                    IRBuilder<> &Builder) {
1550   auto *FnTy =
1551       cast<FunctionType>(MustTailCallFn->getType()->getPointerElementType());
1552   // Coerce the arguments: LLVM optimizations seem to ignore the types in
1553   // vararg functions and throw away casts in optimized mode.
1554   SmallVector<Value *, 8> CallArgs;
1555   coerceArguments(Builder, FnTy, Arguments, CallArgs);
1556 
1557   auto *TailCall = Builder.CreateCall(FnTy, MustTailCallFn, CallArgs);
1558   TailCall->setTailCallKind(CallInst::TCK_MustTail);
1559   TailCall->setDebugLoc(Loc);
1560   TailCall->setCallingConv(MustTailCallFn->getCallingConv());
1561   return TailCall;
1562 }
1563 
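// Split an async-lowered coroutine: rewrite uses of llvm.coro.begin to point
// into the caller-supplied async context, create one continuation function per
// suspend point (recorded in Clones), and end each suspend with a musttail
// call to the function carried by the suspend intrinsic.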
1564 static void splitAsyncCoroutine(Function &F, coro::Shape &Shape,
1565                                 SmallVectorImpl<Function *> &Clones) {
1566   assert(Shape.ABI == coro::ABI::Async);
1567   assert(Clones.empty());
1568   // Reset various things that the optimizer might have decided it
1569   // "knows" about the coroutine function due to not seeing a return.
1570   F.removeFnAttr(Attribute::NoReturn);
1571   F.removeAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
1572   F.removeAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
1573 
1574   auto &Context = F.getContext();
1575   auto *Int8PtrTy = Type::getInt8PtrTy(Context);
1576 
1577   auto *Id = cast<CoroIdAsyncInst>(Shape.CoroBegin->getId());
1578   IRBuilder<> Builder(Id);
1579 
1580   auto *FramePtr = Id->getStorage();
1581   FramePtr = Builder.CreateBitOrPointerCast(FramePtr, Int8PtrTy);
1582   FramePtr = Builder.CreateConstInBoundsGEP1_32(
1583       Type::getInt8Ty(Context), FramePtr, Shape.AsyncLowering.FrameOffset,
1584       "async.ctx.frameptr");
1585 
1586   // Map all uses of llvm.coro.begin to the allocated frame pointer.
1587   {
1588     // Make sure we don't invalidate Shape.FramePtr.
1589     TrackingVH<Instruction> Handle(Shape.FramePtr);
1590     Shape.CoroBegin->replaceAllUsesWith(FramePtr);
1591     Shape.FramePtr = Handle.getValPtr();
1592   }
1593 
1594   // Create all the functions in order after the main function.
1595   auto NextF = std::next(F.getIterator());
1596 
1597   // Create a continuation function for each of the suspend points.
1598   Clones.reserve(Shape.CoroSuspends.size());
1599   for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
1600     auto *Suspend = cast<CoroSuspendAsyncInst>(Shape.CoroSuspends[Idx]);
1601 
1602     // Create the clone declaration.
1603     auto ResumeNameSuffix = ".resume.";
1604     auto ProjectionFunctionName =
1605         Suspend->getAsyncContextProjectionFunction()->getName();
1606     bool UseSwiftMangling = false;
1607     if (ProjectionFunctionName.equals("__swift_async_resume_project_context")) {
1608       ResumeNameSuffix = "TQ";
1609       UseSwiftMangling = true;
1610     } else if (ProjectionFunctionName.equals(
1611                    "__swift_async_resume_get_context")) {
1612       ResumeNameSuffix = "TY";
1613       UseSwiftMangling = true;
1614     }
1615     auto *Continuation = createCloneDeclaration(
1616         F, Shape,
1617         UseSwiftMangling ? ResumeNameSuffix + Twine(Idx) + "_"
1618                          : ResumeNameSuffix + Twine(Idx),
1619         NextF, Suspend);
1620     Clones.push_back(Continuation);
1621 
1622     // Insert a branch to a new return block immediately before the suspend
1623     // point.
1624     auto *SuspendBB = Suspend->getParent();
1625     auto *NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
1626     auto *Branch = cast<BranchInst>(SuspendBB->getTerminator());
1627 
1628     // Place it before the first suspend.
1629     auto *ReturnBB =
1630         BasicBlock::Create(F.getContext(), "coro.return", &F, NewSuspendBB);
1631     Branch->setSuccessor(0, ReturnBB);
1632 
1633     IRBuilder<> Builder(ReturnBB);
1634 
1635     // Insert the call to the tail call function and inline it.
1636     auto *Fn = Suspend->getMustTailCallFunction();
1637     SmallVector<Value *, 8> Args(Suspend->args());
1638     auto FnArgs = ArrayRef<Value *>(Args).drop_front(
1639         CoroSuspendAsyncInst::MustTailCallFuncArg + 1);
1640     auto *TailCall =
1641         coro::createMustTailCall(Suspend->getDebugLoc(), Fn, FnArgs, Builder);
1642     Builder.CreateRetVoid();
1643     InlineFunctionInfo FnInfo;
1644     auto InlineRes = InlineFunction(*TailCall, FnInfo);
1645     assert(InlineRes.isSuccess() && "Expected inlining to succeed");
1646     (void)InlineRes;
1647 
1648     // Replace the llvm.coro.async.resume intrinsic call.
1649     replaceAsyncResumeFunction(Suspend, Continuation);
1650   }
1651 
1652   assert(Clones.size() == Shape.CoroSuspends.size());
1653   for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
1654     auto *Suspend = Shape.CoroSuspends[Idx];
1655     auto *Clone = Clones[Idx];
1656 
1657     CoroCloner(F, "resume." + Twine(Idx), Shape, Clone, Suspend).create();
1658   }
1659 }
1660 
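// Split a returned-continuation coroutine: allocate the frame (or reuse the
// caller-provided storage), funnel every suspend into a single return block
// that yields the next continuation plus the suspended values, and create one
// continuation function per suspend point (recorded in Clones).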
1661 static void splitRetconCoroutine(Function &F, coro::Shape &Shape,
1662                                  SmallVectorImpl<Function *> &Clones) {
1663   assert(Shape.ABI == coro::ABI::Retcon ||
1664          Shape.ABI == coro::ABI::RetconOnce);
1665   assert(Clones.empty());
1666 
1667   // Reset various things that the optimizer might have decided it
1668   // "knows" about the coroutine function due to not seeing a return.
1669   F.removeFnAttr(Attribute::NoReturn);
1670   F.removeAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
1671   F.removeAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
1672 
1673   // Allocate the frame.
1674   auto *Id = cast<AnyCoroIdRetconInst>(Shape.CoroBegin->getId());
1675   Value *RawFramePtr;
1676   if (Shape.RetconLowering.IsFrameInlineInStorage) {
1677     RawFramePtr = Id->getStorage();
1678   } else {
1679     IRBuilder<> Builder(Id);
1680 
1681     // Determine the size of the frame.
1682     const DataLayout &DL = F.getParent()->getDataLayout();
1683     auto Size = DL.getTypeAllocSize(Shape.FrameTy);
1684 
1685     // Allocate.  We don't need to update the call graph node because we're
1686     // going to recompute it from scratch after splitting.
1687     // FIXME: pass the required alignment
1688     RawFramePtr = Shape.emitAlloc(Builder, Builder.getInt64(Size), nullptr);
1689     RawFramePtr =
1690       Builder.CreateBitCast(RawFramePtr, Shape.CoroBegin->getType());
1691 
1692     // Stash the allocated frame pointer in the continuation storage.
1693     auto Dest = Builder.CreateBitCast(Id->getStorage(),
1694                                       RawFramePtr->getType()->getPointerTo());
1695     Builder.CreateStore(RawFramePtr, Dest);
1696   }
1697 
1698   // Map all uses of llvm.coro.begin to the allocated frame pointer.
1699   {
1700     // Make sure we don't invalidate Shape.FramePtr.
1701     TrackingVH<Instruction> Handle(Shape.FramePtr);
1702     Shape.CoroBegin->replaceAllUsesWith(RawFramePtr);
1703     Shape.FramePtr = Handle.getValPtr();
1704   }
1705 
1706   // Create a unique return block.
1707   BasicBlock *ReturnBB = nullptr;
1708   SmallVector<PHINode *, 4> ReturnPHIs;
1709 
1710   // Create all the functions in order after the main function.
1711   auto NextF = std::next(F.getIterator());
1712 
1713   // Create a continuation function for each of the suspend points.
1714   Clones.reserve(Shape.CoroSuspends.size());
1715   for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
1716     auto Suspend = cast<CoroSuspendRetconInst>(Shape.CoroSuspends[i]);
1717 
1718     // Create the clone declaration.
1719     auto Continuation =
1720         createCloneDeclaration(F, Shape, ".resume." + Twine(i), NextF, nullptr);
1721     Clones.push_back(Continuation);
1722 
1723     // Insert a branch to the unified return block immediately before
1724     // the suspend point.
1725     auto SuspendBB = Suspend->getParent();
1726     auto NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
1727     auto Branch = cast<BranchInst>(SuspendBB->getTerminator());
1728 
1729     // Create the unified return block.
1730     if (!ReturnBB) {
1731       // Place it before the first suspend.
1732       ReturnBB = BasicBlock::Create(F.getContext(), "coro.return", &F,
1733                                     NewSuspendBB);
1734       Shape.RetconLowering.ReturnBlock = ReturnBB;
1735 
1736       IRBuilder<> Builder(ReturnBB);
1737 
1738       // Create PHIs for all the return values.
1739       assert(ReturnPHIs.empty());
1740 
1741       // First, the continuation.
1742       ReturnPHIs.push_back(Builder.CreatePHI(Continuation->getType(),
1743                                              Shape.CoroSuspends.size()));
1744 
1745       // Next, all the directly-yielded values.
1746       for (auto ResultTy : Shape.getRetconResultTypes())
1747         ReturnPHIs.push_back(Builder.CreatePHI(ResultTy,
1748                                                Shape.CoroSuspends.size()));
1749 
1750       // Build the return value.
1751       auto RetTy = F.getReturnType();
1752 
1753       // Cast the continuation value if necessary.
1754       // We can't rely on the types matching up because that type would
1755       // have to be infinite.
1756       auto CastedContinuationTy =
1757         (ReturnPHIs.size() == 1 ? RetTy : RetTy->getStructElementType(0));
1758       auto *CastedContinuation =
1759         Builder.CreateBitCast(ReturnPHIs[0], CastedContinuationTy);
1760 
1761       Value *RetV;
1762       if (ReturnPHIs.size() == 1) {
1763         RetV = CastedContinuation;
1764       } else {
1765         RetV = UndefValue::get(RetTy);
1766         RetV = Builder.CreateInsertValue(RetV, CastedContinuation, 0);
1767         for (size_t I = 1, E = ReturnPHIs.size(); I != E; ++I)
1768           RetV = Builder.CreateInsertValue(RetV, ReturnPHIs[I], I);
1769       }
1770 
1771       Builder.CreateRet(RetV);
1772     }
1773 
1774     // Branch to the return block.
1775     Branch->setSuccessor(0, ReturnBB);
1776     ReturnPHIs[0]->addIncoming(Continuation, SuspendBB);
1777     size_t NextPHIIndex = 1;
1778     for (auto &VUse : Suspend->value_operands())
1779       ReturnPHIs[NextPHIIndex++]->addIncoming(&*VUse, SuspendBB);
1780     assert(NextPHIIndex == ReturnPHIs.size());
1781   }
1782 
1783   assert(Clones.size() == Shape.CoroSuspends.size());
1784   for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
1785     auto Suspend = Shape.CoroSuspends[i];
1786     auto Clone = Clones[i];
1787 
1788     CoroCloner(F, "resume." + Twine(i), Shape, Clone, Suspend).create();
1789   }
1790 }
1791 
1792 namespace {
1793   class PrettyStackTraceFunction : public PrettyStackTraceEntry {
1794     Function &F;
1795   public:
1796     PrettyStackTraceFunction(Function &F) : F(F) {}
1797     void print(raw_ostream &OS) const override {
1798       OS << "While splitting coroutine ";
1799       F.printAsOperand(OS, /*print type*/ false, F.getParent());
1800       OS << "\n";
1801     }
1802   };
1803 }
1804 
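// Main entry point for splitting a single coroutine: build the frame, lower
// and simplify the suspend points, and populate Clones with the outlined
// functions according to the coroutine's ABI.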
1805 static coro::Shape splitCoroutine(Function &F,
1806                                   SmallVectorImpl<Function *> &Clones,
1807                                   bool ReuseFrameSlot) {
1808   PrettyStackTraceFunction prettyStackTrace(F);
1809 
1810   // The suspend-crossing algorithm in buildCoroutineFrame gets tripped
1811   // up by uses in unreachable blocks, so remove them as a first pass.
1812   removeUnreachableBlocks(F);
1813 
1814   coro::Shape Shape(F, ReuseFrameSlot);
1815   if (!Shape.CoroBegin)
1816     return Shape;
1817 
1818   simplifySuspendPoints(Shape);
1819   buildCoroutineFrame(F, Shape);
1820   replaceFrameSize(Shape);
1821 
1822   // If there are no suspend points, no split required, just remove
1823   // the allocation and deallocation blocks, they are not needed.
1824   if (Shape.CoroSuspends.empty()) {
1825     handleNoSuspendCoroutine(Shape);
1826   } else {
1827     switch (Shape.ABI) {
1828     case coro::ABI::Switch:
1829       splitSwitchCoroutine(F, Shape, Clones);
1830       break;
1831     case coro::ABI::Async:
1832       splitAsyncCoroutine(F, Shape, Clones);
1833       break;
1834     case coro::ABI::Retcon:
1835     case coro::ABI::RetconOnce:
1836       splitRetconCoroutine(F, Shape, Clones);
1837       break;
1838     }
1839   }
1840 
1841   // Replace all the swifterror operations in the original function.
1842   // This invalidates SwiftErrorOps in the Shape.
1843   replaceSwiftErrorOps(F, Shape, nullptr);
1844 
1845   return Shape;
1846 }
1847 
1848 static void
1849 updateCallGraphAfterCoroutineSplit(Function &F, const coro::Shape &Shape,
1850                                    const SmallVectorImpl<Function *> &Clones,
1851                                    CallGraph &CG, CallGraphSCC &SCC) {
1852   if (!Shape.CoroBegin)
1853     return;
1854 
1855   removeCoroEnds(Shape, &CG);
1856   postSplitCleanup(F);
1857 
1858   // Update call graph and add the functions we created to the SCC.
1859   coro::updateCallGraph(F, Clones, CG, SCC);
1860 }
1861 
1862 static void updateCallGraphAfterCoroutineSplit(
1863     LazyCallGraph::Node &N, const coro::Shape &Shape,
1864     const SmallVectorImpl<Function *> &Clones, LazyCallGraph::SCC &C,
1865     LazyCallGraph &CG, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
1866     FunctionAnalysisManager &FAM) {
1867   if (!Shape.CoroBegin)
1868     return;
1869 
1870   for (llvm::AnyCoroEndInst *End : Shape.CoroEnds) {
1871     auto &Context = End->getContext();
1872     End->replaceAllUsesWith(ConstantInt::getFalse(Context));
1873     End->eraseFromParent();
1874   }
1875 
1876   if (!Clones.empty()) {
1877     switch (Shape.ABI) {
1878     case coro::ABI::Switch:
1879       // Each clone in the Switch lowering is independent of the other clones.
1880       // Let the LazyCallGraph know about each one separately.
1881       for (Function *Clone : Clones)
1882         CG.addSplitFunction(N.getFunction(), *Clone);
1883       break;
1884     case coro::ABI::Async:
1885     case coro::ABI::Retcon:
1886     case coro::ABI::RetconOnce:
1887       // Each clone in the Async/Retcon lowering references each of the other clones.
1888       // Let the LazyCallGraph know about all of them at once.
1889       if (!Clones.empty())
1890         CG.addSplitRefRecursiveFunctions(N.getFunction(), Clones);
1891       break;
1892     }
1893 
1894     // Let the CGSCC infra handle the changes to the original function.
1895     updateCGAndAnalysisManagerForCGSCCPass(CG, C, N, AM, UR, FAM);
1896   }
1897 
1898   // Do some cleanup and let the CGSCC infra see if we've cleaned up any edges
1899   // to the split functions.
1900   postSplitCleanup(N.getFunction());
1901   updateCGAndAnalysisManagerForFunctionPass(CG, C, N, AM, UR, FAM);
1902 }
1903 
1904 // When we see the coroutine for the first time, we insert an indirect call
1905 // to a devirt trigger function and mark the coroutine as now being ready
1906 // for split.
1907 // Async lowering uses this after it has split the function to restart the
1908 // pipeline.
1909 static void prepareForSplit(Function &F, CallGraph &CG,
1910                             bool MarkForAsyncRestart = false) {
1911   Module &M = *F.getParent();
1912   LLVMContext &Context = F.getContext();
1913 #ifndef NDEBUG
1914   Function *DevirtFn = M.getFunction(CORO_DEVIRT_TRIGGER_FN);
1915   assert(DevirtFn && "coro.devirt.trigger function not found");
1916 #endif
1917 
1918   F.addFnAttr(CORO_PRESPLIT_ATTR, MarkForAsyncRestart
1919                                       ? ASYNC_RESTART_AFTER_SPLIT
1920                                       : PREPARED_FOR_SPLIT);
1921 
1922   // Insert an indirect call sequence that will be devirtualized by the
1923   // CoroElide pass:
1924   //    %0 = call i8* @llvm.coro.subfn.addr(i8* null, i8 -1)
1925   //    %1 = bitcast i8* %0 to void(i8*)*
1926   //    call void %1(i8* null)
1927   coro::LowererBase Lowerer(M);
1928   Instruction *InsertPt =
1929       MarkForAsyncRestart ? F.getEntryBlock().getFirstNonPHIOrDbgOrLifetime()
1930                           : F.getEntryBlock().getTerminator();
1931   auto *Null = ConstantPointerNull::get(Type::getInt8PtrTy(Context));
1932   auto *DevirtFnAddr =
1933       Lowerer.makeSubFnCall(Null, CoroSubFnInst::RestartTrigger, InsertPt);
1934   FunctionType *FnTy = FunctionType::get(Type::getVoidTy(Context),
1935                                          {Type::getInt8PtrTy(Context)}, false);
1936   auto *IndirectCall = CallInst::Create(FnTy, DevirtFnAddr, Null, "", InsertPt);
1937 
1938   // Update the call graph with the indirect call we just added.
1939   CG[&F]->addCalledFunction(IndirectCall, CG.getCallsExternalNode());
1940 }
1941 
1942 // Make sure that there is a devirtualization trigger function that the
1943 // coro-split pass uses to force a restart of the CGSCC pipeline. If the devirt
1944 // trigger function is not found, we will create one and add it to the current
1945 // SCC.
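// Roughly, the trigger function created below is the always-inline no-op
//   define private void @coro.devirt.trigger(i8*) {
//   entry:
//     ret void
//   }
// (the actual symbol name comes from CORO_DEVIRT_TRIGGER_FN).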
1946 static void createDevirtTriggerFunc(CallGraph &CG, CallGraphSCC &SCC) {
1947   Module &M = CG.getModule();
1948   if (M.getFunction(CORO_DEVIRT_TRIGGER_FN))
1949     return;
1950 
1951   LLVMContext &C = M.getContext();
1952   auto *FnTy = FunctionType::get(Type::getVoidTy(C), Type::getInt8PtrTy(C),
1953                                  /*isVarArg=*/false);
1954   Function *DevirtFn =
1955       Function::Create(FnTy, GlobalValue::LinkageTypes::PrivateLinkage,
1956                        CORO_DEVIRT_TRIGGER_FN, &M);
1957   DevirtFn->addFnAttr(Attribute::AlwaysInline);
1958   auto *Entry = BasicBlock::Create(C, "entry", DevirtFn);
1959   ReturnInst::Create(C, Entry);
1960 
1961   auto *Node = CG.getOrInsertFunction(DevirtFn);
1962 
1963   SmallVector<CallGraphNode *, 8> Nodes(SCC.begin(), SCC.end());
1964   Nodes.push_back(Node);
1965   SCC.initialize(Nodes);
1966 }
1967 
1968 /// Replace a call to llvm.coro.prepare.retcon.
1969 static void replacePrepare(CallInst *Prepare, LazyCallGraph &CG,
1970                            LazyCallGraph::SCC &C) {
1971   auto CastFn = Prepare->getArgOperand(0); // as an i8*
1972   auto Fn = CastFn->stripPointerCasts();   // as its original type
1973 
1974   // Attempt to peephole this pattern:
1975   //    %0 = bitcast [[TYPE]] @some_function to i8*
1976   //    %1 = call @llvm.coro.prepare.retcon(i8* %0)
1977   //    %2 = bitcast %1 to [[TYPE]]
1978   // ==>
1979   //    %2 = @some_function
1980   for (auto UI = Prepare->use_begin(), UE = Prepare->use_end(); UI != UE;) {
1981     // Look for bitcasts back to the original function type.
1982     auto *Cast = dyn_cast<BitCastInst>((UI++)->getUser());
1983     if (!Cast || Cast->getType() != Fn->getType())
1984       continue;
1985 
1986     // Replace and remove the cast.
1987     Cast->replaceAllUsesWith(Fn);
1988     Cast->eraseFromParent();
1989   }
1990 
1991   // Replace any remaining uses with the function as an i8*.
1992   // This can never directly be a callee, so we don't need to update CG.
1993   Prepare->replaceAllUsesWith(CastFn);
1994   Prepare->eraseFromParent();
1995 
1996   // Kill dead bitcasts.
1997   while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
1998     if (!Cast->use_empty())
1999       break;
2000     CastFn = Cast->getOperand(0);
2001     Cast->eraseFromParent();
2002   }
2003 }
2004 /// Replace a call to llvm.coro.prepare.retcon.
2005 static void replacePrepare(CallInst *Prepare, CallGraph &CG) {
2006   auto CastFn = Prepare->getArgOperand(0); // as an i8*
2007   auto Fn = CastFn->stripPointerCasts(); // as its original type
2008 
2009   // Find call graph nodes for the preparation.
2010   CallGraphNode *PrepareUserNode = nullptr, *FnNode = nullptr;
2011   if (auto ConcreteFn = dyn_cast<Function>(Fn)) {
2012     PrepareUserNode = CG[Prepare->getFunction()];
2013     FnNode = CG[ConcreteFn];
2014   }
2015 
2016   // Attempt to peephole this pattern:
2017   //    %0 = bitcast [[TYPE]] @some_function to i8*
2018   //    %1 = call @llvm.coro.prepare.retcon(i8* %0)
2019   //    %2 = bitcast %1 to [[TYPE]]
2020   // ==>
2021   //    %2 = @some_function
2022   for (auto UI = Prepare->use_begin(), UE = Prepare->use_end();
2023          UI != UE; ) {
2024     // Look for bitcasts back to the original function type.
2025     auto *Cast = dyn_cast<BitCastInst>((UI++)->getUser());
2026     if (!Cast || Cast->getType() != Fn->getType()) continue;
2027 
2028     // Check whether the replacement will introduce new direct calls.
2029     // If so, we'll need to update the call graph.
2030     if (PrepareUserNode) {
2031       for (auto &Use : Cast->uses()) {
2032         if (auto *CB = dyn_cast<CallBase>(Use.getUser())) {
2033           if (!CB->isCallee(&Use))
2034             continue;
2035           PrepareUserNode->removeCallEdgeFor(*CB);
2036           PrepareUserNode->addCalledFunction(CB, FnNode);
2037         }
2038       }
2039     }
2040 
2041     // Replace and remove the cast.
2042     Cast->replaceAllUsesWith(Fn);
2043     Cast->eraseFromParent();
2044   }
2045 
2046   // Replace any remaining uses with the function as an i8*.
2047   // This can never directly be a callee, so we don't need to update CG.
2048   Prepare->replaceAllUsesWith(CastFn);
2049   Prepare->eraseFromParent();
2050 
2051   // Kill dead bitcasts.
2052   while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
2053     if (!Cast->use_empty()) break;
2054     CastFn = Cast->getOperand(0);
2055     Cast->eraseFromParent();
2056   }
2057 }
2058 
2059 static bool replaceAllPrepares(Function *PrepareFn, LazyCallGraph &CG,
2060                                LazyCallGraph::SCC &C) {
2061   bool Changed = false;
2062   for (auto PI = PrepareFn->use_begin(), PE = PrepareFn->use_end(); PI != PE;) {
2063     // Intrinsics can only be used in calls.
2064     auto *Prepare = cast<CallInst>((PI++)->getUser());
2065     replacePrepare(Prepare, CG, C);
2066     Changed = true;
2067   }
2068 
2069   return Changed;
2070 }
2071 
2072 /// Remove calls to llvm.coro.prepare.retcon, a barrier meant to prevent
2073 /// IPO from operating on calls to a retcon coroutine before it's been
2074 /// split.  This is only safe to do after we've split all retcon
2075 /// coroutines in the module.  We can do this in this pass because
2076 /// this pass does promise to split all retcon coroutines (as opposed to
2077 /// switch coroutines, which are lowered in multiple stages).
2078 static bool replaceAllPrepares(Function *PrepareFn, CallGraph &CG) {
2079   bool Changed = false;
2080   for (auto PI = PrepareFn->use_begin(), PE = PrepareFn->use_end();
2081          PI != PE; ) {
2082     // Intrinsics can only be used in calls.
2083     auto *Prepare = cast<CallInst>((PI++)->getUser());
2084     replacePrepare(Prepare, CG);
2085     Changed = true;
2086   }
2087 
2088   return Changed;
2089 }
2090 
2091 static bool declaresCoroSplitIntrinsics(const Module &M) {
2092   return coro::declaresIntrinsics(M, {"llvm.coro.begin",
2093                                       "llvm.coro.prepare.retcon",
2094                                       "llvm.coro.prepare.async"});
2095 }
2096 
2097 static void addPrepareFunction(const Module &M,
2098                                SmallVectorImpl<Function *> &Fns,
2099                                StringRef Name) {
2100   auto *PrepareFn = M.getFunction(Name);
2101   if (PrepareFn && !PrepareFn->use_empty())
2102     Fns.push_back(PrepareFn);
2103 }
2104 
2105 PreservedAnalyses CoroSplitPass::run(LazyCallGraph::SCC &C,
2106                                      CGSCCAnalysisManager &AM,
2107                                      LazyCallGraph &CG, CGSCCUpdateResult &UR) {
2108   // NB: One invariant of a valid LazyCallGraph::SCC is that it must contain a
2109   //     non-zero number of nodes, so we assume that here and grab the first
2110   //     node's function's module.
2111   Module &M = *C.begin()->getFunction().getParent();
2112   auto &FAM =
2113       AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
2114 
2115   if (!declaresCoroSplitIntrinsics(M))
2116     return PreservedAnalyses::all();
2117 
2118   // Check for uses of llvm.coro.prepare.retcon/async.
2119   SmallVector<Function *, 2> PrepareFns;
2120   addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
2121   addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");
2122 
2123   // Find coroutines for processing.
2124   SmallVector<LazyCallGraph::Node *, 4> Coroutines;
2125   for (LazyCallGraph::Node &N : C)
2126     if (N.getFunction().hasFnAttribute(CORO_PRESPLIT_ATTR))
2127       Coroutines.push_back(&N);
2128 
2129   if (Coroutines.empty() && PrepareFns.empty())
2130     return PreservedAnalyses::all();
2131 
2132   if (Coroutines.empty()) {
2133     for (auto *PrepareFn : PrepareFns) {
2134       replaceAllPrepares(PrepareFn, CG, C);
2135     }
2136   }
2137 
2138   // Split all the coroutines.
2139   for (LazyCallGraph::Node *N : Coroutines) {
2140     Function &F = N->getFunction();
2141     LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F.getName()
2142                       << "' state: "
2143                       << F.getFnAttribute(CORO_PRESPLIT_ATTR).getValueAsString()
2144                       << "\n");
2145     F.removeFnAttr(CORO_PRESPLIT_ATTR);
2146 
2147     SmallVector<Function *, 4> Clones;
2148     const coro::Shape Shape = splitCoroutine(F, Clones, ReuseFrameSlot);
2149     updateCallGraphAfterCoroutineSplit(*N, Shape, Clones, C, CG, AM, UR, FAM);
2150 
2151     if (!Shape.CoroSuspends.empty()) {
2152       // Run the CGSCC pipeline on the original and newly split functions.
2153       UR.CWorklist.insert(&C);
2154       for (Function *Clone : Clones)
2155         UR.CWorklist.insert(CG.lookupSCC(CG.get(*Clone)));
2156     }
2157   }
2158 
2159   if (!PrepareFns.empty()) {
2160     for (auto *PrepareFn : PrepareFns) {
2161       replaceAllPrepares(PrepareFn, CG, C);
2162     }
2163   }
2164 
2165   return PreservedAnalyses::none();
2166 }
2167 
2168 namespace {
2169 
2170 // We present a coroutine to LLVM as an ordinary function with suspension
2171 // points marked up with intrinsics. We let the optimizer party on the coroutine
2172 // as a single function for as long as possible. Shortly before the coroutine is
2173 // eligible to be inlined into its callers, we split up the coroutine into parts
2174 // corresponding to initial, resume and destroy invocations of the coroutine,
2175 // add them to the current SCC and restart the IPO pipeline to optimize the
2176 // coroutine subfunctions we extracted before proceeding to the caller of the
2177 // coroutine.
2178 struct CoroSplitLegacy : public CallGraphSCCPass {
2179   static char ID; // Pass identification, replacement for typeid
2180 
2181   CoroSplitLegacy(bool ReuseFrameSlot = false)
2182       : CallGraphSCCPass(ID), ReuseFrameSlot(ReuseFrameSlot) {
2183     initializeCoroSplitLegacyPass(*PassRegistry::getPassRegistry());
2184   }
2185 
2186   bool Run = false;
2187   bool ReuseFrameSlot;
2188 
2189   // A coroutine is identified by the presence of the coro.begin intrinsic;
2190   // if we don't have any, this pass has nothing to do.
2191   bool doInitialization(CallGraph &CG) override {
2192     Run = declaresCoroSplitIntrinsics(CG.getModule());
2193     return CallGraphSCCPass::doInitialization(CG);
2194   }
2195 
2196   bool runOnSCC(CallGraphSCC &SCC) override {
2197     if (!Run)
2198       return false;
2199 
2200     // Check for uses of llvm.coro.prepare.retcon/async.
2201     SmallVector<Function *, 2> PrepareFns;
2202     auto &M = SCC.getCallGraph().getModule();
2203     addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
2204     addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");
2205 
2206     // Find coroutines for processing.
2207     SmallVector<Function *, 4> Coroutines;
2208     for (CallGraphNode *CGN : SCC)
2209       if (auto *F = CGN->getFunction())
2210         if (F->hasFnAttribute(CORO_PRESPLIT_ATTR))
2211           Coroutines.push_back(F);
2212 
2213     if (Coroutines.empty() && PrepareFns.empty())
2214       return false;
2215 
2216     CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
2217 
2218     if (Coroutines.empty()) {
2219       bool Changed = false;
2220       for (auto *PrepareFn : PrepareFns)
2221         Changed |= replaceAllPrepares(PrepareFn, CG);
2222       return Changed;
2223     }
2224 
2225     createDevirtTriggerFunc(CG, SCC);
2226 
2227     // Split all the coroutines.
2228     for (Function *F : Coroutines) {
2229       Attribute Attr = F->getFnAttribute(CORO_PRESPLIT_ATTR);
2230       StringRef Value = Attr.getValueAsString();
2231       LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F->getName()
2232                         << "' state: " << Value << "\n");
2233       // Async lowering marks coroutines to trigger a restart of the pipeline
2234       // after it has split them.
2235       if (Value == ASYNC_RESTART_AFTER_SPLIT) {
2236         F->removeFnAttr(CORO_PRESPLIT_ATTR);
2237         continue;
2238       }
2239       if (Value == UNPREPARED_FOR_SPLIT) {
2240         prepareForSplit(*F, CG);
2241         continue;
2242       }
2243       F->removeFnAttr(CORO_PRESPLIT_ATTR);
2244 
2245       SmallVector<Function *, 4> Clones;
2246       const coro::Shape Shape = splitCoroutine(*F, Clones, ReuseFrameSlot);
2247       updateCallGraphAfterCoroutineSplit(*F, Shape, Clones, CG, SCC);
2248       if (Shape.ABI == coro::ABI::Async) {
2249         // Restart SCC passes.
2250         // Mark the function for the CoroElide pass. It will devirtualize,
2251         // causing a restart of the SCC pipeline.
2252         prepareForSplit(*F, CG, true /*MarkForAsyncRestart*/);
2253       }
2254     }
2255 
2256     for (auto *PrepareFn : PrepareFns)
2257       replaceAllPrepares(PrepareFn, CG);
2258 
2259     return true;
2260   }
2261 
2262   void getAnalysisUsage(AnalysisUsage &AU) const override {
2263     CallGraphSCCPass::getAnalysisUsage(AU);
2264   }
2265 
2266   StringRef getPassName() const override { return "Coroutine Splitting"; }
2267 };
2268 
2269 } // end anonymous namespace
2270 
2271 char CoroSplitLegacy::ID = 0;
2272 
2273 INITIALIZE_PASS_BEGIN(
2274     CoroSplitLegacy, "coro-split",
2275     "Split coroutine into a set of functions driving its state machine", false,
2276     false)
2277 INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
2278 INITIALIZE_PASS_END(
2279     CoroSplitLegacy, "coro-split",
2280     "Split coroutine into a set of functions driving its state machine", false,
2281     false)
2282 
2283 Pass *llvm::createCoroSplitLegacyPass(bool ReuseFrameSlot) {
2284   return new CoroSplitLegacy(ReuseFrameSlot);
2285 }
2286