1 //===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
// This file contains classes used to discover if for a particular value
// there is a path from its definition to a use that crosses a suspend point.
10 //
11 // Using the information discovered we form a Coroutine Frame structure to
12 // contain those values. All uses of those values are replaced with appropriate
13 // GEP + load from the coroutine frame. At the point of the definition we spill
14 // the value into the coroutine frame.
15 //
16 // TODO: pack values tightly using liveness info.
17 //===----------------------------------------------------------------------===//
18 
19 #include "CoroInternal.h"
20 #include "llvm/ADT/BitVector.h"
21 #include "llvm/ADT/SmallString.h"
22 #include "llvm/Analysis/PtrUseVisitor.h"
23 #include "llvm/Analysis/StackLifetime.h"
24 #include "llvm/Config/llvm-config.h"
25 #include "llvm/IR/CFG.h"
26 #include "llvm/IR/DIBuilder.h"
27 #include "llvm/IR/Dominators.h"
28 #include "llvm/IR/IRBuilder.h"
29 #include "llvm/IR/InstIterator.h"
30 #include "llvm/Support/CommandLine.h"
31 #include "llvm/Support/Debug.h"
32 #include "llvm/Support/MathExtras.h"
33 #include "llvm/Support/OptimizedStructLayout.h"
34 #include "llvm/Support/circular_raw_ostream.h"
35 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
36 #include "llvm/Transforms/Utils/Local.h"
37 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
38 #include <algorithm>
39 
40 using namespace llvm;
41 
42 // The "coro-suspend-crossing" flag is very noisy. There is another debug type,
43 // "coro-frame", which results in leaner debug spew.
44 #define DEBUG_TYPE "coro-suspend-crossing"
45 
46 static cl::opt<bool> EnableReuseStorageInFrame(
47     "reuse-storage-in-coroutine-frame", cl::Hidden,
    cl::desc(
        "Enable the optimization which would reuse the storage in the "
        "coroutine frame for allocas whose lifetime ranges do not overlap, "
        "for testing purposes"),
51     llvm::cl::init(false));
52 
53 enum { SmallVectorThreshold = 32 };
54 
// Provides a two-way mapping between blocks and numbers.
56 namespace {
57 class BlockToIndexMapping {
58   SmallVector<BasicBlock *, SmallVectorThreshold> V;
59 
60 public:
61   size_t size() const { return V.size(); }
62 
63   BlockToIndexMapping(Function &F) {
64     for (BasicBlock &BB : F)
65       V.push_back(&BB);
66     llvm::sort(V);
67   }
68 
69   size_t blockToIndex(BasicBlock *BB) const {
70     auto *I = llvm::lower_bound(V, BB);
    assert(I != V.end() && *I == BB && "BasicBlockNumbering: Unknown block");
72     return I - V.begin();
73   }
74 
75   BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
76 };
77 } // end anonymous namespace
78 
// The SuspendCrossingInfo maintains data that allows answering the question:
// given two BasicBlocks A and B, is there a path from A to B that passes
// through a suspend point?
//
// For every basic block 'i' it maintains a BlockData that consists of:
//   Consumes:  a bit vector which contains a set of indices of blocks that can
//              reach block 'i'
//   Kills: a bit vector which contains a set of indices of blocks that can
//          reach block 'i', but at least one of those paths crosses a suspend
//          point
88 //   Suspend: a boolean indicating whether block 'i' contains a suspend point.
89 //   End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
90 //
91 namespace {
92 struct SuspendCrossingInfo {
93   BlockToIndexMapping Mapping;
94 
95   struct BlockData {
96     BitVector Consumes;
97     BitVector Kills;
98     bool Suspend = false;
99     bool End = false;
100   };
101   SmallVector<BlockData, SmallVectorThreshold> Block;
102 
103   iterator_range<succ_iterator> successors(BlockData const &BD) const {
104     BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
105     return llvm::successors(BB);
106   }
107 
108   BlockData &getBlockData(BasicBlock *BB) {
109     return Block[Mapping.blockToIndex(BB)];
110   }
111 
112   void dump() const;
113   void dump(StringRef Label, BitVector const &BV) const;
114 
115   SuspendCrossingInfo(Function &F, coro::Shape &Shape);
116 
117   bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
118     size_t const DefIndex = Mapping.blockToIndex(DefBB);
119     size_t const UseIndex = Mapping.blockToIndex(UseBB);
120 
121     bool const Result = Block[UseIndex].Kills[DefIndex];
122     LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
123                       << " answer is " << Result << "\n");
124     return Result;
125   }
126 
127   bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
128     auto *I = cast<Instruction>(U);
129 
130     // We rewrote PHINodes, so that only the ones with exactly one incoming
131     // value need to be analyzed.
132     if (auto *PN = dyn_cast<PHINode>(I))
133       if (PN->getNumIncomingValues() > 1)
134         return false;
135 
136     BasicBlock *UseBB = I->getParent();
137 
138     // As a special case, treat uses by an llvm.coro.suspend.retcon or an
139     // llvm.coro.suspend.async as if they were uses in the suspend's single
140     // predecessor: the uses conceptually occur before the suspend.
141     if (isa<CoroSuspendRetconInst>(I) || isa<CoroSuspendAsyncInst>(I)) {
142       UseBB = UseBB->getSinglePredecessor();
143       assert(UseBB && "should have split coro.suspend into its own block");
144     }
145 
146     return hasPathCrossingSuspendPoint(DefBB, UseBB);
147   }
148 
149   bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
150     return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
151   }
152 
153   bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
154     auto *DefBB = I.getParent();
155 
156     // As a special case, treat values produced by an llvm.coro.suspend.*
157     // as if they were defined in the single successor: the uses
158     // conceptually occur after the suspend.
159     if (isa<AnyCoroSuspendInst>(I)) {
160       DefBB = DefBB->getSingleSuccessor();
161       assert(DefBB && "should have split coro.suspend into its own block");
162     }
163 
164     return isDefinitionAcrossSuspend(DefBB, U);
165   }
166 };
167 } // end anonymous namespace
168 
169 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
170 LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
171                                                 BitVector const &BV) const {
172   dbgs() << Label << ":";
173   for (size_t I = 0, N = BV.size(); I < N; ++I)
174     if (BV[I])
175       dbgs() << " " << Mapping.indexToBlock(I)->getName();
176   dbgs() << "\n";
177 }
178 
179 LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
180   for (size_t I = 0, N = Block.size(); I < N; ++I) {
181     BasicBlock *const B = Mapping.indexToBlock(I);
182     dbgs() << B->getName() << ":\n";
183     dump("   Consumes", Block[I].Consumes);
184     dump("      Kills", Block[I].Kills);
185   }
186   dbgs() << "\n";
187 }
188 #endif
189 
190 SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
191     : Mapping(F) {
192   const size_t N = Mapping.size();
193   Block.resize(N);
194 
195   // Initialize every block so that it consumes itself
196   for (size_t I = 0; I < N; ++I) {
197     auto &B = Block[I];
198     B.Consumes.resize(N);
199     B.Kills.resize(N);
200     B.Consumes.set(I);
201   }
202 
203   // Mark all CoroEnd Blocks. We do not propagate Kills beyond coro.ends as
204   // the code beyond coro.end is reachable during initial invocation of the
205   // coroutine.
206   for (auto *CE : Shape.CoroEnds)
207     getBlockData(CE->getParent()).End = true;
208 
209   // Mark all suspend blocks and indicate that they kill everything they
210   // consume. Note, that crossing coro.save also requires a spill, as any code
211   // between coro.save and coro.suspend may resume the coroutine and all of the
212   // state needs to be saved by that time.
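  //
  // For example (illustrative; @may_resume is a placeholder callee):
  //
  //   %save = call token @llvm.coro.save(i8* null)
  //   call void @may_resume()          ; may resume the coroutine
  //   %s = call i8 @llvm.coro.suspend(token %save, i1 false)
  //
  // Any value live across the call to @may_resume must already be in the
  // frame, so the coro.save block kills everything it consumes, just like the
  // coro.suspend block.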
213   auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
214     BasicBlock *SuspendBlock = BarrierInst->getParent();
215     auto &B = getBlockData(SuspendBlock);
216     B.Suspend = true;
217     B.Kills |= B.Consumes;
218   };
219   for (auto *CSI : Shape.CoroSuspends) {
220     markSuspendBlock(CSI);
221     if (auto *Save = CSI->getCoroSave())
222       markSuspendBlock(Save);
223   }
224 
225   // Iterate propagating consumes and kills until they stop changing.
226   int Iteration = 0;
227   (void)Iteration;
228 
229   bool Changed;
230   do {
231     LLVM_DEBUG(dbgs() << "iteration " << ++Iteration);
232     LLVM_DEBUG(dbgs() << "==============\n");
233 
234     Changed = false;
235     for (size_t I = 0; I < N; ++I) {
236       auto &B = Block[I];
237       for (BasicBlock *SI : successors(B)) {
238 
239         auto SuccNo = Mapping.blockToIndex(SI);
240 
        // Save the Consumes and Kills bitsets so that it is easy to see
        // if anything changed after propagation.
243         auto &S = Block[SuccNo];
244         auto SavedConsumes = S.Consumes;
245         auto SavedKills = S.Kills;
246 
247         // Propagate Kills and Consumes from block B into its successor S.
248         S.Consumes |= B.Consumes;
249         S.Kills |= B.Kills;
250 
        // If block B is a suspend block, it should propagate kills into its
        // successor for every block B consumes.
253         if (B.Suspend) {
254           S.Kills |= B.Consumes;
255         }
256         if (S.Suspend) {
257           // If block S is a suspend block, it should kill all of the blocks it
258           // consumes.
259           S.Kills |= S.Consumes;
260         } else if (S.End) {
261           // If block S is an end block, it should not propagate kills as the
262           // blocks following coro.end() are reached during initial invocation
263           // of the coroutine while all the data are still available on the
264           // stack or in the registers.
265           S.Kills.reset();
266         } else {
          // This is reached when block S is neither a suspend block nor a
          // coro.end block; we need to make sure that it is not in its own
          // kill set.
269           S.Kills.reset(SuccNo);
270         }
271 
272         // See if anything changed.
273         Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);
274 
275         if (S.Kills != SavedKills) {
276           LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
277                             << "\n");
278           LLVM_DEBUG(dump("S.Kills", S.Kills));
279           LLVM_DEBUG(dump("SavedKills", SavedKills));
280         }
281         if (S.Consumes != SavedConsumes) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                            << "\n");
283           LLVM_DEBUG(dump("S.Consume", S.Consumes));
284           LLVM_DEBUG(dump("SavedCons", SavedConsumes));
285         }
286       }
287     }
288   } while (Changed);
289   LLVM_DEBUG(dump());
290 }
291 
292 #undef DEBUG_TYPE // "coro-suspend-crossing"
293 #define DEBUG_TYPE "coro-frame"
294 
295 namespace {
296 class FrameTypeBuilder;
297 // Mapping from the to-be-spilled value to all the users that need reload.
298 using SpillInfo = SmallMapVector<Value *, SmallVector<Instruction *, 2>, 8>;
299 struct AllocaInfo {
300   AllocaInst *Alloca;
301   DenseMap<Instruction *, llvm::Optional<APInt>> Aliases;
302   bool MayWriteBeforeCoroBegin;
303   AllocaInfo(AllocaInst *Alloca,
304              DenseMap<Instruction *, llvm::Optional<APInt>> Aliases,
305              bool MayWriteBeforeCoroBegin)
306       : Alloca(Alloca), Aliases(std::move(Aliases)),
307         MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
308 };
309 struct FrameDataInfo {
  // All the values (that are not allocas) that need to be spilled to the
  // frame.
312   SpillInfo Spills;
313   // Allocas contains all values defined as allocas that need to live in the
314   // frame.
315   SmallVector<AllocaInfo, 8> Allocas;
316 
317   SmallVector<Value *, 8> getAllDefs() const {
318     SmallVector<Value *, 8> Defs;
319     for (const auto &P : Spills)
320       Defs.push_back(P.first);
321     for (const auto &A : Allocas)
322       Defs.push_back(A.Alloca);
323     return Defs;
324   }
325 
326   uint32_t getFieldIndex(Value *V) const {
327     auto Itr = FieldIndexMap.find(V);
328     assert(Itr != FieldIndexMap.end() &&
329            "Value does not have a frame field index");
330     return Itr->second;
331   }
332 
333   void setFieldIndex(Value *V, uint32_t Index) {
334     assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
335            "Cannot set the index for the same field twice.");
336     FieldIndexMap[V] = Index;
337   }
338 
339   // Remap the index of every field in the frame, using the final layout index.
340   void updateLayoutIndex(FrameTypeBuilder &B);
341 
342 private:
343   // LayoutIndexUpdateStarted is used to avoid updating the index of any field
344   // twice by mistake.
345   bool LayoutIndexUpdateStarted = false;
346   // Map from values to their slot indexes on the frame. They will be first set
347   // with their original insertion field index. After the frame is built, their
348   // indexes will be updated into the final layout index.
349   DenseMap<Value *, uint32_t> FieldIndexMap;
350 };
351 } // namespace
352 
353 #ifndef NDEBUG
354 static void dumpSpills(StringRef Title, const SpillInfo &Spills) {
355   dbgs() << "------------- " << Title << "--------------\n";
356   for (const auto &E : Spills) {
357     E.first->dump();
358     dbgs() << "   user: ";
359     for (auto *I : E.second)
360       I->dump();
361   }
362 }
363 
364 static void dumpAllocas(const SmallVectorImpl<AllocaInfo> &Allocas) {
365   dbgs() << "------------- Allocas --------------\n";
366   for (const auto &A : Allocas) {
367     A.Alloca->dump();
368   }
369 }
370 #endif
371 
372 namespace {
373 using FieldIDType = size_t;
// We cannot rely solely on the natural alignment of a type when building a
// coroutine frame: if the alignment specified on the Alloca instruction
// differs from the natural alignment of the alloca type, we will need to
// insert padding.
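// For example (illustrative), an `alloca i32, align 8` has a natural type
// alignment of 4 but a requested alignment of 8; laying its field out at an
// 8-byte boundary may leave a gap after the previous field that has to be
// filled with an explicit padding field.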
378 class FrameTypeBuilder {
379 private:
380   struct Field {
381     uint64_t Size;
382     uint64_t Offset;
383     Type *Ty;
384     FieldIDType LayoutFieldIndex;
385     Align Alignment;
386     Align TyAlignment;
387   };
388 
389   const DataLayout &DL;
390   LLVMContext &Context;
391   uint64_t StructSize = 0;
392   Align StructAlign;
393   bool IsFinished = false;
394 
395   SmallVector<Field, 8> Fields;
396   DenseMap<Value*, unsigned> FieldIndexByKey;
397 
398 public:
399   FrameTypeBuilder(LLVMContext &Context, DataLayout const &DL)
400       : DL(DL), Context(Context) {}
401 
402   /// Add a field to this structure for the storage of an `alloca`
403   /// instruction.
404   LLVM_NODISCARD FieldIDType addFieldForAlloca(AllocaInst *AI,
405                                                bool IsHeader = false) {
406     Type *Ty = AI->getAllocatedType();
407 
408     // Make an array type if this is a static array allocation.
409     if (AI->isArrayAllocation()) {
410       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
411         Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
412       else
413         report_fatal_error("Coroutines cannot handle non static allocas yet");
414     }
415 
416     return addField(Ty, AI->getAlign(), IsHeader);
417   }
418 
  /// We want to put the allocas whose lifetime ranges do not overlap
  /// into one slot of the coroutine frame.
  /// Consider the example at: https://bugs.llvm.org/show_bug.cgi?id=45566
422   ///
423   ///     cppcoro::task<void> alternative_paths(bool cond) {
424   ///         if (cond) {
425   ///             big_structure a;
426   ///             process(a);
427   ///             co_await something();
428   ///         } else {
429   ///             big_structure b;
430   ///             process2(b);
431   ///             co_await something();
432   ///         }
433   ///     }
434   ///
435   /// We want to put variable a and variable b in the same slot to
436   /// reduce the size of coroutine frame.
437   ///
  /// This function uses the StackLifetime algorithm to partition the
  /// AllocaInsts in FrameData.Allocas into non-overlapping sets, so that
  /// allocas in the same set can share a slot in the coroutine frame. It then
  /// adds a field for each set, using the type of the largest alloca in the
  /// set as the field type.
  ///
  /// Side Effects: Because we sort the allocas, the order of allocas in the
  /// frame may be different from the order in the source code.
446   void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
447                           coro::Shape &Shape);
448 
449   /// Add a field to this structure.
450   LLVM_NODISCARD FieldIDType addField(Type *Ty, MaybeAlign FieldAlignment,
451                                       bool IsHeader = false) {
452     assert(!IsFinished && "adding fields to a finished builder");
453     assert(Ty && "must provide a type for a field");
454 
455     // The field size is always the alloc size of the type.
456     uint64_t FieldSize = DL.getTypeAllocSize(Ty);
457 
458     // The field alignment might not be the type alignment, but we need
459     // to remember the type alignment anyway to build the type.
460     Align TyAlignment = DL.getABITypeAlign(Ty);
461     if (!FieldAlignment) FieldAlignment = TyAlignment;
462 
463     // Lay out header fields immediately.
464     uint64_t Offset;
465     if (IsHeader) {
466       Offset = alignTo(StructSize, FieldAlignment);
467       StructSize = Offset + FieldSize;
468 
469     // Everything else has a flexible offset.
470     } else {
471       Offset = OptimizedStructLayoutField::FlexibleOffset;
472     }
473 
474     Fields.push_back({FieldSize, Offset, Ty, 0, *FieldAlignment, TyAlignment});
475     return Fields.size() - 1;
476   }
477 
478   /// Finish the layout and set the body on the given type.
479   void finish(StructType *Ty);
480 
481   uint64_t getStructSize() const {
482     assert(IsFinished && "not yet finished!");
483     return StructSize;
484   }
485 
486   Align getStructAlign() const {
487     assert(IsFinished && "not yet finished!");
488     return StructAlign;
489   }
490 
491   FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
492     assert(IsFinished && "not yet finished!");
493     return Fields[Id].LayoutFieldIndex;
494   }
495 };
496 } // namespace
497 
498 void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
499   auto Updater = [&](Value *I) {
500     setFieldIndex(I, B.getLayoutFieldIndex(getFieldIndex(I)));
501   };
502   LayoutIndexUpdateStarted = true;
503   for (auto &S : Spills)
504     Updater(S.first);
505   for (const auto &A : Allocas)
506     Updater(A.Alloca);
507   LayoutIndexUpdateStarted = false;
508 }
509 
510 void FrameTypeBuilder::addFieldForAllocas(const Function &F,
511                                           FrameDataInfo &FrameData,
512                                           coro::Shape &Shape) {
513   DenseMap<AllocaInst *, unsigned int> AllocaIndex;
514   using AllocaSetType = SmallVector<AllocaInst *, 4>;
515   SmallVector<AllocaSetType, 4> NonOverlapedAllocas;
516 
  // We need to add a field for the allocas at the end of this function.
  // However, this function has multiple exits, so we use this RAII helper to
  // avoid duplicating that code at every exit.
  struct RAIIHelper {
    std::function<void()> func;
    RAIIHelper(std::function<void()> &&func) : func(func) {}
    ~RAIIHelper() { func(); }
  } Helper([&]() {
524     for (auto AllocaList : NonOverlapedAllocas) {
525       auto *LargestAI = *AllocaList.begin();
526       FieldIDType Id = addFieldForAlloca(LargestAI);
527       for (auto *Alloca : AllocaList)
528         FrameData.setFieldIndex(Alloca, Id);
529     }
530   });
531 
532   if (!Shape.ReuseFrameSlot && !EnableReuseStorageInFrame) {
533     for (const auto &A : FrameData.Allocas) {
534       AllocaInst *Alloca = A.Alloca;
535       AllocaIndex[Alloca] = NonOverlapedAllocas.size();
536       NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
537     }
538     return;
539   }
540 
  // Because there are paths from the lifetime.start to coro.end
  // for each alloca, the live ranges of all allocas overlap in the
  // blocks that contain coro.end and their successor blocks.
  // So we choose to skip these blocks when we calculate the live range
  // for each alloca. This should be reasonable since there shouldn't be uses
  // in these blocks and the coroutine frame shouldn't be used outside the
  // coroutine body.
  //
  // Note that the user of coro.suspend may not be a SwitchInst. However, this
  // case seems too complex to handle. And it is harmless to skip these
  // patterns since it just prevents putting the allocas in the same
  // slot.
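  //
  // A suspend point typically lowers to a switch such as (illustrative):
  //
  //   %s = call i8 @llvm.coro.suspend(token %save, i1 false)
  //   switch i8 %s, label %coro.ret [ i8 0, label %resume
  //                                   i8 1, label %cleanup ]
  //
  // Below we temporarily redirect the default destination (which leads to
  // coro.end) to the first case destination, and restore it once the liveness
  // analysis is done.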
553   DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
554   for (auto CoroSuspendInst : Shape.CoroSuspends) {
555     for (auto U : CoroSuspendInst->users()) {
556       if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
557         auto *SWI = const_cast<SwitchInst *>(ConstSWI);
558         DefaultSuspendDest[SWI] = SWI->getDefaultDest();
559         SWI->setDefaultDest(SWI->getSuccessor(1));
560       }
561     }
562   }
563 
564   auto ExtractAllocas = [&]() {
565     AllocaSetType Allocas;
566     Allocas.reserve(FrameData.Allocas.size());
567     for (const auto &A : FrameData.Allocas)
568       Allocas.push_back(A.Alloca);
569     return Allocas;
570   };
571   StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
572                                       StackLifetime::LivenessType::May);
573   StackLifetimeAnalyzer.run();
  auto DoAllocasInterfere = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
575     return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
576         StackLifetimeAnalyzer.getLiveRange(AI2));
577   };
578   auto GetAllocaSize = [&](const AllocaInfo &A) {
579     Optional<TypeSize> RetSize = A.Alloca->getAllocationSizeInBits(DL);
580     assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
581     assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
582     return RetSize->getFixedSize();
583   };
  // Put larger allocas in the front, so that larger allocas have higher
  // priority to merge, which can potentially save more space. This also keeps
  // each AllocaSet ordered, so we can easily get the largest Alloca in an
  // AllocaSet.
588   sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
589     return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
590   });
591   for (const auto &A : FrameData.Allocas) {
592     AllocaInst *Alloca = A.Alloca;
593     bool Merged = false;
    // Try to find an existing NonOverlappedAllocaSet that the Alloca does not
    // interfere with. If there is one, insert the alloca into that set.
    for (auto &AllocaSet : NonOverlapedAllocas) {
      assert(!AllocaSet.empty() && "AllocaSet should not be empty.\n");
      bool NoInterference = none_of(AllocaSet, [&](auto Iter) {
        return DoAllocasInterfere(Alloca, Iter);
      });
      // If the alignment of A is a multiple of the alignment of B, the
      // address of A should satisfy the alignment requirement of B.
      //
      // There may be other, more fine-grained strategies for handling the
      // alignment information during merging, but they seem hard to implement
      // and of little benefit.
608       bool Alignable = [&]() -> bool {
609         auto *LargestAlloca = *AllocaSet.begin();
610         return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==
611                0;
612       }();
      bool CouldMerge = NoInterference && Alignable;
614       if (!CouldMerge)
615         continue;
616       AllocaIndex[Alloca] = AllocaIndex[*AllocaSet.begin()];
617       AllocaSet.push_back(Alloca);
618       Merged = true;
619       break;
620     }
621     if (!Merged) {
622       AllocaIndex[Alloca] = NonOverlapedAllocas.size();
623       NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
624     }
625   }
  // Restore the default target destination for each switch statement we
  // modified above.
628   for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
629     SwitchInst *SWI = SwitchAndDefaultDest.first;
630     BasicBlock *DestBB = SwitchAndDefaultDest.second;
631     SWI->setDefaultDest(DestBB);
632   }
  // This debug output tells us which allocas were merged into one slot.
634   LLVM_DEBUG(for (auto &AllocaSet
635                   : NonOverlapedAllocas) {
636     if (AllocaSet.size() > 1) {
637       dbgs() << "In Function:" << F.getName() << "\n";
638       dbgs() << "Find Union Set "
639              << "\n";
640       dbgs() << "\tAllocas are \n";
641       for (auto Alloca : AllocaSet)
642         dbgs() << "\t\t" << *Alloca << "\n";
643     }
644   });
645 }
646 
647 void FrameTypeBuilder::finish(StructType *Ty) {
648   assert(!IsFinished && "already finished!");
649 
650   // Prepare the optimal-layout field array.
651   // The Id in the layout field is a pointer to our Field for it.
652   SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
653   LayoutFields.reserve(Fields.size());
654   for (auto &Field : Fields) {
655     LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
656                               Field.Offset);
657   }
658 
659   // Perform layout.
660   auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
661   StructSize = SizeAndAlign.first;
662   StructAlign = SizeAndAlign.second;
663 
664   auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
665     return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
666   };
667 
668   // We need to produce a packed struct type if there's a field whose
669   // assigned offset isn't a multiple of its natural type alignment.
670   bool Packed = [&] {
671     for (auto &LayoutField : LayoutFields) {
672       auto &F = getField(LayoutField);
673       if (!isAligned(F.TyAlignment, LayoutField.Offset))
674         return true;
675     }
676     return false;
677   }();
678 
679   // Build the struct body.
680   SmallVector<Type*, 16> FieldTypes;
681   FieldTypes.reserve(LayoutFields.size() * 3 / 2);
682   uint64_t LastOffset = 0;
683   for (auto &LayoutField : LayoutFields) {
684     auto &F = getField(LayoutField);
685 
686     auto Offset = LayoutField.Offset;
687 
688     // Add a padding field if there's a padding gap and we're either
689     // building a packed struct or the padding gap is more than we'd
690     // get from aligning to the field type's natural alignment.
691     assert(Offset >= LastOffset);
692     if (Offset != LastOffset) {
693       if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset)
694         FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context),
695                                             Offset - LastOffset));
696     }
697 
698     F.Offset = Offset;
699     F.LayoutFieldIndex = FieldTypes.size();
700 
701     FieldTypes.push_back(F.Ty);
702     LastOffset = Offset + F.Size;
703   }
704 
705   Ty->setBody(FieldTypes, Packed);
706 
707 #ifndef NDEBUG
708   // Check that the IR layout matches the offsets we expect.
709   auto Layout = DL.getStructLayout(Ty);
710   for (auto &F : Fields) {
711     assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
712     assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
713   }
714 #endif
715 
716   IsFinished = true;
717 }
718 
719 // Build a struct that will keep state for an active coroutine.
720 //   struct f.frame {
721 //     ResumeFnTy ResumeFnAddr;
722 //     ResumeFnTy DestroyFnAddr;
723 //     int ResumeIndex;
724 //     ... promise (if present) ...
725 //     ... spills ...
726 //   };
727 static StructType *buildFrameType(Function &F, coro::Shape &Shape,
728                                   FrameDataInfo &FrameData) {
729   LLVMContext &C = F.getContext();
730   const DataLayout &DL = F.getParent()->getDataLayout();
731   StructType *FrameTy = [&] {
732     SmallString<32> Name(F.getName());
733     Name.append(".Frame");
734     return StructType::create(C, Name);
735   }();
736 
737   FrameTypeBuilder B(C, DL);
738 
739   AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
740   Optional<FieldIDType> SwitchIndexFieldId;
741 
742   if (Shape.ABI == coro::ABI::Switch) {
743     auto *FramePtrTy = FrameTy->getPointerTo();
744     auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
745                                    /*IsVarArg=*/false);
746     auto *FnPtrTy = FnTy->getPointerTo();
747 
748     // Add header fields for the resume and destroy functions.
749     // We can rely on these being perfectly packed.
750     (void)B.addField(FnPtrTy, None, /*header*/ true);
751     (void)B.addField(FnPtrTy, None, /*header*/ true);
752 
753     // PromiseAlloca field needs to be explicitly added here because it's
754     // a header field with a fixed offset based on its alignment. Hence it
755     // needs special handling and cannot be added to FrameData.Allocas.
756     if (PromiseAlloca)
757       FrameData.setFieldIndex(
758           PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true));
759 
760     // Add a field to store the suspend index.  This doesn't need to
761     // be in the header.
762     unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
763     Type *IndexType = Type::getIntNTy(C, IndexBits);
764 
765     SwitchIndexFieldId = B.addField(IndexType, None);
766   } else {
767     assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
768   }
769 
  // Because multiple allocas may share the same field slot,
  // we add the allocas to frame fields here.
772   B.addFieldForAllocas(F, FrameData, Shape);
773   // Add PromiseAlloca to Allocas list so that
774   // 1. updateLayoutIndex could update its index after
775   // `performOptimizedStructLayout`
776   // 2. it is processed in insertSpills.
777   if (Shape.ABI == coro::ABI::Switch && PromiseAlloca)
778     // We assume that the promise alloca won't be modified before
    // CoroBegin and that no alias will be created before CoroBegin.
780     FrameData.Allocas.emplace_back(
781         PromiseAlloca, DenseMap<Instruction *, llvm::Optional<APInt>>{}, false);
782   // Create an entry for every spilled value.
783   for (auto &S : FrameData.Spills) {
784     Type *FieldType = S.first->getType();
785     // For byval arguments, we need to store the pointed value in the frame,
786     // instead of the pointer itself.
787     if (const Argument *A = dyn_cast<Argument>(S.first))
788       if (A->hasByValAttr())
789         FieldType = FieldType->getPointerElementType();
790     FieldIDType Id = B.addField(FieldType, None);
791     FrameData.setFieldIndex(S.first, Id);
792   }
793 
794   B.finish(FrameTy);
795   FrameData.updateLayoutIndex(B);
796   Shape.FrameAlign = B.getStructAlign();
797   Shape.FrameSize = B.getStructSize();
798 
799   switch (Shape.ABI) {
800   case coro::ABI::Switch:
801     // In the switch ABI, remember the switch-index field.
802     Shape.SwitchLowering.IndexField =
803         B.getLayoutFieldIndex(*SwitchIndexFieldId);
804 
805     // Also round the frame size up to a multiple of its alignment, as is
806     // generally expected in C/C++.
807     Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
808     break;
809 
810   // In the retcon ABI, remember whether the frame is inline in the storage.
811   case coro::ABI::Retcon:
812   case coro::ABI::RetconOnce: {
813     auto Id = Shape.getRetconCoroId();
814     Shape.RetconLowering.IsFrameInlineInStorage
815       = (B.getStructSize() <= Id->getStorageSize() &&
816          B.getStructAlign() <= Id->getStorageAlignment());
817     break;
818   }
819   case coro::ABI::Async: {
820     Shape.AsyncLowering.FrameOffset =
821         alignTo(Shape.AsyncLowering.ContextHeaderSize, Shape.FrameAlign);
822     // Also make the final context size a multiple of the context alignment to
823     // make allocation easier for allocators.
824     Shape.AsyncLowering.ContextSize =
825         alignTo(Shape.AsyncLowering.FrameOffset + Shape.FrameSize,
826                 Shape.AsyncLowering.getContextAlignment());
827     if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
828       report_fatal_error(
829           "The alignment requirment of frame variables cannot be higher than "
830           "the alignment of the async function context");
831     }
832     break;
833   }
834   }
835 
836   return FrameTy;
837 }
838 
// We use a pointer use visitor to track how an alloca is being used.
// The goal is to be able to answer the following three questions:
// 1. Should this alloca be allocated on the frame instead.
// 2. Could the content of the alloca be modified prior to CoroBegin, which
//    would require copying the data from the alloca to the frame after
//    CoroBegin.
// 3. Is there any alias created for this alloca prior to CoroBegin, but used
//    after CoroBegin. In that case, we will need to recreate the alias after
//    CoroBegin based off the frame.
//
// To answer question 1, we track two things:
//   a. The list of all BasicBlocks that use this alloca or any of its
//      aliases. In the end, we check if there exist any two basic blocks that
//      cross suspension points. If so, this alloca must be put on the frame.
//   b. Whether the alloca or any alias of the alloca is escaped at some
//      point, either by storing the address somewhere, or by using the
//      address in a function call that might capture. If it's ever escaped,
//      this alloca must be put on the frame conservatively.
// To answer question 2, we track writes through the variable
// MayWriteBeforeCoroBegin. Whenever a potential write happens, either through
// a store instruction, a function call or any of the memory intrinsics, we
// check whether the instruction is prior to CoroBegin.
// To answer question 3, we track the offsets of all aliases created for the
// alloca prior to CoroBegin but used after CoroBegin. llvm::Optional is used
// to be able to represent the case when the offset is unknown (e.g. when you
// have a PHINode that takes in different offset values). We cannot handle
// unknown offsets and will assert. This is the potential issue left out. An
// ideal solution would likely require a significant redesign.
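//
// For example (illustrative IR):
//
//   %a = alloca i64
//   %p = bitcast i64* %a to i32*              ; alias created before CoroBegin
//   %hdl = call i8* @llvm.coro.begin(...)
//   ...
//   store i32 1, i32* %p                      ; alias used after CoroBegin
//
// Here %p must be recreated after CoroBegin as a pointer into %a's frame slot
// at offset 0.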
864 namespace {
865 struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
866   using Base = PtrUseVisitor<AllocaUseVisitor>;
867   AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
868                    const CoroBeginInst &CB, const SuspendCrossingInfo &Checker)
869       : PtrUseVisitor(DL), DT(DT), CoroBegin(CB), Checker(Checker) {}
870 
871   void visit(Instruction &I) {
872     UserBBs.insert(I.getParent());
873     Base::visit(I);
874     // If the pointer is escaped prior to CoroBegin, we have to assume it would
875     // be written into before CoroBegin as well.
876     if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) {
877       MayWriteBeforeCoroBegin = true;
878     }
879   }
880   // We need to provide this overload as PtrUseVisitor uses a pointer based
881   // visiting function.
882   void visit(Instruction *I) { return visit(*I); }
883 
884   void visitPHINode(PHINode &I) {
885     enqueueUsers(I);
886     handleAlias(I);
887   }
888 
889   void visitSelectInst(SelectInst &I) {
890     enqueueUsers(I);
891     handleAlias(I);
892   }
893 
894   void visitStoreInst(StoreInst &SI) {
    // Regardless of whether the alias of the alloca is the value operand or
    // the pointer operand, we need to assume the alloca has been written to.
897     handleMayWrite(SI);
898 
899     if (SI.getValueOperand() != U->get())
900       return;
901 
902     // We are storing the pointer into a memory location, potentially escaping.
903     // As an optimization, we try to detect simple cases where it doesn't
904     // actually escape, for example:
905     //   %ptr = alloca ..
906     //   %addr = alloca ..
907     //   store %ptr, %addr
908     //   %x = load %addr
909     //   ..
910     // If %addr is only used by loading from it, we could simply treat %x as
    // another alias of %ptr, and not consider %ptr escaped.
912     auto IsSimpleStoreThenLoad = [&]() {
913       auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
914       // If the memory location we are storing to is not an alloca, it
915       // could be an alias of some other memory locations, which is difficult
916       // to analyze.
917       if (!AI)
918         return false;
919       // StoreAliases contains aliases of the memory location stored into.
920       SmallVector<Instruction *, 4> StoreAliases = {AI};
921       while (!StoreAliases.empty()) {
922         Instruction *I = StoreAliases.pop_back_val();
923         for (User *U : I->users()) {
924           // If we are loading from the memory location, we are creating an
925           // alias of the original pointer.
926           if (auto *LI = dyn_cast<LoadInst>(U)) {
927             enqueueUsers(*LI);
928             handleAlias(*LI);
929             continue;
930           }
          // If we are overwriting the memory location, the pointer certainly
932           // won't escape.
933           if (auto *S = dyn_cast<StoreInst>(U))
934             if (S->getPointerOperand() == I)
935               continue;
936           if (auto *II = dyn_cast<IntrinsicInst>(U))
937             if (II->isLifetimeStartOrEnd())
938               continue;
          // A BitCastInst creates aliases of the memory location being stored
940           // into.
941           if (auto *BI = dyn_cast<BitCastInst>(U)) {
942             StoreAliases.push_back(BI);
943             continue;
944           }
945           return false;
946         }
947       }
948 
949       return true;
950     };
951 
952     if (!IsSimpleStoreThenLoad())
953       PI.setEscaped(&SI);
954   }
955 
956   // All mem intrinsics modify the data.
957   void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }
958 
959   void visitBitCastInst(BitCastInst &BC) {
960     Base::visitBitCastInst(BC);
961     handleAlias(BC);
962   }
963 
964   void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
965     Base::visitAddrSpaceCastInst(ASC);
966     handleAlias(ASC);
967   }
968 
969   void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
970     // The base visitor will adjust Offset accordingly.
971     Base::visitGetElementPtrInst(GEPI);
972     handleAlias(GEPI);
973   }
974 
975   void visitCallBase(CallBase &CB) {
976     for (unsigned Op = 0, OpCount = CB.getNumArgOperands(); Op < OpCount; ++Op)
977       if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
978         PI.setEscaped(&CB);
979     handleMayWrite(CB);
980   }
981 
982   bool getShouldLiveOnFrame() const {
983     if (!ShouldLiveOnFrame)
984       ShouldLiveOnFrame = computeShouldLiveOnFrame();
985     return ShouldLiveOnFrame.getValue();
986   }
987 
988   bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }
989 
990   DenseMap<Instruction *, llvm::Optional<APInt>> getAliasesCopy() const {
991     assert(getShouldLiveOnFrame() && "This method should only be called if the "
992                                      "alloca needs to live on the frame.");
993     for (const auto &P : AliasOffetMap)
994       if (!P.second)
995         report_fatal_error("Unable to handle an alias with unknown offset "
996                            "created before CoroBegin.");
997     return AliasOffetMap;
998   }
999 
1000 private:
1001   const DominatorTree &DT;
1002   const CoroBeginInst &CoroBegin;
1003   const SuspendCrossingInfo &Checker;
1004   // All alias to the original AllocaInst, created before CoroBegin and used
1005   // after CoroBegin. Each entry contains the instruction and the offset in the
1006   // original Alloca. They need to be recreated after CoroBegin off the frame.
1007   DenseMap<Instruction *, llvm::Optional<APInt>> AliasOffetMap{};
1008   SmallPtrSet<BasicBlock *, 2> UserBBs{};
1009   bool MayWriteBeforeCoroBegin{false};
1010 
1011   mutable llvm::Optional<bool> ShouldLiveOnFrame{};
1012 
1013   bool computeShouldLiveOnFrame() const {
1014     if (PI.isEscaped())
1015       return true;
1016 
1017     for (auto *BB1 : UserBBs)
1018       for (auto *BB2 : UserBBs)
1019         if (Checker.hasPathCrossingSuspendPoint(BB1, BB2))
1020           return true;
1021 
1022     return false;
1023   }
1024 
1025   void handleMayWrite(const Instruction &I) {
1026     if (!DT.dominates(&CoroBegin, &I))
1027       MayWriteBeforeCoroBegin = true;
1028   }
1029 
1030   bool usedAfterCoroBegin(Instruction &I) {
1031     for (auto &U : I.uses())
1032       if (DT.dominates(&CoroBegin, U))
1033         return true;
1034     return false;
1035   }
1036 
1037   void handleAlias(Instruction &I) {
1038     // We track all aliases created prior to CoroBegin but used after.
1039     // These aliases may need to be recreated after CoroBegin if the alloca
1040     // need to live on the frame.
1041     if (DT.dominates(&CoroBegin, &I) || !usedAfterCoroBegin(I))
1042       return;
1043 
1044     if (!IsOffsetKnown) {
1045       AliasOffetMap[&I].reset();
1046     } else {
1047       auto Itr = AliasOffetMap.find(&I);
1048       if (Itr == AliasOffetMap.end()) {
1049         AliasOffetMap[&I] = Offset;
1050       } else if (Itr->second.hasValue() && Itr->second.getValue() != Offset) {
1051         // If we have seen two different possible values for this alias, we set
1052         // it to empty.
1053         AliasOffetMap[&I].reset();
1054       }
1055     }
1056   }
1057 };
1058 } // namespace
1059 
// We need to make room to insert a spill after the initial PHIs, but before
// the catchswitch instruction. Placing the spill before the catchswitch would
// violate the requirement that a catchswitch, like all other EH pads, must be
// the first non-PHI instruction in a block.
1063 //
1064 // Split away catchswitch into a separate block and insert in its place:
1065 //
1066 //   cleanuppad <InsertPt> cleanupret.
1067 //
1068 // cleanupret instruction will act as an insert point for the spill.
1069 static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
1070   BasicBlock *CurrentBlock = CatchSwitch->getParent();
1071   BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
1072   CurrentBlock->getTerminator()->eraseFromParent();
1073 
1074   auto *CleanupPad =
1075       CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
1076   auto *CleanupRet =
1077       CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
1078   return CleanupRet;
1079 }
1080 
1081 // Replace all alloca and SSA values that are accessed across suspend points
1082 // with GetElementPointer from coroutine frame + loads and stores. Create an
1083 // AllocaSpillBB that will become the new entry block for the resume parts of
1084 // the coroutine:
1085 //
1086 //    %hdl = coro.begin(...)
1087 //    whatever
1088 //
1089 // becomes:
1090 //
1091 //    %hdl = coro.begin(...)
1092 //    %FramePtr = bitcast i8* hdl to %f.frame*
1093 //    br label %AllocaSpillBB
1094 //
1095 //  AllocaSpillBB:
1096 //    ; geps corresponding to allocas that were moved to coroutine frame
1097 //    br label PostSpill
1098 //
1099 //  PostSpill:
1100 //    whatever
1101 //
1102 //
1103 static Instruction *insertSpills(const FrameDataInfo &FrameData,
1104                                  coro::Shape &Shape) {
1105   auto *CB = Shape.CoroBegin;
1106   LLVMContext &C = CB->getContext();
1107   IRBuilder<> Builder(CB->getNextNode());
1108   StructType *FrameTy = Shape.FrameTy;
1109   PointerType *FramePtrTy = FrameTy->getPointerTo();
1110   auto *FramePtr =
1111       cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
1112   DominatorTree DT(*CB->getFunction());
1113   SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;
1114 
1115   // Create a GEP with the given index into the coroutine frame for the original
1116   // value Orig. Appends an extra 0 index for array-allocas, preserving the
1117   // original type.
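  //
  // For example, for a value assigned to frame field 2, this emits something
  // like (illustrative):
  //
  //   %addr = getelementptr inbounds %f.frame, %f.frame* %FramePtr, i32 0, i32 2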
1118   auto GetFramePointer = [&](Value *Orig) -> Value * {
1119     FieldIDType Index = FrameData.getFieldIndex(Orig);
1120     SmallVector<Value *, 3> Indices = {
1121         ConstantInt::get(Type::getInt32Ty(C), 0),
1122         ConstantInt::get(Type::getInt32Ty(C), Index),
1123     };
1124 
1125     if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
1126       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
1127         auto Count = CI->getValue().getZExtValue();
1128         if (Count > 1) {
1129           Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
1130         }
1131       } else {
1132         report_fatal_error("Coroutines cannot handle non static allocas yet");
1133       }
1134     }
1135 
1136     auto GEP = cast<GetElementPtrInst>(
1137         Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices));
1138     if (isa<AllocaInst>(Orig)) {
      // If the type of the GEP is not equal to the type of the AllocaInst, it
      // implies that the AllocaInst may share a frame slot with another
      // AllocaInst. So we cast the GEP to the type of the AllocaInst here to
      // reuse the frame storage.
      //
      // Note: If we change the strategy for dealing with alignment, we need
      // to refine this casting.
1146       if (GEP->getResultElementType() != Orig->getType())
1147         return Builder.CreateBitCast(GEP, Orig->getType(),
1148                                      Orig->getName() + Twine(".cast"));
1149     }
1150     return GEP;
1151   };
1152 
1153   for (auto const &E : FrameData.Spills) {
1154     Value *Def = E.first;
1155     // Create a store instruction storing the value into the
1156     // coroutine frame.
1157     Instruction *InsertPt = nullptr;
1158     bool NeedToCopyArgPtrValue = false;
1159     if (auto *Arg = dyn_cast<Argument>(Def)) {
1160       // For arguments, we will place the store instruction right after
1161       // the coroutine frame pointer instruction, i.e. bitcast of
1162       // coro.begin from i8* to %f.frame*.
1163       InsertPt = FramePtr->getNextNode();
1164 
1165       // If we're spilling an Argument, make sure we clear 'nocapture'
1166       // from the coroutine function.
1167       Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);
1168 
1169       if (Arg->hasByValAttr())
1170         NeedToCopyArgPtrValue = true;
1171 
1172     } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
1173       // Don't spill immediately after a suspend; splitting assumes
1174       // that the suspend will be followed by a branch.
1175       InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHI();
1176     } else {
1177       auto *I = cast<Instruction>(Def);
1178       if (!DT.dominates(CB, I)) {
1179         // If it is not dominated by CoroBegin, then spill should be
1180         // inserted immediately after CoroFrame is computed.
1181         InsertPt = FramePtr->getNextNode();
1182       } else if (auto *II = dyn_cast<InvokeInst>(I)) {
1183         // If we are spilling the result of the invoke instruction, split
1184         // the normal edge and insert the spill in the new block.
1185         auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
1186         InsertPt = NewBB->getTerminator();
1187       } else if (isa<PHINode>(I)) {
1188         // Skip the PHINodes and EH pads instructions.
1189         BasicBlock *DefBlock = I->getParent();
1190         if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
1191           InsertPt = splitBeforeCatchSwitch(CSI);
1192         else
1193           InsertPt = &*DefBlock->getFirstInsertionPt();
1194       } else {
1195         assert(!I->isTerminator() && "unexpected terminator");
1196         // For all other values, the spill is placed immediately after
1197         // the definition.
1198         InsertPt = I->getNextNode();
1199       }
1200     }
1201 
1202     auto Index = FrameData.getFieldIndex(Def);
1203     Builder.SetInsertPoint(InsertPt);
1204     auto *G = Builder.CreateConstInBoundsGEP2_32(
1205         FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
1206     if (NeedToCopyArgPtrValue) {
1207       // For byval arguments, we need to store the pointed value in the frame,
1208       // instead of the pointer itself.
1209       auto *Value =
1210           Builder.CreateLoad(Def->getType()->getPointerElementType(), Def);
1211       Builder.CreateStore(Value, G);
1212     } else {
1213       Builder.CreateStore(Def, G);
1214     }
1215 
1216     BasicBlock *CurrentBlock = nullptr;
1217     Value *CurrentReload = nullptr;
1218     for (auto *U : E.second) {
      // If we have not seen the use block yet, reload the spilled value from
      // the coroutine frame at the beginning of that block (for byval
      // arguments, the frame GEP itself is used as the reloaded value).
1222       if (CurrentBlock != U->getParent()) {
1223         CurrentBlock = U->getParent();
1224         Builder.SetInsertPoint(&*CurrentBlock->getFirstInsertionPt());
1225 
1226         auto *GEP = GetFramePointer(E.first);
1227         GEP->setName(E.first->getName() + Twine(".reload.addr"));
1228         if (NeedToCopyArgPtrValue)
1229           CurrentReload = GEP;
1230         else
1231           CurrentReload = Builder.CreateLoad(
1232               FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
1233               E.first->getName() + Twine(".reload"));
1234 
1235         TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(Def);
1236         for (DbgDeclareInst *DDI : DIs) {
1237           bool AllowUnresolved = false;
1238           // This dbg.declare is preserved for all coro-split function
1239           // fragments. It will be unreachable in the main function, and
1240           // processed by coro::salvageDebugInfo() by CoroCloner.
1241           DIBuilder(*CurrentBlock->getParent()->getParent(), AllowUnresolved)
1242               .insertDeclare(CurrentReload, DDI->getVariable(),
1243                              DDI->getExpression(), DDI->getDebugLoc(),
1244                              &*Builder.GetInsertPoint());
1245           // This dbg.declare is for the main function entry point.  It
1246           // will be deleted in all coro-split functions.
1247           coro::salvageDebugInfo(DbgPtrAllocaCache, DDI);
1248         }
1249       }
1250 
1251       // If we have a single edge PHINode, remove it and replace it with a
1252       // reload from the coroutine frame. (We already took care of multi edge
1253       // PHINodes by rewriting them in the rewritePHIs function).
1254       if (auto *PN = dyn_cast<PHINode>(U)) {
1255         assert(PN->getNumIncomingValues() == 1 &&
1256                "unexpected number of incoming "
1257                "values in the PHINode");
1258         PN->replaceAllUsesWith(CurrentReload);
1259         PN->eraseFromParent();
1260         continue;
1261       }
1262 
1263       // Replace all uses of CurrentValue in the current instruction with
1264       // reload.
1265       U->replaceUsesOfWith(Def, CurrentReload);
1266     }
1267   }
1268 
1269   BasicBlock *FramePtrBB = FramePtr->getParent();
1270 
1271   auto SpillBlock =
1272       FramePtrBB->splitBasicBlock(FramePtr->getNextNode(), "AllocaSpillBB");
1273   SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
1274   Shape.AllocaSpillBlock = SpillBlock;
1275 
1276   // retcon and retcon.once lowering assumes all uses have been sunk.
1277   if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
1278       Shape.ABI == coro::ABI::Async) {
1279     // If we found any allocas, replace all of their remaining uses with Geps.
1280     Builder.SetInsertPoint(&SpillBlock->front());
1281     for (const auto &P : FrameData.Allocas) {
1282       AllocaInst *Alloca = P.Alloca;
1283       auto *G = GetFramePointer(Alloca);
1284 
      // We are not using ReplaceInstWithInst(Alloca, cast<Instruction>(G))
      // here, as we are changing the location of the instruction.
1287       G->takeName(Alloca);
1288       Alloca->replaceAllUsesWith(G);
1289       Alloca->eraseFromParent();
1290     }
1291     return FramePtr;
1292   }
1293 
1294   // If we found any alloca, replace all of their remaining uses with GEP
1295   // instructions. Because new dbg.declare have been created for these alloca,
1296   // we also delete the original dbg.declare and replace other uses with undef.
1297   // Note: We cannot replace the alloca with GEP instructions indiscriminately,
1298   // as some of the uses may not be dominated by CoroBegin.
1299   Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
1300   SmallVector<Instruction *, 4> UsersToUpdate;
1301   for (const auto &A : FrameData.Allocas) {
1302     AllocaInst *Alloca = A.Alloca;
1303     UsersToUpdate.clear();
1304     for (User *U : Alloca->users()) {
1305       auto *I = cast<Instruction>(U);
1306       if (DT.dominates(CB, I))
1307         UsersToUpdate.push_back(I);
1308     }
1309     if (UsersToUpdate.empty())
1310       continue;
1311     auto *G = GetFramePointer(Alloca);
1312     G->setName(Alloca->getName() + Twine(".reload.addr"));
1313 
1314     SmallPtrSet<BasicBlock *, 4> SeenDbgBBs;
1315     TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(Alloca);
1316     if (!DIs.empty())
1317       DIBuilder(*Alloca->getModule(),
1318                 /*AllowUnresolved*/ false)
1319           .insertDeclare(G, DIs.front()->getVariable(),
1320                          DIs.front()->getExpression(),
1321                          DIs.front()->getDebugLoc(), DIs.front());
1322     for (auto *DI : FindDbgDeclareUses(Alloca))
1323       DI->eraseFromParent();
1324     replaceDbgUsesWithUndef(Alloca);
1325 
1326     for (Instruction *I : UsersToUpdate)
1327       I->replaceUsesOfWith(Alloca, G);
1328   }
1329   Builder.SetInsertPoint(FramePtr->getNextNode());
1330   for (const auto &A : FrameData.Allocas) {
1331     AllocaInst *Alloca = A.Alloca;
1332     if (A.MayWriteBeforeCoroBegin) {
      // MayWriteBeforeCoroBegin means the alloca may have been modified
      // before CoroBegin, so copy its current contents into the frame.
1334       if (Alloca->isArrayAllocation())
1335         report_fatal_error(
1336             "Coroutines cannot handle copying of array allocas yet");
1337 
1338       auto *G = GetFramePointer(Alloca);
1339       auto *Value = Builder.CreateLoad(Alloca->getAllocatedType(), Alloca);
1340       Builder.CreateStore(Value, G);
1341     }
    // For each alias to Alloca created before CoroBegin but used after
    // CoroBegin, we recreate it after CoroBegin by applying the offset
    // to the pointer in the frame.
1345     for (const auto &Alias : A.Aliases) {
1346       auto *FramePtr = GetFramePointer(Alloca);
1347       auto *FramePtrRaw =
1348           Builder.CreateBitCast(FramePtr, Type::getInt8PtrTy(C));
1349       auto *AliasPtr = Builder.CreateGEP(
1350           FramePtrRaw,
1351           ConstantInt::get(Type::getInt64Ty(C), Alias.second.getValue()));
1352       auto *AliasPtrTyped =
1353           Builder.CreateBitCast(AliasPtr, Alias.first->getType());
1354       Alias.first->replaceUsesWithIf(
1355           AliasPtrTyped, [&](Use &U) { return DT.dominates(CB, U); });
1356     }
1357   }
1358   return FramePtr;
1359 }
1360 
1361 // Sets the unwind edge of an instruction to a particular successor.
1362 static void setUnwindEdgeTo(Instruction *TI, BasicBlock *Succ) {
1363   if (auto *II = dyn_cast<InvokeInst>(TI))
1364     II->setUnwindDest(Succ);
1365   else if (auto *CS = dyn_cast<CatchSwitchInst>(TI))
1366     CS->setUnwindDest(Succ);
1367   else if (auto *CR = dyn_cast<CleanupReturnInst>(TI))
1368     CR->setUnwindDest(Succ);
1369   else
1370     llvm_unreachable("unexpected terminator instruction");
1371 }
1372 
// Replaces all uses of OldPred with NewPred in the PHI nodes of DestBB,
// stopping at the optional Until PHI node.
1375 static void updatePhiNodes(BasicBlock *DestBB, BasicBlock *OldPred,
1376                            BasicBlock *NewPred, PHINode *Until = nullptr) {
1377   unsigned BBIdx = 0;
1378   for (BasicBlock::iterator I = DestBB->begin(); isa<PHINode>(I); ++I) {
1379     PHINode *PN = cast<PHINode>(I);
1380 
1381     // We manually update the LandingPadReplacement PHINode and it is the last
1382     // PHI Node. So, if we find it, we are done.
1383     if (Until == PN)
1384       break;
1385 
    // Reuse the previous value of BBIdx if it lines up.  In cases where we
    // have multiple phi nodes with *lots* of predecessors, this is a speed
    // win because we don't have to scan the PHI looking for OldPred.  This
    // works because the incoming-block lists of PHI nodes are usually in the
    // same order.
1391     if (PN->getIncomingBlock(BBIdx) != OldPred)
1392       BBIdx = PN->getBasicBlockIndex(OldPred);
1393 
1394     assert(BBIdx != (unsigned)-1 && "Invalid PHI Index!");
1395     PN->setIncomingBlock(BBIdx, NewPred);
1396   }
1397 }
1398 
1399 // Uses SplitEdge unless the successor block is an EHPad, in which case do EH
1400 // specific handling.
1401 static BasicBlock *ehAwareSplitEdge(BasicBlock *BB, BasicBlock *Succ,
1402                                     LandingPadInst *OriginalPad,
1403                                     PHINode *LandingPadReplacement) {
1404   auto *PadInst = Succ->getFirstNonPHI();
1405   if (!LandingPadReplacement && !PadInst->isEHPad())
1406     return SplitEdge(BB, Succ);
1407 
1408   auto *NewBB = BasicBlock::Create(BB->getContext(), "", BB->getParent(), Succ);
1409   setUnwindEdgeTo(BB->getTerminator(), NewBB);
1410   updatePhiNodes(Succ, BB, NewBB, LandingPadReplacement);
1411 
1412   if (LandingPadReplacement) {
1413     auto *NewLP = OriginalPad->clone();
1414     auto *Terminator = BranchInst::Create(Succ, NewBB);
1415     NewLP->insertBefore(Terminator);
1416     LandingPadReplacement->addIncoming(NewLP, NewBB);
1417     return NewBB;
1418   }
1419   Value *ParentPad = nullptr;
1420   if (auto *FuncletPad = dyn_cast<FuncletPadInst>(PadInst))
1421     ParentPad = FuncletPad->getParentPad();
1422   else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(PadInst))
1423     ParentPad = CatchSwitch->getParentPad();
1424   else
1425     llvm_unreachable("handling for other EHPads not implemented yet");
1426 
1427   auto *NewCleanupPad = CleanupPadInst::Create(ParentPad, {}, "", NewBB);
1428   CleanupReturnInst::Create(NewCleanupPad, Succ, NewBB);
1429   return NewBB;
1430 }
1431 
// Moves the values in the PHIs in SuccBB that correspond to PredBB into a new
1433 // PHI in InsertedBB.
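// InsertedBB is expected to have PredBB as its single predecessor, so each
// moved value becomes a single-entry PHI in InsertedBB.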
1434 static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB,
1435                                          BasicBlock *InsertedBB,
1436                                          BasicBlock *PredBB,
1437                                          PHINode *UntilPHI = nullptr) {
1438   auto *PN = cast<PHINode>(&SuccBB->front());
1439   do {
1440     int Index = PN->getBasicBlockIndex(InsertedBB);
1441     Value *V = PN->getIncomingValue(Index);
1442     PHINode *InputV = PHINode::Create(
1443         V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName(),
1444         &InsertedBB->front());
1445     InputV->addIncoming(V, PredBB);
1446     PN->setIncomingValue(Index, InputV);
1447     PN = dyn_cast<PHINode>(PN->getNextNode());
1448   } while (PN != UntilPHI);
1449 }
1450 
1451 // Rewrites the PHI Nodes in a cleanuppad.
1452 static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
1453                                      CleanupPadInst *CleanupPad) {
1454   // For every incoming edge to a CleanupPad we will create a new block holding
1455   // all incoming values in single-value PHI nodes. We will then create another
  // block to act as a dispatcher (as all unwind edges for related EH blocks
1457   // must be the same).
1458   //
1459   // cleanuppad:
1460   //    %2 = phi i32[%0, %catchswitch], [%1, %catch.1]
1461   //    %3 = cleanuppad within none []
1462   //
1463   // It will create:
1464   //
1465   // cleanuppad.corodispatch
1466   //    %2 = phi i8[0, %catchswitch], [1, %catch.1]
1467   //    %3 = cleanuppad within none []
  //    switch i8 %2, label %unreachable
1469   //            [i8 0, label %cleanuppad.from.catchswitch
1470   //             i8 1, label %cleanuppad.from.catch.1]
1471   // cleanuppad.from.catchswitch:
1472   //    %4 = phi i32 [%0, %catchswitch]
  //    br label %cleanuppad
1474   // cleanuppad.from.catch.1:
1475   //    %6 = phi i32 [%1, %catch.1]
  //    br label %cleanuppad
1477   // cleanuppad:
1478   //    %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
1479   //                 [%6, %cleanuppad.from.catch.1]
1480 
1481   // Unreachable BB, in case switching on an invalid value in the dispatcher.
1482   auto *UnreachBB = BasicBlock::Create(
1483       CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
1484   IRBuilder<> Builder(UnreachBB);
1485   Builder.CreateUnreachable();
1486 
1487   // Create a new cleanuppad which will be the dispatcher.
1488   auto *NewCleanupPadBB =
1489       BasicBlock::Create(CleanupPadBB->getContext(),
1490                          CleanupPadBB->getName() + Twine(".corodispatch"),
1491                          CleanupPadBB->getParent(), CleanupPadBB);
1492   Builder.SetInsertPoint(NewCleanupPadBB);
1493   auto *SwitchType = Builder.getInt8Ty();
1494   auto *SetDispatchValuePN =
1495       Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
1496   CleanupPad->removeFromParent();
1497   CleanupPad->insertAfter(SetDispatchValuePN);
1498   auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
1499                                                 pred_size(CleanupPadBB));
1500 
1501   int SwitchIndex = 0;
1502   SmallVector<BasicBlock *, 8> Preds(predecessors(CleanupPadBB));
1503   for (BasicBlock *Pred : Preds) {
1504     // Create a new cleanuppad and move the PHI values to there.
1505     auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
1506                                       CleanupPadBB->getName() +
1507                                           Twine(".from.") + Pred->getName(),
1508                                       CleanupPadBB->getParent(), CleanupPadBB);
1509     updatePhiNodes(CleanupPadBB, Pred, CaseBB);
1512     Builder.SetInsertPoint(CaseBB);
1513     Builder.CreateBr(CleanupPadBB);
1514     movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);
1515 
1516     // Update this Pred to the new unwind point.
1517     setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);
1518 
1519     // Setup the switch in the dispatcher.
1520     auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
1521     SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
1522     SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
1523     SwitchIndex++;
1524   }
1525 }
1526 
1527 static void rewritePHIs(BasicBlock &BB) {
  // For every incoming edge we will create a block holding all
  // incoming values in single-value PHI nodes.
1530   //
1531   // loop:
1532   //    %n.val = phi i32[%n, %entry], [%inc, %loop]
1533   //
1534   // It will create:
1535   //
1536   // loop.from.entry:
1537   //    %n.loop.pre = phi i32 [%n, %entry]
  //    br label %loop
1539   // loop.from.loop:
1540   //    %inc.loop.pre = phi i32 [%inc, %loop]
  //    br label %loop
1542   //
1543   // After this rewrite, further analysis will ignore any phi nodes with more
1544   // than one incoming edge.
1545 
1546   // TODO: Simplify PHINodes in the basic block to remove duplicate
1547   // predecessors.
1548 
1549   // Special case for CleanupPad: all EH blocks must have the same unwind edge
1550   // so we need to create an additional "dispatcher" block.
1551   if (auto *CleanupPad =
1552           dyn_cast_or_null<CleanupPadInst>(BB.getFirstNonPHI())) {
1553     SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
1554     for (BasicBlock *Pred : Preds) {
1555       if (CatchSwitchInst *CS =
1556               dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
1557         // CleanupPad with a CatchSwitch predecessor: therefore this is an
        // unwind destination that needs to be handled specially.
1559         assert(CS->getUnwindDest() == &BB);
1560         (void)CS;
1561         rewritePHIsForCleanupPad(&BB, CleanupPad);
1562         return;
1563       }
1564     }
1565   }
1566 
1567   LandingPadInst *LandingPad = nullptr;
1568   PHINode *ReplPHI = nullptr;
1569   if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
1570     // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
1571     // We replace the original landing pad with a PHINode that will collect the
1572     // results from all of them.
1573     ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
1574     ReplPHI->takeName(LandingPad);
1575     LandingPad->replaceAllUsesWith(ReplPHI);
1576     // We will erase the original landing pad at the end of this function after
1577     // ehAwareSplitEdge cloned it in the transition blocks.
1578   }
1579 
1580   SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
1581   for (BasicBlock *Pred : Preds) {
1582     auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
1583     IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());
1584 
1585     // Stop the moving of values at ReplPHI, as this is either null or the PHI
1586     // that replaced the landing pad.
1587     movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
1588   }
1589 
1590   if (LandingPad) {
    // Calls to ehAwareSplitEdge cloned the original landing pad into the
    // transition blocks; the original is no longer needed.
1593     LandingPad->eraseFromParent();
1594   }
1595 }
1596 
1597 static void rewritePHIs(Function &F) {
1598   SmallVector<BasicBlock *, 8> WorkList;
1599 
1600   for (BasicBlock &BB : F)
1601     if (auto *PN = dyn_cast<PHINode>(&BB.front()))
1602       if (PN->getNumIncomingValues() > 1)
1603         WorkList.push_back(&BB);
1604 
1605   for (BasicBlock *BB : WorkList)
1606     rewritePHIs(*BB);
1607 }
1608 
// Check for instructions that we can recreate on resume as opposed to spilling
// the result into the coroutine frame.
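// For example, a GEP or a bitcast feeding a use on the resume path can simply
// be cloned next to that use instead of having its result stored in the frame.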
1611 static bool materializable(Instruction &V) {
1612   return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
1613          isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
1614 }
1615 
1616 // Check for structural coroutine intrinsics that should not be spilled into
1617 // the coroutine frame.
1618 static bool isCoroutineStructureIntrinsic(Instruction &I) {
1619   return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
1620          isa<CoroSuspendInst>(&I);
1621 }
1622 
// For every use of a value across a suspend point, recreate that value
// after the suspend point.
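// The definition is cloned at most once per block that contains such uses, and
// every use within that block is redirected to the local clone.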
1625 static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
1626                                               const SpillInfo &Spills) {
1627   for (const auto &E : Spills) {
1628     Value *Def = E.first;
1629     BasicBlock *CurrentBlock = nullptr;
1630     Instruction *CurrentMaterialization = nullptr;
1631     for (Instruction *U : E.second) {
1632       // If we have not seen this block, materialize the value.
1633       if (CurrentBlock != U->getParent()) {
1634         CurrentBlock = U->getParent();
1635         CurrentMaterialization = cast<Instruction>(Def)->clone();
1636         CurrentMaterialization->setName(Def->getName());
1637         CurrentMaterialization->insertBefore(
1638             &*CurrentBlock->getFirstInsertionPt());
1639       }
1640       if (auto *PN = dyn_cast<PHINode>(U)) {
1641         assert(PN->getNumIncomingValues() == 1 &&
1642                "unexpected number of incoming "
1643                "values in the PHINode");
1644         PN->replaceAllUsesWith(CurrentMaterialization);
1645         PN->eraseFromParent();
1646         continue;
1647       }
1648       // Replace all uses of Def in the current instruction with the
1649       // CurrentMaterialization for the block.
1650       U->replaceUsesOfWith(Def, CurrentMaterialization);
1651     }
1652   }
1653 }
1654 
// Splits the block at a particular instruction unless it is the first
// instruction of a block that has a single predecessor.
1657 static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
1658   auto *BB = I->getParent();
1659   if (&BB->front() == I) {
1660     if (BB->getSinglePredecessor()) {
1661       BB->setName(Name);
1662       return BB;
1663     }
1664   }
1665   return BB->splitBasicBlock(I, Name);
1666 }
1667 
// Split above and below a particular instruction so that it
// ends up alone in its own block.
1670 static void splitAround(Instruction *I, const Twine &Name) {
1671   splitBlockIfNotFirst(I, Name);
1672   splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
1673 }
1674 
1675 static bool isSuspendBlock(BasicBlock *BB) {
1676   return isa<AnyCoroSuspendInst>(BB->front());
1677 }
1678 
1679 typedef SmallPtrSet<BasicBlock*, 8> VisitedBlocksSet;
1680 
1681 /// Does control flow starting at the given block ever reach a suspend
1682 /// instruction before reaching a block in VisitedOrFreeBBs?
1683 static bool isSuspendReachableFrom(BasicBlock *From,
1684                                    VisitedBlocksSet &VisitedOrFreeBBs) {
1685   // Eagerly try to add this block to the visited set.  If it's already
1686   // there, stop recursing; this path doesn't reach a suspend before
1687   // either looping or reaching a freeing block.
1688   if (!VisitedOrFreeBBs.insert(From).second)
1689     return false;
1690 
1691   // We assume that we'll already have split suspends into their own blocks.
1692   if (isSuspendBlock(From))
1693     return true;
1694 
1695   // Recurse on the successors.
1696   for (auto Succ : successors(From)) {
1697     if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
1698       return true;
1699   }
1700 
1701   return false;
1702 }
1703 
1704 /// Is the given alloca "local", i.e. bounded in lifetime to not cross a
1705 /// suspend point?
1706 static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
  // Seed the visited set with all the basic blocks containing a free
  // so that the search will not continue past them.
1709   VisitedBlocksSet VisitedOrFreeBBs;
1710   for (auto User : AI->users()) {
1711     if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
1712       VisitedOrFreeBBs.insert(FI->getParent());
1713   }
1714 
1715   return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
1716 }
1717 
1718 /// After we split the coroutine, will the given basic block be along
1719 /// an obvious exit path for the resumption function?
1720 static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
1721                                               unsigned depth = 3) {
1722   // If we've bottomed out our depth count, stop searching and assume
1723   // that the path might loop back.
1724   if (depth == 0) return false;
1725 
1726   // If this is a suspend block, we're about to exit the resumption function.
1727   if (isSuspendBlock(BB)) return true;
1728 
1729   // Recurse into the successors.
1730   for (auto Succ : successors(BB)) {
1731     if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
1732       return false;
1733   }
1734 
1735   // If none of the successors leads back in a loop, we're on an exit/abort.
1736   return true;
1737 }
1738 
1739 static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
1740   // Look for a free that isn't sufficiently obviously followed by
1741   // either a suspend or a termination, i.e. something that will leave
1742   // the coro resumption frame.
1743   for (auto U : AI->users()) {
1744     auto FI = dyn_cast<CoroAllocaFreeInst>(U);
1745     if (!FI) continue;
1746 
1747     if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
1748       return true;
1749   }
1750 
1751   // If we never found one, we don't need a stack save.
1752   return false;
1753 }
1754 
1755 /// Turn each of the given local allocas into a normal (dynamic) alloca
1756 /// instruction.
1757 static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst*> LocalAllocas,
1758                               SmallVectorImpl<Instruction*> &DeadInsts) {
1759   for (auto AI : LocalAllocas) {
1760     auto M = AI->getModule();
1761     IRBuilder<> Builder(AI);
1762 
1763     // Save the stack depth.  Try to avoid doing this if the stackrestore
1764     // is going to immediately precede a return or something.
1765     Value *StackSave = nullptr;
1766     if (localAllocaNeedsStackSave(AI))
1767       StackSave = Builder.CreateCall(
1768                             Intrinsic::getDeclaration(M, Intrinsic::stacksave));
1769 
1770     // Allocate memory.
1771     auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
1772     Alloca->setAlignment(Align(AI->getAlignment()));
1773 
1774     for (auto U : AI->users()) {
1775       // Replace gets with the allocation.
1776       if (isa<CoroAllocaGetInst>(U)) {
1777         U->replaceAllUsesWith(Alloca);
1778 
1779       // Replace frees with stackrestores.  This is safe because
1780       // alloca.alloc is required to obey a stack discipline, although we
1781       // don't enforce that structurally.
1782       } else {
1783         auto FI = cast<CoroAllocaFreeInst>(U);
1784         if (StackSave) {
1785           Builder.SetInsertPoint(FI);
1786           Builder.CreateCall(
1787                     Intrinsic::getDeclaration(M, Intrinsic::stackrestore),
1788                              StackSave);
1789         }
1790       }
1791       DeadInsts.push_back(cast<Instruction>(U));
1792     }
1793 
1794     DeadInsts.push_back(AI);
1795   }
1796 }
1797 
1798 /// Turn the given coro.alloca.alloc call into a dynamic allocation.
1799 /// This happens during the all-instructions iteration, so it must not
1800 /// delete the call.
1801 static Instruction *lowerNonLocalAlloca(CoroAllocaAllocInst *AI,
1802                                         coro::Shape &Shape,
1803                                    SmallVectorImpl<Instruction*> &DeadInsts) {
1804   IRBuilder<> Builder(AI);
1805   auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);
1806 
1807   for (User *U : AI->users()) {
1808     if (isa<CoroAllocaGetInst>(U)) {
1809       U->replaceAllUsesWith(Alloc);
1810     } else {
1811       auto FI = cast<CoroAllocaFreeInst>(U);
1812       Builder.SetInsertPoint(FI);
1813       Shape.emitDealloc(Builder, Alloc, nullptr);
1814     }
1815     DeadInsts.push_back(cast<Instruction>(U));
1816   }
1817 
1818   // Push this on last so that it gets deleted after all the others.
1819   DeadInsts.push_back(AI);
1820 
1821   // Return the new allocation value so that we can check for needed spills.
1822   return cast<Instruction>(Alloc);
1823 }
1824 
1825 /// Get the current swifterror value.
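///
/// This is modeled as a call through a null function pointer; the call is
/// recorded in Shape.SwiftErrorOps and replaced with a real swifterror access
/// when the coroutine is split.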
1826 static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
1827                                      coro::Shape &Shape) {
1828   // Make a fake function pointer as a sort of intrinsic.
1829   auto FnTy = FunctionType::get(ValueTy, {}, false);
1830   auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
1831 
1832   auto Call = Builder.CreateCall(FnTy, Fn, {});
1833   Shape.SwiftErrorOps.push_back(Call);
1834 
1835   return Call;
1836 }
1837 
1838 /// Set the given value as the current swifterror value.
1839 ///
1840 /// Returns a slot that can be used as a swifterror slot.
1841 static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
1842                                      coro::Shape &Shape) {
1843   // Make a fake function pointer as a sort of intrinsic.
1844   auto FnTy = FunctionType::get(V->getType()->getPointerTo(),
1845                                 {V->getType()}, false);
1846   auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
1847 
1848   auto Call = Builder.CreateCall(FnTy, Fn, { V });
1849   Shape.SwiftErrorOps.push_back(Call);
1850 
1851   return Call;
1852 }
1853 
1854 /// Set the swifterror value from the given alloca before a call,
/// then put it back in the alloca afterwards.
1856 ///
1857 /// Returns an address that will stand in for the swifterror slot
1858 /// until splitting.
1859 static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
1860                                                  AllocaInst *Alloca,
1861                                                  coro::Shape &Shape) {
1862   auto ValueTy = Alloca->getAllocatedType();
1863   IRBuilder<> Builder(Call);
1864 
1865   // Load the current value from the alloca and set it as the
1866   // swifterror value.
1867   auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
1868   auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);
1869 
1870   // Move to after the call.  Since swifterror only has a guaranteed
1871   // value on normal exits, we can ignore implicit and explicit unwind
1872   // edges.
1873   if (isa<CallInst>(Call)) {
1874     Builder.SetInsertPoint(Call->getNextNode());
1875   } else {
1876     auto Invoke = cast<InvokeInst>(Call);
1877     Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
1878   }
1879 
1880   // Get the current swifterror value and store it to the alloca.
1881   auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
1882   Builder.CreateStore(ValueAfterCall, Alloca);
1883 
1884   return Addr;
1885 }
1886 
1887 /// Eliminate a formerly-swifterror alloca by inserting the get/set
1888 /// intrinsics and attempting to MemToReg the alloca away.
1889 static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
1890                                       coro::Shape &Shape) {
1891   for (auto UI = Alloca->use_begin(), UE = Alloca->use_end(); UI != UE; ) {
1892     // We're likely changing the use list, so use a mutation-safe
1893     // iteration pattern.
1894     auto &Use = *UI;
1895     ++UI;
1896 
1897     // swifterror values can only be used in very specific ways.
1898     // We take advantage of that here.
1899     auto User = Use.getUser();
1900     if (isa<LoadInst>(User) || isa<StoreInst>(User))
1901       continue;
1902 
1903     assert(isa<CallInst>(User) || isa<InvokeInst>(User));
1904     auto Call = cast<Instruction>(User);
1905 
1906     auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);
1907 
1908     // Use the returned slot address as the call argument.
1909     Use.set(Addr);
1910   }
1911 
1912   // All the uses should be loads and stores now.
1913   assert(isAllocaPromotable(Alloca));
1914 }
1915 
1916 /// "Eliminate" a swifterror argument by reducing it to the alloca case
/// and then loading and storing in the prologue and epilogue.
1918 ///
1919 /// The argument keeps the swifterror flag.
1920 static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
1921                                         coro::Shape &Shape,
1922                              SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
1923   IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
1924 
1925   auto ArgTy = cast<PointerType>(Arg.getType());
1926   auto ValueTy = ArgTy->getElementType();
1927 
1928   // Reduce to the alloca case:
1929 
1930   // Create an alloca and replace all uses of the arg with it.
1931   auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
1932   Arg.replaceAllUsesWith(Alloca);
1933 
1934   // Set an initial value in the alloca.  swifterror is always null on entry.
1935   auto InitialValue = Constant::getNullValue(ValueTy);
1936   Builder.CreateStore(InitialValue, Alloca);
1937 
1938   // Find all the suspends in the function and save and restore around them.
1939   for (auto Suspend : Shape.CoroSuspends) {
1940     (void) emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
1941   }
1942 
1943   // Find all the coro.ends in the function and restore the error value.
1944   for (auto End : Shape.CoroEnds) {
1945     Builder.SetInsertPoint(End);
1946     auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
1947     (void) emitSetSwiftErrorValue(Builder, FinalValue, Shape);
1948   }
1949 
1950   // Now we can use the alloca logic.
1951   AllocasToPromote.push_back(Alloca);
1952   eliminateSwiftErrorAlloca(F, Alloca, Shape);
1953 }
1954 
1955 /// Eliminate all problematic uses of swifterror arguments and allocas
1956 /// from the function.  We'll fix them up later when splitting the function.
1957 static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
1958   SmallVector<AllocaInst*, 4> AllocasToPromote;
1959 
1960   // Look for a swifterror argument.
1961   for (auto &Arg : F.args()) {
1962     if (!Arg.hasSwiftErrorAttr()) continue;
1963 
1964     eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
1965     break;
1966   }
1967 
1968   // Look for swifterror allocas.
1969   for (auto &Inst : F.getEntryBlock()) {
1970     auto Alloca = dyn_cast<AllocaInst>(&Inst);
1971     if (!Alloca || !Alloca->isSwiftError()) continue;
1972 
1973     // Clear the swifterror flag.
1974     Alloca->setSwiftError(false);
1975 
1976     AllocasToPromote.push_back(Alloca);
1977     eliminateSwiftErrorAlloca(F, Alloca, Shape);
1978   }
1979 
1980   // If we have any allocas to promote, compute a dominator tree and
1981   // promote them en masse.
1982   if (!AllocasToPromote.empty()) {
1983     DominatorTree DT(F);
1984     PromoteMemToReg(AllocasToPromote, DT);
1985   }
1986 }
1987 
1988 /// retcon and retcon.once conventions assume that all spill uses can be sunk
1989 /// after the coro.begin intrinsic.
1990 static void sinkSpillUsesAfterCoroBegin(Function &F,
1991                                         const FrameDataInfo &FrameData,
1992                                         CoroBeginInst *CoroBegin) {
1993   DominatorTree Dom(F);
1994 
1995   SmallSetVector<Instruction *, 32> ToMove;
1996   SmallVector<Instruction *, 32> Worklist;
1997 
1998   // Collect all users that precede coro.begin.
1999   for (auto *Def : FrameData.getAllDefs()) {
2000     for (User *U : Def->users()) {
2001       auto Inst = cast<Instruction>(U);
2002       if (Inst->getParent() != CoroBegin->getParent() ||
2003           Dom.dominates(CoroBegin, Inst))
2004         continue;
2005       if (ToMove.insert(Inst))
2006         Worklist.push_back(Inst);
2007     }
2008   }
2009   // Recursively collect users before coro.begin.
2010   while (!Worklist.empty()) {
2011     auto *Def = Worklist.pop_back_val();
2012     for (User *U : Def->users()) {
2013       auto Inst = cast<Instruction>(U);
2014       if (Dom.dominates(CoroBegin, Inst))
2015         continue;
2016       if (ToMove.insert(Inst))
2017         Worklist.push_back(Inst);
2018     }
2019   }
2020 
2021   // Sort by dominance.
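  // Because a definition dominates its uses, emitting the instructions in this
  // order keeps every moved definition ahead of its moved users.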
2022   SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
2023   llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
    // If A dominates B, it should precede (<) B.
2025     return Dom.dominates(A, B);
2026   });
2027 
2028   Instruction *InsertPt = CoroBegin->getNextNode();
2029   for (Instruction *Inst : InsertionList)
2030     Inst->moveBefore(InsertPt);
2031 }
2032 
/// For each alloca whose uses are all dominated by a single candidate block
/// (the entry block or a block immediately following a suspend point) and do
/// not cross another suspend point, sink its lifetime.start marker into that
/// block. Doing so minimizes the lifetime of each variable, hence minimizing
/// the amount of data we end up putting on the frame.
2037 static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape,
2038                                      SuspendCrossingInfo &Checker) {
2039   DominatorTree DT(F);
2040 
2041   // Collect all possible basic blocks which may dominate all uses of allocas.
2042   SmallPtrSet<BasicBlock *, 4> DomSet;
2043   DomSet.insert(&F.getEntryBlock());
2044   for (auto *CSI : Shape.CoroSuspends) {
2045     BasicBlock *SuspendBlock = CSI->getParent();
2046     assert(isSuspendBlock(SuspendBlock) && SuspendBlock->getSingleSuccessor() &&
2047            "should have split coro.suspend into its own block");
2048     DomSet.insert(SuspendBlock->getSingleSuccessor());
2049   }
2050 
2051   for (Instruction &I : instructions(F)) {
2052     AllocaInst* AI = dyn_cast<AllocaInst>(&I);
2053     if (!AI)
2054       continue;
2055 
2056     for (BasicBlock *DomBB : DomSet) {
2057       bool Valid = true;
2058       SmallVector<Instruction *, 1> Lifetimes;
2059 
2060       auto isLifetimeStart = [](Instruction* I) {
2061         if (auto* II = dyn_cast<IntrinsicInst>(I))
2062           return II->getIntrinsicID() == Intrinsic::lifetime_start;
2063         return false;
2064       };
2065 
2066       auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
2067         if (isLifetimeStart(U)) {
2068           Lifetimes.push_back(U);
2069           return true;
2070         }
2071         if (!U->hasOneUse() || U->stripPointerCasts() != AI)
2072           return false;
2073         if (isLifetimeStart(U->user_back())) {
2074           Lifetimes.push_back(U->user_back());
2075           return true;
2076         }
2077         return false;
2078       };
2079 
2080       for (User *U : AI->users()) {
2081         Instruction *UI = cast<Instruction>(U);
2082         // For all users except lifetime.start markers, if they are all
2083         // dominated by one of the basic blocks and do not cross
2084         // suspend points as well, then there is no need to spill the
2085         // instruction.
2086         if (!DT.dominates(DomBB, UI->getParent()) ||
2087             Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
2088           // Skip lifetime.start, GEP and bitcast used by lifetime.start
2089           // markers.
2090           if (collectLifetimeStart(UI, AI))
2091             continue;
2092           Valid = false;
2093           break;
2094         }
2095       }
      // Sink the lifetime.start marker into DomBB when every other use of the
      // alloca is dominated by DomBB and does not cross a suspend point.
2098       if (Valid && Lifetimes.size() != 0) {
        // The cast may be AI itself when AI already has i8* type.
2100         auto *NewBitCast = [&](AllocaInst *AI) -> Value* {
2101           if (isa<AllocaInst>(Lifetimes[0]->getOperand(1)))
2102             return AI;
2103           auto *Int8PtrTy = Type::getInt8PtrTy(F.getContext());
2104           return CastInst::Create(Instruction::BitCast, AI, Int8PtrTy, "",
2105                                   DomBB->getTerminator());
2106         }(AI);
2107 
2108         auto *NewLifetime = Lifetimes[0]->clone();
2109         NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), NewBitCast);
2110         NewLifetime->insertBefore(DomBB->getTerminator());
2111 
        // The original lifetime.start markers are no longer necessary.
2113         for (Instruction *S : Lifetimes)
2114           S->eraseFromParent();
2115 
2116         break;
2117       }
2118     }
2119   }
2120 }
2121 
2122 static void collectFrameAllocas(Function &F, coro::Shape &Shape,
2123                                 const SuspendCrossingInfo &Checker,
2124                                 SmallVectorImpl<AllocaInfo> &Allocas) {
2125   // Collect lifetime.start info for each alloca.
2126   using LifetimeStart = SmallPtrSet<Instruction *, 2>;
2127   llvm::DenseMap<AllocaInst *, std::unique_ptr<LifetimeStart>> LifetimeMap;
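  // Map each alloca to the lifetime.start markers that apply to it; when a
  // marker operates on a cast of the alloca rather than the alloca itself, the
  // cast is recorded instead.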
2128   for (Instruction &I : instructions(F)) {
2129     auto *II = dyn_cast<IntrinsicInst>(&I);
2130     if (!II || II->getIntrinsicID() != Intrinsic::lifetime_start)
2131       continue;
2132 
2133     if (auto *OpInst = dyn_cast<Instruction>(II->getOperand(1))) {
2134       if (auto *AI = dyn_cast<AllocaInst>(OpInst->stripPointerCasts())) {
2135 
2136         if (LifetimeMap.find(AI) == LifetimeMap.end())
2137           LifetimeMap[AI] = std::make_unique<LifetimeStart>();
2138         LifetimeMap[AI]->insert(isa<AllocaInst>(OpInst) ? II : OpInst);
2139       }
2140     }
2141   }
2142 
2143   for (Instruction &I : instructions(F)) {
2144     auto *AI = dyn_cast<AllocaInst>(&I);
2145     if (!AI)
2146       continue;
2147     // The PromiseAlloca will be specially handled since it needs to be in a
2148     // fixed position in the frame.
2149     if (AI == Shape.SwitchLowering.PromiseAlloca) {
2150       continue;
2151     }
2152     bool ShouldLiveOnFrame = false;
2153     auto Iter = LifetimeMap.find(AI);
2154     if (Iter != LifetimeMap.end()) {
2155       // Check against lifetime.start if the instruction has the info.
2156       for (User *U : I.users()) {
2157         for (auto *S : *Iter->second)
2158           if ((ShouldLiveOnFrame = Checker.isDefinitionAcrossSuspend(*S, U)))
2159             break;
2160         if (ShouldLiveOnFrame)
2161           break;
2162       }
2163       if (!ShouldLiveOnFrame)
2164         continue;
2165     }
2166     // At this point, either ShouldLiveOnFrame is true or we didn't have
2167     // lifetime information. We will need to rely on more precise pointer
2168     // tracking.
2169     DominatorTree DT(F);
2170     AllocaUseVisitor Visitor{F.getParent()->getDataLayout(), DT,
2171                              *Shape.CoroBegin, Checker};
2172     Visitor.visitPtr(*AI);
2173     if (!Visitor.getShouldLiveOnFrame())
2174       continue;
2175     Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
2176                          Visitor.getMayWriteBeforeCoroBegin());
2177   }
2178 }
2179 
2180 void coro::salvageDebugInfo(
2181     SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> &DbgPtrAllocaCache,
2182     DbgDeclareInst *DDI, bool LoadFromFramePtr) {
2183   Function *F = DDI->getFunction();
2184   IRBuilder<> Builder(F->getContext());
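  // Place the builder at the start of the entry block, skipping over any
  // intrinsics already there; the cache alloca created below, if needed, is
  // inserted at this point.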
2185   auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
2186   while (isa<IntrinsicInst>(InsertPt))
2187     ++InsertPt;
2188   Builder.SetInsertPoint(&F->getEntryBlock(), InsertPt);
2189   DIExpression *Expr = DDI->getExpression();
  // Follow the pointer arithmetic all the way back to the incoming
  // function argument, folding any offsets into the DIExpression.
2192   Value *Storage = DDI->getAddress();
2193   while (Storage) {
2194     if (auto *LdInst = dyn_cast<LoadInst>(Storage)) {
2195       Storage = LdInst->getOperand(0);
2196     } else if (auto *StInst = dyn_cast<StoreInst>(Storage)) {
2197       Storage = StInst->getOperand(0);
2198     } else if (auto *GEPInst = dyn_cast<GetElementPtrInst>(Storage)) {
2199       Expr = llvm::salvageDebugInfoImpl(*GEPInst, Expr,
2200                                         /*WithStackValue=*/false);
2201       Storage = GEPInst->getOperand(0);
2202     } else if (auto *BCInst = dyn_cast<llvm::BitCastInst>(Storage))
2203       Storage = BCInst->getOperand(0);
2204     else
2205       break;
2206   }
2207   // Store a pointer to the coroutine frame object in an alloca so it
2208   // is available throughout the function when producing unoptimized
2209   // code. Extending the lifetime this way is correct because the
2210   // variable has been declared by a dbg.declare intrinsic.
2211   if (auto Arg = dyn_cast_or_null<llvm::Argument>(Storage)) {
2212     auto &Cached = DbgPtrAllocaCache[Storage];
2213     if (!Cached) {
2214       Cached = Builder.CreateAlloca(Storage->getType(), 0, nullptr,
2215                                     Arg->getName() + ".debug");
2216       Builder.CreateStore(Storage, Cached);
2217     }
2218     Storage = Cached;
2219   }
2220   // The FramePtr object adds one extra layer of indirection that
2221   // needs to be unwrapped.
2222   if (LoadFromFramePtr)
2223     Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2224   auto &VMContext = DDI->getFunction()->getContext();
2225   DDI->setOperand(
2226       0, MetadataAsValue::get(VMContext, ValueAsMetadata::get(Storage)));
2227   DDI->setOperand(2, MetadataAsValue::get(VMContext, Expr));
2228   if (auto *InsertPt = dyn_cast_or_null<Instruction>(Storage))
2229     DDI->moveAfter(InsertPt);
2230 }
2231 
2232 void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
2233   eliminateSwiftError(F, Shape);
2234 
2235   if (Shape.ABI == coro::ABI::Switch &&
2236       Shape.SwitchLowering.PromiseAlloca) {
2237     Shape.getSwitchCoroId()->clearPromise();
2238   }
2239 
2240   // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
2241   // intrinsics are in their own blocks to simplify the logic of building up
2242   // SuspendCrossing data.
2243   for (auto *CSI : Shape.CoroSuspends) {
2244     if (auto *Save = CSI->getCoroSave())
2245       splitAround(Save, "CoroSave");
2246     splitAround(CSI, "CoroSuspend");
2247   }
2248 
2249   // Put CoroEnds into their own blocks.
2250   for (AnyCoroEndInst *CE : Shape.CoroEnds) {
2251     splitAround(CE, "CoroEnd");
2252 
2253     // Emit the musttail call function in a new block before the CoroEnd.
    // We do this here so that the right suspend crossing info is computed for
    // the arguments of the musttail call. (Arguments to the coro.end
    // intrinsic would otherwise be ignored.)
2257     if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
2258       auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
2259       if (!MustTailCallFn)
2260         continue;
2261       IRBuilder<> Builder(AsyncEnd);
2262       SmallVector<Value *, 8> Args(AsyncEnd->args());
2263       auto Arguments = ArrayRef<Value *>(Args).drop_front(3);
2264       auto *Call = createMustTailCall(AsyncEnd->getDebugLoc(), MustTailCallFn,
2265                                       Arguments, Builder);
2266       splitAround(Call, "MustTailCall.Before.CoroEnd");
2267     }
2268   }
2269 
  // Transform multi-edge PHI nodes so that any value feeding into a PHI never
  // has its definition separated from the PHI by a suspend point.
2272   rewritePHIs(F);
2273 
2274   // Build suspend crossing info.
2275   SuspendCrossingInfo Checker(F, Shape);
2276 
2277   IRBuilder<> Builder(F.getContext());
2278   FrameDataInfo FrameData;
2279   SmallVector<CoroAllocaAllocInst*, 4> LocalAllocas;
2280   SmallVector<Instruction*, 4> DeadInstructions;
2281 
2282   {
2283     SpillInfo Spills;
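    // Run a few rounds: rematerializing one instruction at its uses can leave
    // its operands with new uses across a suspend point, making them
    // candidates for rematerialization in the next round.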
2284     for (int Repeat = 0; Repeat < 4; ++Repeat) {
2285       // See if there are materializable instructions across suspend points.
2286       for (Instruction &I : instructions(F))
2287         if (materializable(I))
2288           for (User *U : I.users())
2289             if (Checker.isDefinitionAcrossSuspend(I, U))
2290               Spills[&I].push_back(cast<Instruction>(U));
2291 
2292       if (Spills.empty())
2293         break;
2294 
2295       // Rewrite materializable instructions to be materialized at the use
2296       // point.
2297       LLVM_DEBUG(dumpSpills("Materializations", Spills));
2298       rewriteMaterializableInstructions(Builder, Spills);
2299       Spills.clear();
2300     }
2301   }
2302 
2303   sinkLifetimeStartMarkers(F, Shape, Checker);
2304   collectFrameAllocas(F, Shape, Checker, FrameData.Allocas);
2305   LLVM_DEBUG(dumpAllocas(FrameData.Allocas));
2306 
  // Collect the spills for arguments and other non-materializable values.
2308   for (Argument &A : F.args())
2309     for (User *U : A.users())
2310       if (Checker.isDefinitionAcrossSuspend(A, U))
2311         FrameData.Spills[&A].push_back(cast<Instruction>(U));
2312 
2313   for (Instruction &I : instructions(F)) {
2314     // Values returned from coroutine structure intrinsics should not be part
2315     // of the Coroutine Frame.
2316     if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
2317       continue;
2318 
    // The coroutine promise is always included in the coroutine frame; there
    // is no need to check for suspend crossing.
2321     if (Shape.ABI == coro::ABI::Switch &&
2322         Shape.SwitchLowering.PromiseAlloca == &I)
2323       continue;
2324 
2325     // Handle alloca.alloc specially here.
2326     if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
2327       // Check whether the alloca's lifetime is bounded by suspend points.
2328       if (isLocalAlloca(AI)) {
2329         LocalAllocas.push_back(AI);
2330         continue;
2331       }
2332 
2333       // If not, do a quick rewrite of the alloca and then add spills of
2334       // the rewritten value.  The rewrite doesn't invalidate anything in
2335       // Spills because the other alloca intrinsics have no other operands
2336       // besides AI, and it doesn't invalidate the iteration because we delay
2337       // erasing AI.
2338       auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);
2339 
2340       for (User *U : Alloc->users()) {
2341         if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
2342           FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
2343       }
2344       continue;
2345     }
2346 
2347     // Ignore alloca.get; we process this as part of coro.alloca.alloc.
2348     if (isa<CoroAllocaGetInst>(I))
2349       continue;
2350 
2351     if (isa<AllocaInst>(I))
2352       continue;
2353 
2354     for (User *U : I.users())
2355       if (Checker.isDefinitionAcrossSuspend(I, U)) {
2356         // We cannot spill a token.
2357         if (I.getType()->isTokenTy())
2358           report_fatal_error(
2359               "token definition is separated from the use by a suspend point");
2360         FrameData.Spills[&I].push_back(cast<Instruction>(U));
2361       }
2362   }
2363   LLVM_DEBUG(dumpSpills("Spills", FrameData.Spills));
2364   if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
2365       Shape.ABI == coro::ABI::Async)
2366     sinkSpillUsesAfterCoroBegin(F, FrameData, Shape.CoroBegin);
2367   Shape.FrameTy = buildFrameType(F, Shape, FrameData);
2368   Shape.FramePtr = insertSpills(FrameData, Shape);
2369   lowerLocalAllocas(LocalAllocas, DeadInstructions);
2370 
2371   for (auto I : DeadInstructions)
2372     I->eraseFromParent();
2373 }
2374