1 //===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 // This file contains classes used to discover if for a particular value
9 // there from sue to definition that crosses a suspend block.
10 //
11 // Using the information discovered we form a Coroutine Frame structure to
12 // contain those values. All uses of those values are replaced with appropriate
13 // GEP + load from the coroutine frame. At the point of the definition we spill
14 // the value into the coroutine frame.
15 //===----------------------------------------------------------------------===//
16 
17 #include "CoroInternal.h"
18 #include "llvm/ADT/BitVector.h"
19 #include "llvm/ADT/SmallString.h"
20 #include "llvm/Analysis/PtrUseVisitor.h"
21 #include "llvm/Analysis/StackLifetime.h"
22 #include "llvm/Config/llvm-config.h"
23 #include "llvm/IR/CFG.h"
24 #include "llvm/IR/DIBuilder.h"
25 #include "llvm/IR/Dominators.h"
26 #include "llvm/IR/IRBuilder.h"
27 #include "llvm/IR/InstIterator.h"
28 #include "llvm/Support/CommandLine.h"
29 #include "llvm/Support/Debug.h"
30 #include "llvm/Support/MathExtras.h"
31 #include "llvm/Support/OptimizedStructLayout.h"
32 #include "llvm/Support/circular_raw_ostream.h"
33 #include "llvm/Support/raw_ostream.h"
34 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
35 #include "llvm/Transforms/Utils/Local.h"
36 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
37 #include <algorithm>
38 
39 using namespace llvm;
40 
41 // The "coro-suspend-crossing" flag is very noisy. There is another debug type,
42 // "coro-frame", which results in leaner debug spew.
43 #define DEBUG_TYPE "coro-suspend-crossing"
44 
45 static cl::opt<bool> EnableReuseStorageInFrame(
46     "reuse-storage-in-coroutine-frame", cl::Hidden,
47     cl::desc(
48         "Enable the optimization which would reuse the storage in the coroutine \
49          frame for allocas whose liferanges are not overlapped, for testing purposes"),
50     llvm::cl::init(false));
51 
52 enum { SmallVectorThreshold = 32 };
53 
54 // Provides two way mapping between the blocks and numbers.
55 namespace {
56 class BlockToIndexMapping {
57   SmallVector<BasicBlock *, SmallVectorThreshold> V;
58 
59 public:
size() const60   size_t size() const { return V.size(); }
61 
BlockToIndexMapping(Function & F)62   BlockToIndexMapping(Function &F) {
63     for (BasicBlock &BB : F)
64       V.push_back(&BB);
65     llvm::sort(V);
66   }
67 
blockToIndex(BasicBlock * BB) const68   size_t blockToIndex(BasicBlock *BB) const {
69     auto *I = llvm::lower_bound(V, BB);
70     assert(I != V.end() && *I == BB && "BasicBlockNumberng: Unknown block");
71     return I - V.begin();
72   }
73 
indexToBlock(unsigned Index) const74   BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
75 };
76 } // end anonymous namespace
77 
78 // The SuspendCrossingInfo maintains data that allows to answer a question
79 // whether given two BasicBlocks A and B there is a path from A to B that
80 // passes through a suspend point.
81 //
82 // For every basic block 'i' it maintains a BlockData that consists of:
83 //   Consumes:  a bit vector which contains a set of indices of blocks that can
84 //              reach block 'i'
85 //   Kills: a bit vector which contains a set of indices of blocks that can
86 //          reach block 'i', but one of the path will cross a suspend point
87 //   Suspend: a boolean indicating whether block 'i' contains a suspend point.
88 //   End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
89 //
90 namespace {
91 struct SuspendCrossingInfo {
92   BlockToIndexMapping Mapping;
93 
94   struct BlockData {
95     BitVector Consumes;
96     BitVector Kills;
97     bool Suspend = false;
98     bool End = false;
99   };
100   SmallVector<BlockData, SmallVectorThreshold> Block;
101 
successors__anon6bb6f9a70311::SuspendCrossingInfo102   iterator_range<succ_iterator> successors(BlockData const &BD) const {
103     BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
104     return llvm::successors(BB);
105   }
106 
getBlockData__anon6bb6f9a70311::SuspendCrossingInfo107   BlockData &getBlockData(BasicBlock *BB) {
108     return Block[Mapping.blockToIndex(BB)];
109   }
110 
111   void dump() const;
112   void dump(StringRef Label, BitVector const &BV) const;
113 
114   SuspendCrossingInfo(Function &F, coro::Shape &Shape);
115 
hasPathCrossingSuspendPoint__anon6bb6f9a70311::SuspendCrossingInfo116   bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
117     size_t const DefIndex = Mapping.blockToIndex(DefBB);
118     size_t const UseIndex = Mapping.blockToIndex(UseBB);
119 
120     bool const Result = Block[UseIndex].Kills[DefIndex];
121     LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
122                       << " answer is " << Result << "\n");
123     return Result;
124   }
125 
isDefinitionAcrossSuspend__anon6bb6f9a70311::SuspendCrossingInfo126   bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
127     auto *I = cast<Instruction>(U);
128 
129     // We rewrote PHINodes, so that only the ones with exactly one incoming
130     // value need to be analyzed.
131     if (auto *PN = dyn_cast<PHINode>(I))
132       if (PN->getNumIncomingValues() > 1)
133         return false;
134 
135     BasicBlock *UseBB = I->getParent();
136 
137     // As a special case, treat uses by an llvm.coro.suspend.retcon or an
138     // llvm.coro.suspend.async as if they were uses in the suspend's single
139     // predecessor: the uses conceptually occur before the suspend.
140     if (isa<CoroSuspendRetconInst>(I) || isa<CoroSuspendAsyncInst>(I)) {
141       UseBB = UseBB->getSinglePredecessor();
142       assert(UseBB && "should have split coro.suspend into its own block");
143     }
144 
145     return hasPathCrossingSuspendPoint(DefBB, UseBB);
146   }
147 
isDefinitionAcrossSuspend__anon6bb6f9a70311::SuspendCrossingInfo148   bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
149     return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
150   }
151 
isDefinitionAcrossSuspend__anon6bb6f9a70311::SuspendCrossingInfo152   bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
153     auto *DefBB = I.getParent();
154 
155     // As a special case, treat values produced by an llvm.coro.suspend.*
156     // as if they were defined in the single successor: the uses
157     // conceptually occur after the suspend.
158     if (isa<AnyCoroSuspendInst>(I)) {
159       DefBB = DefBB->getSingleSuccessor();
160       assert(DefBB && "should have split coro.suspend into its own block");
161     }
162 
163     return isDefinitionAcrossSuspend(DefBB, U);
164   }
165 
isDefinitionAcrossSuspend__anon6bb6f9a70311::SuspendCrossingInfo166   bool isDefinitionAcrossSuspend(Value &V, User *U) const {
167     if (auto *Arg = dyn_cast<Argument>(&V))
168       return isDefinitionAcrossSuspend(*Arg, U);
169     if (auto *Inst = dyn_cast<Instruction>(&V))
170       return isDefinitionAcrossSuspend(*Inst, U);
171 
172     llvm_unreachable(
173         "Coroutine could only collect Argument and Instruction now.");
174   }
175 };
176 } // end anonymous namespace
177 
178 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
dump(StringRef Label,BitVector const & BV) const179 LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
180                                                 BitVector const &BV) const {
181   dbgs() << Label << ":";
182   for (size_t I = 0, N = BV.size(); I < N; ++I)
183     if (BV[I])
184       dbgs() << " " << Mapping.indexToBlock(I)->getName();
185   dbgs() << "\n";
186 }
187 
dump() const188 LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
189   for (size_t I = 0, N = Block.size(); I < N; ++I) {
190     BasicBlock *const B = Mapping.indexToBlock(I);
191     dbgs() << B->getName() << ":\n";
192     dump("   Consumes", Block[I].Consumes);
193     dump("      Kills", Block[I].Kills);
194   }
195   dbgs() << "\n";
196 }
197 #endif
198 
SuspendCrossingInfo(Function & F,coro::Shape & Shape)199 SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
200     : Mapping(F) {
201   const size_t N = Mapping.size();
202   Block.resize(N);
203 
204   // Initialize every block so that it consumes itself
205   for (size_t I = 0; I < N; ++I) {
206     auto &B = Block[I];
207     B.Consumes.resize(N);
208     B.Kills.resize(N);
209     B.Consumes.set(I);
210   }
211 
212   // Mark all CoroEnd Blocks. We do not propagate Kills beyond coro.ends as
213   // the code beyond coro.end is reachable during initial invocation of the
214   // coroutine.
215   for (auto *CE : Shape.CoroEnds)
216     getBlockData(CE->getParent()).End = true;
217 
218   // Mark all suspend blocks and indicate that they kill everything they
219   // consume. Note, that crossing coro.save also requires a spill, as any code
220   // between coro.save and coro.suspend may resume the coroutine and all of the
221   // state needs to be saved by that time.
222   auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
223     BasicBlock *SuspendBlock = BarrierInst->getParent();
224     auto &B = getBlockData(SuspendBlock);
225     B.Suspend = true;
226     B.Kills |= B.Consumes;
227   };
228   for (auto *CSI : Shape.CoroSuspends) {
229     markSuspendBlock(CSI);
230     if (auto *Save = CSI->getCoroSave())
231       markSuspendBlock(Save);
232   }
233 
234   // Iterate propagating consumes and kills until they stop changing.
235   int Iteration = 0;
236   (void)Iteration;
237 
238   bool Changed;
239   do {
240     LLVM_DEBUG(dbgs() << "iteration " << ++Iteration);
241     LLVM_DEBUG(dbgs() << "==============\n");
242 
243     Changed = false;
244     for (size_t I = 0; I < N; ++I) {
245       auto &B = Block[I];
246       for (BasicBlock *SI : successors(B)) {
247 
248         auto SuccNo = Mapping.blockToIndex(SI);
249 
250         // Saved Consumes and Kills bitsets so that it is easy to see
251         // if anything changed after propagation.
252         auto &S = Block[SuccNo];
253         auto SavedConsumes = S.Consumes;
254         auto SavedKills = S.Kills;
255 
256         // Propagate Kills and Consumes from block B into its successor S.
257         S.Consumes |= B.Consumes;
258         S.Kills |= B.Kills;
259 
260         // If block B is a suspend block, it should propagate kills into the
261         // its successor for every block B consumes.
262         if (B.Suspend) {
263           S.Kills |= B.Consumes;
264         }
265         if (S.Suspend) {
266           // If block S is a suspend block, it should kill all of the blocks it
267           // consumes.
268           S.Kills |= S.Consumes;
269         } else if (S.End) {
270           // If block S is an end block, it should not propagate kills as the
271           // blocks following coro.end() are reached during initial invocation
272           // of the coroutine while all the data are still available on the
273           // stack or in the registers.
274           S.Kills.reset();
275         } else {
276           // This is reached when S block it not Suspend nor coro.end and it
277           // need to make sure that it is not in the kill set.
278           S.Kills.reset(SuccNo);
279         }
280 
281         // See if anything changed.
282         Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);
283 
284         if (S.Kills != SavedKills) {
285           LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
286                             << "\n");
287           LLVM_DEBUG(dump("S.Kills", S.Kills));
288           LLVM_DEBUG(dump("SavedKills", SavedKills));
289         }
290         if (S.Consumes != SavedConsumes) {
291           LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI << "\n");
292           LLVM_DEBUG(dump("S.Consume", S.Consumes));
293           LLVM_DEBUG(dump("SavedCons", SavedConsumes));
294         }
295       }
296     }
297   } while (Changed);
298   LLVM_DEBUG(dump());
299 }
300 
301 #undef DEBUG_TYPE // "coro-suspend-crossing"
302 #define DEBUG_TYPE "coro-frame"
303 
304 namespace {
305 class FrameTypeBuilder;
306 // Mapping from the to-be-spilled value to all the users that need reload.
307 using SpillInfo = SmallMapVector<Value *, SmallVector<Instruction *, 2>, 8>;
308 struct AllocaInfo {
309   AllocaInst *Alloca;
310   DenseMap<Instruction *, llvm::Optional<APInt>> Aliases;
311   bool MayWriteBeforeCoroBegin;
AllocaInfo__anon6bb6f9a70511::AllocaInfo312   AllocaInfo(AllocaInst *Alloca,
313              DenseMap<Instruction *, llvm::Optional<APInt>> Aliases,
314              bool MayWriteBeforeCoroBegin)
315       : Alloca(Alloca), Aliases(std::move(Aliases)),
316         MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
317 };
318 struct FrameDataInfo {
319   // All the values (that are not allocas) that needs to be spilled to the
320   // frame.
321   SpillInfo Spills;
322   // Allocas contains all values defined as allocas that need to live in the
323   // frame.
324   SmallVector<AllocaInfo, 8> Allocas;
325 
getAllDefs__anon6bb6f9a70511::FrameDataInfo326   SmallVector<Value *, 8> getAllDefs() const {
327     SmallVector<Value *, 8> Defs;
328     for (const auto &P : Spills)
329       Defs.push_back(P.first);
330     for (const auto &A : Allocas)
331       Defs.push_back(A.Alloca);
332     return Defs;
333   }
334 
getFieldIndex__anon6bb6f9a70511::FrameDataInfo335   uint32_t getFieldIndex(Value *V) const {
336     auto Itr = FieldIndexMap.find(V);
337     assert(Itr != FieldIndexMap.end() &&
338            "Value does not have a frame field index");
339     return Itr->second;
340   }
341 
setFieldIndex__anon6bb6f9a70511::FrameDataInfo342   void setFieldIndex(Value *V, uint32_t Index) {
343     assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
344            "Cannot set the index for the same field twice.");
345     FieldIndexMap[V] = Index;
346   }
347 
getAlign__anon6bb6f9a70511::FrameDataInfo348   uint64_t getAlign(Value *V) const {
349     auto Iter = FieldAlignMap.find(V);
350     assert(Iter != FieldAlignMap.end());
351     return Iter->second;
352   }
353 
setAlign__anon6bb6f9a70511::FrameDataInfo354   void setAlign(Value *V, uint64_t Align) {
355     assert(FieldAlignMap.count(V) == 0);
356     FieldAlignMap.insert({V, Align});
357   }
358 
getOffset__anon6bb6f9a70511::FrameDataInfo359   uint64_t getOffset(Value *V) const {
360     auto Iter = FieldOffsetMap.find(V);
361     assert(Iter != FieldOffsetMap.end());
362     return Iter->second;
363   }
364 
setOffset__anon6bb6f9a70511::FrameDataInfo365   void setOffset(Value *V, uint64_t Offset) {
366     assert(FieldOffsetMap.count(V) == 0);
367     FieldOffsetMap.insert({V, Offset});
368   }
369 
370   // Remap the index of every field in the frame, using the final layout index.
371   void updateLayoutIndex(FrameTypeBuilder &B);
372 
373 private:
374   // LayoutIndexUpdateStarted is used to avoid updating the index of any field
375   // twice by mistake.
376   bool LayoutIndexUpdateStarted = false;
377   // Map from values to their slot indexes on the frame. They will be first set
378   // with their original insertion field index. After the frame is built, their
379   // indexes will be updated into the final layout index.
380   DenseMap<Value *, uint32_t> FieldIndexMap;
381   // Map from values to their alignment on the frame. They would be set after
382   // the frame is built.
383   DenseMap<Value *, uint64_t> FieldAlignMap;
384   // Map from values to their offset on the frame. They would be set after
385   // the frame is built.
386   DenseMap<Value *, uint64_t> FieldOffsetMap;
387 };
388 } // namespace
389 
390 #ifndef NDEBUG
dumpSpills(StringRef Title,const SpillInfo & Spills)391 static void dumpSpills(StringRef Title, const SpillInfo &Spills) {
392   dbgs() << "------------- " << Title << "--------------\n";
393   for (const auto &E : Spills) {
394     E.first->dump();
395     dbgs() << "   user: ";
396     for (auto *I : E.second)
397       I->dump();
398   }
399 }
400 
dumpAllocas(const SmallVectorImpl<AllocaInfo> & Allocas)401 static void dumpAllocas(const SmallVectorImpl<AllocaInfo> &Allocas) {
402   dbgs() << "------------- Allocas --------------\n";
403   for (const auto &A : Allocas) {
404     A.Alloca->dump();
405   }
406 }
407 #endif
408 
409 namespace {
410 using FieldIDType = size_t;
411 // We cannot rely solely on natural alignment of a type when building a
412 // coroutine frame and if the alignment specified on the Alloca instruction
413 // differs from the natural alignment of the alloca type we will need to insert
414 // padding.
415 class FrameTypeBuilder {
416 private:
417   struct Field {
418     uint64_t Size;
419     uint64_t Offset;
420     Type *Ty;
421     FieldIDType LayoutFieldIndex;
422     Align Alignment;
423     Align TyAlignment;
424   };
425 
426   const DataLayout &DL;
427   LLVMContext &Context;
428   uint64_t StructSize = 0;
429   Align StructAlign;
430   bool IsFinished = false;
431 
432   SmallVector<Field, 8> Fields;
433   DenseMap<Value*, unsigned> FieldIndexByKey;
434 
435 public:
FrameTypeBuilder(LLVMContext & Context,DataLayout const & DL)436   FrameTypeBuilder(LLVMContext &Context, DataLayout const &DL)
437       : DL(DL), Context(Context) {}
438 
439   /// Add a field to this structure for the storage of an `alloca`
440   /// instruction.
addFieldForAlloca(AllocaInst * AI,bool IsHeader=false)441   LLVM_NODISCARD FieldIDType addFieldForAlloca(AllocaInst *AI,
442                                                bool IsHeader = false) {
443     Type *Ty = AI->getAllocatedType();
444 
445     // Make an array type if this is a static array allocation.
446     if (AI->isArrayAllocation()) {
447       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
448         Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
449       else
450         report_fatal_error("Coroutines cannot handle non static allocas yet");
451     }
452 
453     return addField(Ty, AI->getAlign(), IsHeader);
454   }
455 
456   /// We want to put the allocas whose lifetime-ranges are not overlapped
457   /// into one slot of coroutine frame.
458   /// Consider the example at:https://bugs.llvm.org/show_bug.cgi?id=45566
459   ///
460   ///     cppcoro::task<void> alternative_paths(bool cond) {
461   ///         if (cond) {
462   ///             big_structure a;
463   ///             process(a);
464   ///             co_await something();
465   ///         } else {
466   ///             big_structure b;
467   ///             process2(b);
468   ///             co_await something();
469   ///         }
470   ///     }
471   ///
472   /// We want to put variable a and variable b in the same slot to
473   /// reduce the size of coroutine frame.
474   ///
475   /// This function use StackLifetime algorithm to partition the AllocaInsts in
476   /// Spills to non-overlapped sets in order to put Alloca in the same
477   /// non-overlapped set into the same slot in the Coroutine Frame. Then add
478   /// field for the allocas in the same non-overlapped set by using the largest
479   /// type as the field type.
480   ///
481   /// Side Effects: Because We sort the allocas, the order of allocas in the
482   /// frame may be different with the order in the source code.
483   void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
484                           coro::Shape &Shape);
485 
486   /// Add a field to this structure.
addField(Type * Ty,MaybeAlign FieldAlignment,bool IsHeader=false)487   LLVM_NODISCARD FieldIDType addField(Type *Ty, MaybeAlign FieldAlignment,
488                                       bool IsHeader = false) {
489     assert(!IsFinished && "adding fields to a finished builder");
490     assert(Ty && "must provide a type for a field");
491 
492     // The field size is always the alloc size of the type.
493     uint64_t FieldSize = DL.getTypeAllocSize(Ty);
494 
495     // For an alloca with size=0, we don't need to add a field and they
496     // can just point to any index in the frame. Use index 0.
497     if (FieldSize == 0) {
498       return 0;
499     }
500 
501     // The field alignment might not be the type alignment, but we need
502     // to remember the type alignment anyway to build the type.
503     Align TyAlignment = DL.getABITypeAlign(Ty);
504     if (!FieldAlignment) FieldAlignment = TyAlignment;
505 
506     // Lay out header fields immediately.
507     uint64_t Offset;
508     if (IsHeader) {
509       Offset = alignTo(StructSize, FieldAlignment);
510       StructSize = Offset + FieldSize;
511 
512     // Everything else has a flexible offset.
513     } else {
514       Offset = OptimizedStructLayoutField::FlexibleOffset;
515     }
516 
517     Fields.push_back({FieldSize, Offset, Ty, 0, *FieldAlignment, TyAlignment});
518     return Fields.size() - 1;
519   }
520 
521   /// Finish the layout and set the body on the given type.
522   void finish(StructType *Ty);
523 
getStructSize() const524   uint64_t getStructSize() const {
525     assert(IsFinished && "not yet finished!");
526     return StructSize;
527   }
528 
getStructAlign() const529   Align getStructAlign() const {
530     assert(IsFinished && "not yet finished!");
531     return StructAlign;
532   }
533 
getLayoutFieldIndex(FieldIDType Id) const534   FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
535     assert(IsFinished && "not yet finished!");
536     return Fields[Id].LayoutFieldIndex;
537   }
538 
getLayoutField(FieldIDType Id) const539   Field getLayoutField(FieldIDType Id) const {
540     assert(IsFinished && "not yet finished!");
541     return Fields[Id];
542   }
543 };
544 } // namespace
545 
updateLayoutIndex(FrameTypeBuilder & B)546 void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
547   auto Updater = [&](Value *I) {
548     auto Field = B.getLayoutField(getFieldIndex(I));
549     setFieldIndex(I, Field.LayoutFieldIndex);
550     setAlign(I, Field.Alignment.value());
551     setOffset(I, Field.Offset);
552   };
553   LayoutIndexUpdateStarted = true;
554   for (auto &S : Spills)
555     Updater(S.first);
556   for (const auto &A : Allocas)
557     Updater(A.Alloca);
558   LayoutIndexUpdateStarted = false;
559 }
560 
addFieldForAllocas(const Function & F,FrameDataInfo & FrameData,coro::Shape & Shape)561 void FrameTypeBuilder::addFieldForAllocas(const Function &F,
562                                           FrameDataInfo &FrameData,
563                                           coro::Shape &Shape) {
564   using AllocaSetType = SmallVector<AllocaInst *, 4>;
565   SmallVector<AllocaSetType, 4> NonOverlapedAllocas;
566 
567   // We need to add field for allocas at the end of this function. However, this
568   // function has multiple exits, so we use this helper to avoid redundant code.
569   struct RTTIHelper {
570     std::function<void()> func;
571     RTTIHelper(std::function<void()> &&func) : func(func) {}
572     ~RTTIHelper() { func(); }
573   } Helper([&]() {
574     for (auto AllocaList : NonOverlapedAllocas) {
575       auto *LargestAI = *AllocaList.begin();
576       FieldIDType Id = addFieldForAlloca(LargestAI);
577       for (auto *Alloca : AllocaList)
578         FrameData.setFieldIndex(Alloca, Id);
579     }
580   });
581 
582   if (!Shape.ReuseFrameSlot && !EnableReuseStorageInFrame) {
583     for (const auto &A : FrameData.Allocas) {
584       AllocaInst *Alloca = A.Alloca;
585       NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
586     }
587     return;
588   }
589 
590   // Because there are pathes from the lifetime.start to coro.end
591   // for each alloca, the liferanges for every alloca is overlaped
592   // in the blocks who contain coro.end and the successor blocks.
593   // So we choose to skip there blocks when we calculates the liferange
594   // for each alloca. It should be reasonable since there shouldn't be uses
595   // in these blocks and the coroutine frame shouldn't be used outside the
596   // coroutine body.
597   //
598   // Note that the user of coro.suspend may not be SwitchInst. However, this
599   // case seems too complex to handle. And it is harmless to skip these
600   // patterns since it just prevend putting the allocas to live in the same
601   // slot.
602   DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
603   for (auto CoroSuspendInst : Shape.CoroSuspends) {
604     for (auto U : CoroSuspendInst->users()) {
605       if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
606         auto *SWI = const_cast<SwitchInst *>(ConstSWI);
607         DefaultSuspendDest[SWI] = SWI->getDefaultDest();
608         SWI->setDefaultDest(SWI->getSuccessor(1));
609       }
610     }
611   }
612 
613   auto ExtractAllocas = [&]() {
614     AllocaSetType Allocas;
615     Allocas.reserve(FrameData.Allocas.size());
616     for (const auto &A : FrameData.Allocas)
617       Allocas.push_back(A.Alloca);
618     return Allocas;
619   };
620   StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
621                                       StackLifetime::LivenessType::May);
622   StackLifetimeAnalyzer.run();
623   auto IsAllocaInferenre = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
624     return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
625         StackLifetimeAnalyzer.getLiveRange(AI2));
626   };
627   auto GetAllocaSize = [&](const AllocaInfo &A) {
628     Optional<TypeSize> RetSize = A.Alloca->getAllocationSizeInBits(DL);
629     assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
630     assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
631     return RetSize->getFixedSize();
632   };
633   // Put larger allocas in the front. So the larger allocas have higher
634   // priority to merge, which can save more space potentially. Also each
635   // AllocaSet would be ordered. So we can get the largest Alloca in one
636   // AllocaSet easily.
637   sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
638     return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
639   });
640   for (const auto &A : FrameData.Allocas) {
641     AllocaInst *Alloca = A.Alloca;
642     bool Merged = false;
643     // Try to find if the Alloca is not inferenced with any existing
644     // NonOverlappedAllocaSet. If it is true, insert the alloca to that
645     // NonOverlappedAllocaSet.
646     for (auto &AllocaSet : NonOverlapedAllocas) {
647       assert(!AllocaSet.empty() && "Processing Alloca Set is not empty.\n");
648       bool NoInference = none_of(AllocaSet, [&](auto Iter) {
649         return IsAllocaInferenre(Alloca, Iter);
650       });
651       // If the alignment of A is multiple of the alignment of B, the address
652       // of A should satisfy the requirement for aligning for B.
653       //
654       // There may be other more fine-grained strategies to handle the alignment
655       // infomation during the merging process. But it seems hard to handle
656       // these strategies and benefit little.
657       bool Alignable = [&]() -> bool {
658         auto *LargestAlloca = *AllocaSet.begin();
659         return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==
660                0;
661       }();
662       bool CouldMerge = NoInference && Alignable;
663       if (!CouldMerge)
664         continue;
665       AllocaSet.push_back(Alloca);
666       Merged = true;
667       break;
668     }
669     if (!Merged) {
670       NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
671     }
672   }
673   // Recover the default target destination for each Switch statement
674   // reserved.
675   for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
676     SwitchInst *SWI = SwitchAndDefaultDest.first;
677     BasicBlock *DestBB = SwitchAndDefaultDest.second;
678     SWI->setDefaultDest(DestBB);
679   }
680   // This Debug Info could tell us which allocas are merged into one slot.
681   LLVM_DEBUG(for (auto &AllocaSet
682                   : NonOverlapedAllocas) {
683     if (AllocaSet.size() > 1) {
684       dbgs() << "In Function:" << F.getName() << "\n";
685       dbgs() << "Find Union Set "
686              << "\n";
687       dbgs() << "\tAllocas are \n";
688       for (auto Alloca : AllocaSet)
689         dbgs() << "\t\t" << *Alloca << "\n";
690     }
691   });
692 }
693 
finish(StructType * Ty)694 void FrameTypeBuilder::finish(StructType *Ty) {
695   assert(!IsFinished && "already finished!");
696 
697   // Prepare the optimal-layout field array.
698   // The Id in the layout field is a pointer to our Field for it.
699   SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
700   LayoutFields.reserve(Fields.size());
701   for (auto &Field : Fields) {
702     LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
703                               Field.Offset);
704   }
705 
706   // Perform layout.
707   auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
708   StructSize = SizeAndAlign.first;
709   StructAlign = SizeAndAlign.second;
710 
711   auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
712     return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
713   };
714 
715   // We need to produce a packed struct type if there's a field whose
716   // assigned offset isn't a multiple of its natural type alignment.
717   bool Packed = [&] {
718     for (auto &LayoutField : LayoutFields) {
719       auto &F = getField(LayoutField);
720       if (!isAligned(F.TyAlignment, LayoutField.Offset))
721         return true;
722     }
723     return false;
724   }();
725 
726   // Build the struct body.
727   SmallVector<Type*, 16> FieldTypes;
728   FieldTypes.reserve(LayoutFields.size() * 3 / 2);
729   uint64_t LastOffset = 0;
730   for (auto &LayoutField : LayoutFields) {
731     auto &F = getField(LayoutField);
732 
733     auto Offset = LayoutField.Offset;
734 
735     // Add a padding field if there's a padding gap and we're either
736     // building a packed struct or the padding gap is more than we'd
737     // get from aligning to the field type's natural alignment.
738     assert(Offset >= LastOffset);
739     if (Offset != LastOffset) {
740       if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset)
741         FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context),
742                                             Offset - LastOffset));
743     }
744 
745     F.Offset = Offset;
746     F.LayoutFieldIndex = FieldTypes.size();
747 
748     FieldTypes.push_back(F.Ty);
749     LastOffset = Offset + F.Size;
750   }
751 
752   Ty->setBody(FieldTypes, Packed);
753 
754 #ifndef NDEBUG
755   // Check that the IR layout matches the offsets we expect.
756   auto Layout = DL.getStructLayout(Ty);
757   for (auto &F : Fields) {
758     assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
759     assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
760   }
761 #endif
762 
763   IsFinished = true;
764 }
765 
cacheDIVar(FrameDataInfo & FrameData,DenseMap<Value *,DILocalVariable * > & DIVarCache)766 static void cacheDIVar(FrameDataInfo &FrameData,
767                        DenseMap<Value *, DILocalVariable *> &DIVarCache) {
768   for (auto *V : FrameData.getAllDefs()) {
769     if (DIVarCache.find(V) != DIVarCache.end())
770       continue;
771 
772     auto DDIs = FindDbgDeclareUses(V);
773     auto *I = llvm::find_if(DDIs, [](DbgDeclareInst *DDI) {
774       return DDI->getExpression()->getNumElements() == 0;
775     });
776     if (I != DDIs.end())
777       DIVarCache.insert({V, (*I)->getVariable()});
778   }
779 }
780 
781 /// Create name for Type. It uses MDString to store new created string to
782 /// avoid memory leak.
solveTypeName(Type * Ty)783 static StringRef solveTypeName(Type *Ty) {
784   if (Ty->isIntegerTy()) {
785     // The longest name in common may be '__int_128', which has 9 bits.
786     SmallString<16> Buffer;
787     raw_svector_ostream OS(Buffer);
788     OS << "__int_" << cast<IntegerType>(Ty)->getBitWidth();
789     auto *MDName = MDString::get(Ty->getContext(), OS.str());
790     return MDName->getString();
791   }
792 
793   if (Ty->isFloatingPointTy()) {
794     if (Ty->isFloatTy())
795       return "__float_";
796     if (Ty->isDoubleTy())
797       return "__double_";
798     return "__floating_type_";
799   }
800 
801   if (Ty->isPointerTy()) {
802     auto *PtrTy = cast<PointerType>(Ty);
803     Type *PointeeTy = PtrTy->getElementType();
804     auto Name = solveTypeName(PointeeTy);
805     if (Name == "UnknownType")
806       return "PointerType";
807     SmallString<16> Buffer;
808     Twine(Name + "_Ptr").toStringRef(Buffer);
809     auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
810     return MDName->getString();
811   }
812 
813   if (Ty->isStructTy()) {
814     if (!cast<StructType>(Ty)->hasName())
815       return "__LiteralStructType_";
816 
817     auto Name = Ty->getStructName();
818 
819     SmallString<16> Buffer(Name);
820     for_each(Buffer, [](auto &Iter) {
821       if (Iter == '.' || Iter == ':')
822         Iter = '_';
823     });
824     auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
825     return MDName->getString();
826   }
827 
828   return "UnknownType";
829 }
830 
solveDIType(DIBuilder & Builder,Type * Ty,DataLayout & Layout,DIScope * Scope,unsigned LineNum,DenseMap<Type *,DIType * > & DITypeCache)831 static DIType *solveDIType(DIBuilder &Builder, Type *Ty, DataLayout &Layout,
832                            DIScope *Scope, unsigned LineNum,
833                            DenseMap<Type *, DIType *> &DITypeCache) {
834   if (DIType *DT = DITypeCache.lookup(Ty))
835     return DT;
836 
837   StringRef Name = solveTypeName(Ty);
838 
839   DIType *RetType = nullptr;
840 
841   if (Ty->isIntegerTy()) {
842     auto BitWidth = cast<IntegerType>(Ty)->getBitWidth();
843     RetType = Builder.createBasicType(Name, BitWidth, dwarf::DW_ATE_signed,
844                                       llvm::DINode::FlagArtificial);
845   } else if (Ty->isFloatingPointTy()) {
846     RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
847                                       dwarf::DW_ATE_float,
848                                       llvm::DINode::FlagArtificial);
849   } else if (Ty->isPointerTy()) {
850     // Construct BasicType instead of PointerType to avoid infinite
851     // search problem.
852     // For example, we would be in trouble if we traverse recursively:
853     //
854     //  struct Node {
855     //      Node* ptr;
856     //  };
857     RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
858                                       dwarf::DW_ATE_address,
859                                       llvm::DINode::FlagArtificial);
860   } else if (Ty->isStructTy()) {
861     auto *DIStruct = Builder.createStructType(
862         Scope, Name, Scope->getFile(), LineNum, Layout.getTypeSizeInBits(Ty),
863         Layout.getPrefTypeAlignment(Ty), llvm::DINode::FlagArtificial, nullptr,
864         llvm::DINodeArray());
865 
866     auto *StructTy = cast<StructType>(Ty);
867     SmallVector<Metadata *, 16> Elements;
868     for (unsigned I = 0; I < StructTy->getNumElements(); I++) {
869       DIType *DITy = solveDIType(Builder, StructTy->getElementType(I), Layout,
870                                  Scope, LineNum, DITypeCache);
871       assert(DITy);
872       Elements.push_back(Builder.createMemberType(
873           Scope, DITy->getName(), Scope->getFile(), LineNum,
874           DITy->getSizeInBits(), DITy->getAlignInBits(),
875           Layout.getStructLayout(StructTy)->getElementOffsetInBits(I),
876           llvm::DINode::FlagArtificial, DITy));
877     }
878 
879     Builder.replaceArrays(DIStruct, Builder.getOrCreateArray(Elements));
880 
881     RetType = DIStruct;
882   } else {
883     LLVM_DEBUG(dbgs() << "Unresolved Type: " << *Ty << "\n";);
884     SmallString<32> Buffer;
885     raw_svector_ostream OS(Buffer);
886     OS << Name.str() << "_" << Layout.getTypeSizeInBits(Ty);
887     RetType = Builder.createBasicType(OS.str(), Layout.getTypeSizeInBits(Ty),
888                                       dwarf::DW_ATE_address,
889                                       llvm::DINode::FlagArtificial);
890   }
891 
892   DITypeCache.insert({Ty, RetType});
893   return RetType;
894 }
895 
896 /// Build artificial debug info for C++ coroutine frames to allow users to
897 /// inspect the contents of the frame directly
898 ///
899 /// Create Debug information for coroutine frame with debug name "__coro_frame".
900 /// The debug information for the fields of coroutine frame is constructed from
901 /// the following way:
902 /// 1. For all the value in the Frame, we search the use of dbg.declare to find
903 ///    the corresponding debug variables for the value. If we can find the
904 ///    debug variable, we can get full and accurate debug information.
905 /// 2. If we can't get debug information in step 1 and 2, we could only try to
906 ///    build the DIType by Type. We did this in solveDIType. We only handle
907 ///    integer, float, double, integer type and struct type for now.
buildFrameDebugInfo(Function & F,coro::Shape & Shape,FrameDataInfo & FrameData)908 static void buildFrameDebugInfo(Function &F, coro::Shape &Shape,
909                                 FrameDataInfo &FrameData) {
910   DISubprogram *DIS = F.getSubprogram();
911   // If there is no DISubprogram for F, it implies the Function are not compiled
912   // with debug info. So we also don't need to generate debug info for the frame
913   // neither.
914   if (!DIS || !DIS->getUnit() ||
915       !dwarf::isCPlusPlus(
916           (dwarf::SourceLanguage)DIS->getUnit()->getSourceLanguage()))
917     return;
918 
919   assert(Shape.ABI == coro::ABI::Switch &&
920          "We could only build debug infomation for C++ coroutine now.\n");
921 
922   DIBuilder DBuilder(*F.getParent(), /*AllowUnresolved*/ false);
923 
924   AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
925   assert(PromiseAlloca &&
926          "Coroutine with switch ABI should own Promise alloca");
927 
928   TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(PromiseAlloca);
929   if (DIs.empty())
930     return;
931 
932   DbgDeclareInst *PromiseDDI = DIs.front();
933   DILocalVariable *PromiseDIVariable = PromiseDDI->getVariable();
934   DILocalScope *PromiseDIScope = PromiseDIVariable->getScope();
935   DIFile *DFile = PromiseDIScope->getFile();
936   DILocation *DILoc = PromiseDDI->getDebugLoc().get();
937   unsigned LineNum = PromiseDIVariable->getLine();
938 
939   DICompositeType *FrameDITy = DBuilder.createStructType(
940       PromiseDIScope, "__coro_frame_ty", DFile, LineNum, Shape.FrameSize * 8,
941       Shape.FrameAlign.value() * 8, llvm::DINode::FlagArtificial, nullptr,
942       llvm::DINodeArray());
943   StructType *FrameTy = Shape.FrameTy;
944   SmallVector<Metadata *, 16> Elements;
945   DataLayout Layout = F.getParent()->getDataLayout();
946 
947   DenseMap<Value *, DILocalVariable *> DIVarCache;
948   cacheDIVar(FrameData, DIVarCache);
949 
950   unsigned ResumeIndex = coro::Shape::SwitchFieldIndex::Resume;
951   unsigned DestroyIndex = coro::Shape::SwitchFieldIndex::Destroy;
952   unsigned IndexIndex = Shape.SwitchLowering.IndexField;
953 
954   DenseMap<unsigned, StringRef> NameCache;
955   NameCache.insert({ResumeIndex, "__resume_fn"});
956   NameCache.insert({DestroyIndex, "__destroy_fn"});
957   NameCache.insert({IndexIndex, "__coro_index"});
958 
959   Type *ResumeFnTy = FrameTy->getElementType(ResumeIndex),
960        *DestroyFnTy = FrameTy->getElementType(DestroyIndex),
961        *IndexTy = FrameTy->getElementType(IndexIndex);
962 
963   DenseMap<unsigned, DIType *> TyCache;
964   TyCache.insert({ResumeIndex,
965                   DBuilder.createBasicType("__resume_fn",
966                                            Layout.getTypeSizeInBits(ResumeFnTy),
967                                            dwarf::DW_ATE_address)});
968   TyCache.insert(
969       {DestroyIndex, DBuilder.createBasicType(
970                          "__destroy_fn", Layout.getTypeSizeInBits(DestroyFnTy),
971                          dwarf::DW_ATE_address)});
972 
973   /// FIXME: If we fill the field `SizeInBits` with the actual size of
974   /// __coro_index in bits, then __coro_index wouldn't show in the debugger.
975   TyCache.insert({IndexIndex, DBuilder.createBasicType(
976                                   "__coro_index",
977                                   (Layout.getTypeSizeInBits(IndexTy) < 8)
978                                       ? 8
979                                       : Layout.getTypeSizeInBits(IndexTy),
980                                   dwarf::DW_ATE_unsigned_char)});
981 
982   for (auto *V : FrameData.getAllDefs()) {
983     if (DIVarCache.find(V) == DIVarCache.end())
984       continue;
985 
986     auto Index = FrameData.getFieldIndex(V);
987 
988     NameCache.insert({Index, DIVarCache[V]->getName()});
989     TyCache.insert({Index, DIVarCache[V]->getType()});
990   }
991 
992   // Cache from index to (Align, Offset Pair)
993   DenseMap<unsigned, std::pair<unsigned, unsigned>> OffsetCache;
994   // The Align and Offset of Resume function and Destroy function are fixed.
995   OffsetCache.insert({ResumeIndex, {8, 0}});
996   OffsetCache.insert({DestroyIndex, {8, 8}});
997   OffsetCache.insert(
998       {IndexIndex,
999        {Shape.SwitchLowering.IndexAlign, Shape.SwitchLowering.IndexOffset}});
1000 
1001   for (auto *V : FrameData.getAllDefs()) {
1002     auto Index = FrameData.getFieldIndex(V);
1003 
1004     OffsetCache.insert(
1005         {Index, {FrameData.getAlign(V), FrameData.getOffset(V)}});
1006   }
1007 
1008   DenseMap<Type *, DIType *> DITypeCache;
1009   // This counter is used to avoid same type names. e.g., there would be
1010   // many i32 and i64 types in one coroutine. And we would use i32_0 and
1011   // i32_1 to avoid the same type. Since it makes no sense the name of the
1012   // fields confilicts with each other.
1013   unsigned UnknownTypeNum = 0;
1014   for (unsigned Index = 0; Index < FrameTy->getNumElements(); Index++) {
1015     if (OffsetCache.find(Index) == OffsetCache.end())
1016       continue;
1017 
1018     std::string Name;
1019     uint64_t SizeInBits;
1020     uint32_t AlignInBits;
1021     uint64_t OffsetInBits;
1022     DIType *DITy = nullptr;
1023 
1024     Type *Ty = FrameTy->getElementType(Index);
1025     assert(Ty->isSized() && "We can't handle type which is not sized.\n");
1026     SizeInBits = Layout.getTypeSizeInBits(Ty).getFixedSize();
1027     AlignInBits = OffsetCache[Index].first * 8;
1028     OffsetInBits = OffsetCache[Index].second * 8;
1029 
1030     if (NameCache.find(Index) != NameCache.end()) {
1031       Name = NameCache[Index].str();
1032       DITy = TyCache[Index];
1033     } else {
1034       DITy = solveDIType(DBuilder, Ty, Layout, PromiseDIScope, LineNum,
1035                          DITypeCache);
1036       assert(DITy && "SolveDIType shouldn't return nullptr.\n");
1037       Name = DITy->getName().str();
1038       Name += "_" + std::to_string(UnknownTypeNum);
1039       UnknownTypeNum++;
1040     }
1041 
1042     Elements.push_back(DBuilder.createMemberType(
1043         PromiseDIScope, Name, DFile, LineNum, SizeInBits, AlignInBits,
1044         OffsetInBits, llvm::DINode::FlagArtificial, DITy));
1045   }
1046 
1047   DBuilder.replaceArrays(FrameDITy, DBuilder.getOrCreateArray(Elements));
1048 
1049   auto *FrameDIVar = DBuilder.createAutoVariable(PromiseDIScope, "__coro_frame",
1050                                                  DFile, LineNum, FrameDITy,
1051                                                  true, DINode::FlagArtificial);
1052   assert(FrameDIVar->isValidLocationForIntrinsic(PromiseDDI->getDebugLoc()));
1053 
1054   // Subprogram would have ContainedNodes field which records the debug
1055   // variables it contained. So we need to add __coro_frame to the
1056   // ContainedNodes of it.
1057   //
1058   // If we don't add __coro_frame to the RetainedNodes, user may get
1059   // `no symbol __coro_frame in context` rather than `__coro_frame`
1060   // is optimized out, which is more precise.
1061   if (auto *SubProgram = dyn_cast<DISubprogram>(PromiseDIScope)) {
1062     auto RetainedNodes = SubProgram->getRetainedNodes();
1063     SmallVector<Metadata *, 32> RetainedNodesVec(RetainedNodes.begin(),
1064                                                  RetainedNodes.end());
1065     RetainedNodesVec.push_back(FrameDIVar);
1066     SubProgram->replaceOperandWith(
1067         7, (MDTuple::get(F.getContext(), RetainedNodesVec)));
1068   }
1069 
1070   DBuilder.insertDeclare(Shape.FramePtr, FrameDIVar,
1071                          DBuilder.createExpression(), DILoc,
1072                          Shape.FramePtr->getNextNode());
1073 }
1074 
1075 // Build a struct that will keep state for an active coroutine.
1076 //   struct f.frame {
1077 //     ResumeFnTy ResumeFnAddr;
1078 //     ResumeFnTy DestroyFnAddr;
1079 //     int ResumeIndex;
1080 //     ... promise (if present) ...
1081 //     ... spills ...
1082 //   };
buildFrameType(Function & F,coro::Shape & Shape,FrameDataInfo & FrameData)1083 static StructType *buildFrameType(Function &F, coro::Shape &Shape,
1084                                   FrameDataInfo &FrameData) {
1085   LLVMContext &C = F.getContext();
1086   const DataLayout &DL = F.getParent()->getDataLayout();
1087   StructType *FrameTy = [&] {
1088     SmallString<32> Name(F.getName());
1089     Name.append(".Frame");
1090     return StructType::create(C, Name);
1091   }();
1092 
1093   FrameTypeBuilder B(C, DL);
1094 
1095   AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
1096   Optional<FieldIDType> SwitchIndexFieldId;
1097 
1098   if (Shape.ABI == coro::ABI::Switch) {
1099     auto *FramePtrTy = FrameTy->getPointerTo();
1100     auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
1101                                    /*IsVarArg=*/false);
1102     auto *FnPtrTy = FnTy->getPointerTo();
1103 
1104     // Add header fields for the resume and destroy functions.
1105     // We can rely on these being perfectly packed.
1106     (void)B.addField(FnPtrTy, None, /*header*/ true);
1107     (void)B.addField(FnPtrTy, None, /*header*/ true);
1108 
1109     // PromiseAlloca field needs to be explicitly added here because it's
1110     // a header field with a fixed offset based on its alignment. Hence it
1111     // needs special handling and cannot be added to FrameData.Allocas.
1112     if (PromiseAlloca)
1113       FrameData.setFieldIndex(
1114           PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true));
1115 
1116     // Add a field to store the suspend index.  This doesn't need to
1117     // be in the header.
1118     unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
1119     Type *IndexType = Type::getIntNTy(C, IndexBits);
1120 
1121     SwitchIndexFieldId = B.addField(IndexType, None);
1122   } else {
1123     assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
1124   }
1125 
1126   // Because multiple allocas may own the same field slot,
1127   // we add allocas to field here.
1128   B.addFieldForAllocas(F, FrameData, Shape);
1129   // Add PromiseAlloca to Allocas list so that
1130   // 1. updateLayoutIndex could update its index after
1131   // `performOptimizedStructLayout`
1132   // 2. it is processed in insertSpills.
1133   if (Shape.ABI == coro::ABI::Switch && PromiseAlloca)
1134     // We assume that the promise alloca won't be modified before
1135     // CoroBegin and no alias will be create before CoroBegin.
1136     FrameData.Allocas.emplace_back(
1137         PromiseAlloca, DenseMap<Instruction *, llvm::Optional<APInt>>{}, false);
1138   // Create an entry for every spilled value.
1139   for (auto &S : FrameData.Spills) {
1140     FieldIDType Id = B.addField(S.first->getType(), None);
1141     FrameData.setFieldIndex(S.first, Id);
1142   }
1143 
1144   B.finish(FrameTy);
1145   FrameData.updateLayoutIndex(B);
1146   Shape.FrameAlign = B.getStructAlign();
1147   Shape.FrameSize = B.getStructSize();
1148 
1149   switch (Shape.ABI) {
1150   case coro::ABI::Switch: {
1151     // In the switch ABI, remember the switch-index field.
1152     auto IndexField = B.getLayoutField(*SwitchIndexFieldId);
1153     Shape.SwitchLowering.IndexField = IndexField.LayoutFieldIndex;
1154     Shape.SwitchLowering.IndexAlign = IndexField.Alignment.value();
1155     Shape.SwitchLowering.IndexOffset = IndexField.Offset;
1156 
1157     // Also round the frame size up to a multiple of its alignment, as is
1158     // generally expected in C/C++.
1159     Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
1160     break;
1161   }
1162 
1163   // In the retcon ABI, remember whether the frame is inline in the storage.
1164   case coro::ABI::Retcon:
1165   case coro::ABI::RetconOnce: {
1166     auto Id = Shape.getRetconCoroId();
1167     Shape.RetconLowering.IsFrameInlineInStorage
1168       = (B.getStructSize() <= Id->getStorageSize() &&
1169          B.getStructAlign() <= Id->getStorageAlignment());
1170     break;
1171   }
1172   case coro::ABI::Async: {
1173     Shape.AsyncLowering.FrameOffset =
1174         alignTo(Shape.AsyncLowering.ContextHeaderSize, Shape.FrameAlign);
1175     // Also make the final context size a multiple of the context alignment to
1176     // make allocation easier for allocators.
1177     Shape.AsyncLowering.ContextSize =
1178         alignTo(Shape.AsyncLowering.FrameOffset + Shape.FrameSize,
1179                 Shape.AsyncLowering.getContextAlignment());
1180     if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
1181       report_fatal_error(
1182           "The alignment requirment of frame variables cannot be higher than "
1183           "the alignment of the async function context");
1184     }
1185     break;
1186   }
1187   }
1188 
1189   return FrameTy;
1190 }
1191 
1192 // We use a pointer use visitor to track how an alloca is being used.
1193 // The goal is to be able to answer the following three questions:
1194 // 1. Should this alloca be allocated on the frame instead.
1195 // 2. Could the content of the alloca be modified prior to CoroBegn, which would
1196 // require copying the data from alloca to the frame after CoroBegin.
1197 // 3. Is there any alias created for this alloca prior to CoroBegin, but used
1198 // after CoroBegin. In that case, we will need to recreate the alias after
1199 // CoroBegin based off the frame. To answer question 1, we track two things:
1200 //   a. List of all BasicBlocks that use this alloca or any of the aliases of
1201 //   the alloca. In the end, we check if there exists any two basic blocks that
1202 //   cross suspension points. If so, this alloca must be put on the frame. b.
1203 //   Whether the alloca or any alias of the alloca is escaped at some point,
1204 //   either by storing the address somewhere, or the address is used in a
1205 //   function call that might capture. If it's ever escaped, this alloca must be
1206 //   put on the frame conservatively.
1207 // To answer quetion 2, we track through the variable MayWriteBeforeCoroBegin.
1208 // Whenever a potential write happens, either through a store instruction, a
1209 // function call or any of the memory intrinsics, we check whether this
1210 // instruction is prior to CoroBegin. To answer question 3, we track the offsets
1211 // of all aliases created for the alloca prior to CoroBegin but used after
1212 // CoroBegin. llvm::Optional is used to be able to represent the case when the
1213 // offset is unknown (e.g. when you have a PHINode that takes in different
1214 // offset values). We cannot handle unknown offsets and will assert. This is the
1215 // potential issue left out. An ideal solution would likely require a
1216 // significant redesign.
1217 namespace {
1218 struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
1219   using Base = PtrUseVisitor<AllocaUseVisitor>;
AllocaUseVisitor__anon6bb6f9a71411::AllocaUseVisitor1220   AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
1221                    const CoroBeginInst &CB, const SuspendCrossingInfo &Checker)
1222       : PtrUseVisitor(DL), DT(DT), CoroBegin(CB), Checker(Checker) {}
1223 
visit__anon6bb6f9a71411::AllocaUseVisitor1224   void visit(Instruction &I) {
1225     Users.insert(&I);
1226     Base::visit(I);
1227     // If the pointer is escaped prior to CoroBegin, we have to assume it would
1228     // be written into before CoroBegin as well.
1229     if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) {
1230       MayWriteBeforeCoroBegin = true;
1231     }
1232   }
1233   // We need to provide this overload as PtrUseVisitor uses a pointer based
1234   // visiting function.
visit__anon6bb6f9a71411::AllocaUseVisitor1235   void visit(Instruction *I) { return visit(*I); }
1236 
visitPHINode__anon6bb6f9a71411::AllocaUseVisitor1237   void visitPHINode(PHINode &I) {
1238     enqueueUsers(I);
1239     handleAlias(I);
1240   }
1241 
visitSelectInst__anon6bb6f9a71411::AllocaUseVisitor1242   void visitSelectInst(SelectInst &I) {
1243     enqueueUsers(I);
1244     handleAlias(I);
1245   }
1246 
visitStoreInst__anon6bb6f9a71411::AllocaUseVisitor1247   void visitStoreInst(StoreInst &SI) {
1248     // Regardless whether the alias of the alloca is the value operand or the
1249     // pointer operand, we need to assume the alloca is been written.
1250     handleMayWrite(SI);
1251 
1252     if (SI.getValueOperand() != U->get())
1253       return;
1254 
1255     // We are storing the pointer into a memory location, potentially escaping.
1256     // As an optimization, we try to detect simple cases where it doesn't
1257     // actually escape, for example:
1258     //   %ptr = alloca ..
1259     //   %addr = alloca ..
1260     //   store %ptr, %addr
1261     //   %x = load %addr
1262     //   ..
1263     // If %addr is only used by loading from it, we could simply treat %x as
1264     // another alias of %ptr, and not considering %ptr being escaped.
1265     auto IsSimpleStoreThenLoad = [&]() {
1266       auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
1267       // If the memory location we are storing to is not an alloca, it
1268       // could be an alias of some other memory locations, which is difficult
1269       // to analyze.
1270       if (!AI)
1271         return false;
1272       // StoreAliases contains aliases of the memory location stored into.
1273       SmallVector<Instruction *, 4> StoreAliases = {AI};
1274       while (!StoreAliases.empty()) {
1275         Instruction *I = StoreAliases.pop_back_val();
1276         for (User *U : I->users()) {
1277           // If we are loading from the memory location, we are creating an
1278           // alias of the original pointer.
1279           if (auto *LI = dyn_cast<LoadInst>(U)) {
1280             enqueueUsers(*LI);
1281             handleAlias(*LI);
1282             continue;
1283           }
1284           // If we are overriding the memory location, the pointer certainly
1285           // won't escape.
1286           if (auto *S = dyn_cast<StoreInst>(U))
1287             if (S->getPointerOperand() == I)
1288               continue;
1289           if (auto *II = dyn_cast<IntrinsicInst>(U))
1290             if (II->isLifetimeStartOrEnd())
1291               continue;
1292           // BitCastInst creats aliases of the memory location being stored
1293           // into.
1294           if (auto *BI = dyn_cast<BitCastInst>(U)) {
1295             StoreAliases.push_back(BI);
1296             continue;
1297           }
1298           return false;
1299         }
1300       }
1301 
1302       return true;
1303     };
1304 
1305     if (!IsSimpleStoreThenLoad())
1306       PI.setEscaped(&SI);
1307   }
1308 
1309   // All mem intrinsics modify the data.
visitMemIntrinsic__anon6bb6f9a71411::AllocaUseVisitor1310   void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }
1311 
visitBitCastInst__anon6bb6f9a71411::AllocaUseVisitor1312   void visitBitCastInst(BitCastInst &BC) {
1313     Base::visitBitCastInst(BC);
1314     handleAlias(BC);
1315   }
1316 
visitAddrSpaceCastInst__anon6bb6f9a71411::AllocaUseVisitor1317   void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
1318     Base::visitAddrSpaceCastInst(ASC);
1319     handleAlias(ASC);
1320   }
1321 
visitGetElementPtrInst__anon6bb6f9a71411::AllocaUseVisitor1322   void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
1323     // The base visitor will adjust Offset accordingly.
1324     Base::visitGetElementPtrInst(GEPI);
1325     handleAlias(GEPI);
1326   }
1327 
visitIntrinsicInst__anon6bb6f9a71411::AllocaUseVisitor1328   void visitIntrinsicInst(IntrinsicInst &II) {
1329     if (II.getIntrinsicID() != Intrinsic::lifetime_start)
1330       return Base::visitIntrinsicInst(II);
1331     LifetimeStarts.insert(&II);
1332   }
1333 
visitCallBase__anon6bb6f9a71411::AllocaUseVisitor1334   void visitCallBase(CallBase &CB) {
1335     for (unsigned Op = 0, OpCount = CB.getNumArgOperands(); Op < OpCount; ++Op)
1336       if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
1337         PI.setEscaped(&CB);
1338     handleMayWrite(CB);
1339   }
1340 
getShouldLiveOnFrame__anon6bb6f9a71411::AllocaUseVisitor1341   bool getShouldLiveOnFrame() const {
1342     if (!ShouldLiveOnFrame)
1343       ShouldLiveOnFrame = computeShouldLiveOnFrame();
1344     return ShouldLiveOnFrame.getValue();
1345   }
1346 
getMayWriteBeforeCoroBegin__anon6bb6f9a71411::AllocaUseVisitor1347   bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }
1348 
getAliasesCopy__anon6bb6f9a71411::AllocaUseVisitor1349   DenseMap<Instruction *, llvm::Optional<APInt>> getAliasesCopy() const {
1350     assert(getShouldLiveOnFrame() && "This method should only be called if the "
1351                                      "alloca needs to live on the frame.");
1352     for (const auto &P : AliasOffetMap)
1353       if (!P.second)
1354         report_fatal_error("Unable to handle an alias with unknown offset "
1355                            "created before CoroBegin.");
1356     return AliasOffetMap;
1357   }
1358 
1359 private:
1360   const DominatorTree &DT;
1361   const CoroBeginInst &CoroBegin;
1362   const SuspendCrossingInfo &Checker;
1363   // All alias to the original AllocaInst, created before CoroBegin and used
1364   // after CoroBegin. Each entry contains the instruction and the offset in the
1365   // original Alloca. They need to be recreated after CoroBegin off the frame.
1366   DenseMap<Instruction *, llvm::Optional<APInt>> AliasOffetMap{};
1367   SmallPtrSet<Instruction *, 4> Users{};
1368   SmallPtrSet<IntrinsicInst *, 2> LifetimeStarts{};
1369   bool MayWriteBeforeCoroBegin{false};
1370 
1371   mutable llvm::Optional<bool> ShouldLiveOnFrame{};
1372 
computeShouldLiveOnFrame__anon6bb6f9a71411::AllocaUseVisitor1373   bool computeShouldLiveOnFrame() const {
1374     // If lifetime information is available, we check it first since it's
1375     // more precise. We look at every pair of lifetime.start intrinsic and
1376     // every basic block that uses the pointer to see if they cross suspension
1377     // points. The uses cover both direct uses as well as indirect uses.
1378     if (!LifetimeStarts.empty()) {
1379       for (auto *I : Users)
1380         for (auto *S : LifetimeStarts)
1381           if (Checker.isDefinitionAcrossSuspend(*S, I))
1382             return true;
1383       return false;
1384     }
1385     // FIXME: Ideally the isEscaped check should come at the beginning.
1386     // However there are a few loose ends that need to be fixed first before
1387     // we can do that. We need to make sure we are not over-conservative, so
1388     // that the data accessed in-between await_suspend and symmetric transfer
1389     // is always put on the stack, and also data accessed after coro.end is
1390     // always put on the stack (esp the return object). To fix that, we need
1391     // to:
1392     //  1) Potentially treat sret as nocapture in calls
1393     //  2) Special handle the return object and put it on the stack
1394     //  3) Utilize lifetime.end intrinsic
1395     if (PI.isEscaped())
1396       return true;
1397 
1398     for (auto *U1 : Users)
1399       for (auto *U2 : Users)
1400         if (Checker.isDefinitionAcrossSuspend(*U1, U2))
1401           return true;
1402 
1403     return false;
1404   }
1405 
handleMayWrite__anon6bb6f9a71411::AllocaUseVisitor1406   void handleMayWrite(const Instruction &I) {
1407     if (!DT.dominates(&CoroBegin, &I))
1408       MayWriteBeforeCoroBegin = true;
1409   }
1410 
usedAfterCoroBegin__anon6bb6f9a71411::AllocaUseVisitor1411   bool usedAfterCoroBegin(Instruction &I) {
1412     for (auto &U : I.uses())
1413       if (DT.dominates(&CoroBegin, U))
1414         return true;
1415     return false;
1416   }
1417 
handleAlias__anon6bb6f9a71411::AllocaUseVisitor1418   void handleAlias(Instruction &I) {
1419     // We track all aliases created prior to CoroBegin but used after.
1420     // These aliases may need to be recreated after CoroBegin if the alloca
1421     // need to live on the frame.
1422     if (DT.dominates(&CoroBegin, &I) || !usedAfterCoroBegin(I))
1423       return;
1424 
1425     if (!IsOffsetKnown) {
1426       AliasOffetMap[&I].reset();
1427     } else {
1428       auto Itr = AliasOffetMap.find(&I);
1429       if (Itr == AliasOffetMap.end()) {
1430         AliasOffetMap[&I] = Offset;
1431       } else if (Itr->second.hasValue() && Itr->second.getValue() != Offset) {
1432         // If we have seen two different possible values for this alias, we set
1433         // it to empty.
1434         AliasOffetMap[&I].reset();
1435       }
1436     }
1437   }
1438 };
1439 } // namespace
1440 
1441 // We need to make room to insert a spill after initial PHIs, but before
1442 // catchswitch instruction. Placing it before violates the requirement that
1443 // catchswitch, like all other EHPads must be the first nonPHI in a block.
1444 //
1445 // Split away catchswitch into a separate block and insert in its place:
1446 //
1447 //   cleanuppad <InsertPt> cleanupret.
1448 //
1449 // cleanupret instruction will act as an insert point for the spill.
splitBeforeCatchSwitch(CatchSwitchInst * CatchSwitch)1450 static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
1451   BasicBlock *CurrentBlock = CatchSwitch->getParent();
1452   BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
1453   CurrentBlock->getTerminator()->eraseFromParent();
1454 
1455   auto *CleanupPad =
1456       CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
1457   auto *CleanupRet =
1458       CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
1459   return CleanupRet;
1460 }
1461 
createFramePtr(coro::Shape & Shape)1462 static void createFramePtr(coro::Shape &Shape) {
1463   auto *CB = Shape.CoroBegin;
1464   IRBuilder<> Builder(CB->getNextNode());
1465   StructType *FrameTy = Shape.FrameTy;
1466   PointerType *FramePtrTy = FrameTy->getPointerTo();
1467   Shape.FramePtr =
1468       cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
1469 }
1470 
1471 // Replace all alloca and SSA values that are accessed across suspend points
1472 // with GetElementPointer from coroutine frame + loads and stores. Create an
1473 // AllocaSpillBB that will become the new entry block for the resume parts of
1474 // the coroutine:
1475 //
1476 //    %hdl = coro.begin(...)
1477 //    whatever
1478 //
1479 // becomes:
1480 //
1481 //    %hdl = coro.begin(...)
1482 //    %FramePtr = bitcast i8* hdl to %f.frame*
1483 //    br label %AllocaSpillBB
1484 //
1485 //  AllocaSpillBB:
1486 //    ; geps corresponding to allocas that were moved to coroutine frame
1487 //    br label PostSpill
1488 //
1489 //  PostSpill:
1490 //    whatever
1491 //
1492 //
insertSpills(const FrameDataInfo & FrameData,coro::Shape & Shape)1493 static Instruction *insertSpills(const FrameDataInfo &FrameData,
1494                                  coro::Shape &Shape) {
1495   auto *CB = Shape.CoroBegin;
1496   LLVMContext &C = CB->getContext();
1497   IRBuilder<> Builder(C);
1498   StructType *FrameTy = Shape.FrameTy;
1499   Instruction *FramePtr = Shape.FramePtr;
1500   DominatorTree DT(*CB->getFunction());
1501   SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;
1502 
1503   // Create a GEP with the given index into the coroutine frame for the original
1504   // value Orig. Appends an extra 0 index for array-allocas, preserving the
1505   // original type.
1506   auto GetFramePointer = [&](Value *Orig) -> Value * {
1507     FieldIDType Index = FrameData.getFieldIndex(Orig);
1508     SmallVector<Value *, 3> Indices = {
1509         ConstantInt::get(Type::getInt32Ty(C), 0),
1510         ConstantInt::get(Type::getInt32Ty(C), Index),
1511     };
1512 
1513     if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
1514       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
1515         auto Count = CI->getValue().getZExtValue();
1516         if (Count > 1) {
1517           Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
1518         }
1519       } else {
1520         report_fatal_error("Coroutines cannot handle non static allocas yet");
1521       }
1522     }
1523 
1524     auto GEP = cast<GetElementPtrInst>(
1525         Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices));
1526     if (isa<AllocaInst>(Orig)) {
1527       // If the type of GEP is not equal to the type of AllocaInst, it implies
1528       // that the AllocaInst may be reused in the Frame slot of other
1529       // AllocaInst. So We cast GEP to the AllocaInst here to re-use
1530       // the Frame storage.
1531       //
1532       // Note: If we change the strategy dealing with alignment, we need to refine
1533       // this casting.
1534       if (GEP->getResultElementType() != Orig->getType())
1535         return Builder.CreateBitCast(GEP, Orig->getType(),
1536                                      Orig->getName() + Twine(".cast"));
1537     }
1538     return GEP;
1539   };
1540 
1541   for (auto const &E : FrameData.Spills) {
1542     Value *Def = E.first;
1543     // Create a store instruction storing the value into the
1544     // coroutine frame.
1545     Instruction *InsertPt = nullptr;
1546     if (auto *Arg = dyn_cast<Argument>(Def)) {
1547       // For arguments, we will place the store instruction right after
1548       // the coroutine frame pointer instruction, i.e. bitcast of
1549       // coro.begin from i8* to %f.frame*.
1550       InsertPt = FramePtr->getNextNode();
1551 
1552       // If we're spilling an Argument, make sure we clear 'nocapture'
1553       // from the coroutine function.
1554       Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);
1555 
1556     } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
1557       // Don't spill immediately after a suspend; splitting assumes
1558       // that the suspend will be followed by a branch.
1559       InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHI();
1560     } else {
1561       auto *I = cast<Instruction>(Def);
1562       if (!DT.dominates(CB, I)) {
1563         // If it is not dominated by CoroBegin, then spill should be
1564         // inserted immediately after CoroFrame is computed.
1565         InsertPt = FramePtr->getNextNode();
1566       } else if (auto *II = dyn_cast<InvokeInst>(I)) {
1567         // If we are spilling the result of the invoke instruction, split
1568         // the normal edge and insert the spill in the new block.
1569         auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
1570         InsertPt = NewBB->getTerminator();
1571       } else if (isa<PHINode>(I)) {
1572         // Skip the PHINodes and EH pads instructions.
1573         BasicBlock *DefBlock = I->getParent();
1574         if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
1575           InsertPt = splitBeforeCatchSwitch(CSI);
1576         else
1577           InsertPt = &*DefBlock->getFirstInsertionPt();
1578       } else {
1579         assert(!I->isTerminator() && "unexpected terminator");
1580         // For all other values, the spill is placed immediately after
1581         // the definition.
1582         InsertPt = I->getNextNode();
1583       }
1584     }
1585 
1586     auto Index = FrameData.getFieldIndex(Def);
1587     Builder.SetInsertPoint(InsertPt);
1588     auto *G = Builder.CreateConstInBoundsGEP2_32(
1589         FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
1590     Builder.CreateStore(Def, G);
1591 
1592     BasicBlock *CurrentBlock = nullptr;
1593     Value *CurrentReload = nullptr;
1594     for (auto *U : E.second) {
1595       // If we have not seen the use block, create a load instruction to reload
1596       // the spilled value from the coroutine frame. Populates the Value pointer
1597       // reference provided with the frame GEP.
1598       if (CurrentBlock != U->getParent()) {
1599         CurrentBlock = U->getParent();
1600         Builder.SetInsertPoint(&*CurrentBlock->getFirstInsertionPt());
1601 
1602         auto *GEP = GetFramePointer(E.first);
1603         GEP->setName(E.first->getName() + Twine(".reload.addr"));
1604         CurrentReload = Builder.CreateLoad(
1605             FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
1606             E.first->getName() + Twine(".reload"));
1607 
1608         TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(Def);
1609         for (DbgDeclareInst *DDI : DIs) {
1610           bool AllowUnresolved = false;
1611           // This dbg.declare is preserved for all coro-split function
1612           // fragments. It will be unreachable in the main function, and
1613           // processed by coro::salvageDebugInfo() by CoroCloner.
1614           DIBuilder(*CurrentBlock->getParent()->getParent(), AllowUnresolved)
1615               .insertDeclare(CurrentReload, DDI->getVariable(),
1616                              DDI->getExpression(), DDI->getDebugLoc(),
1617                              &*Builder.GetInsertPoint());
1618           // This dbg.declare is for the main function entry point.  It
1619           // will be deleted in all coro-split functions.
1620           coro::salvageDebugInfo(DbgPtrAllocaCache, DDI, Shape.ReuseFrameSlot);
1621         }
1622       }
1623 
1624       // If we have a single edge PHINode, remove it and replace it with a
1625       // reload from the coroutine frame. (We already took care of multi edge
1626       // PHINodes by rewriting them in the rewritePHIs function).
1627       if (auto *PN = dyn_cast<PHINode>(U)) {
1628         assert(PN->getNumIncomingValues() == 1 &&
1629                "unexpected number of incoming "
1630                "values in the PHINode");
1631         PN->replaceAllUsesWith(CurrentReload);
1632         PN->eraseFromParent();
1633         continue;
1634       }
1635 
1636       // Replace all uses of CurrentValue in the current instruction with
1637       // reload.
1638       U->replaceUsesOfWith(Def, CurrentReload);
1639     }
1640   }
1641 
1642   BasicBlock *FramePtrBB = FramePtr->getParent();
1643 
1644   auto SpillBlock =
1645       FramePtrBB->splitBasicBlock(FramePtr->getNextNode(), "AllocaSpillBB");
1646   SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
1647   Shape.AllocaSpillBlock = SpillBlock;
1648 
1649   // retcon and retcon.once lowering assumes all uses have been sunk.
1650   if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
1651       Shape.ABI == coro::ABI::Async) {
1652     // If we found any allocas, replace all of their remaining uses with Geps.
1653     Builder.SetInsertPoint(&SpillBlock->front());
1654     for (const auto &P : FrameData.Allocas) {
1655       AllocaInst *Alloca = P.Alloca;
1656       auto *G = GetFramePointer(Alloca);
1657 
1658       // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
1659       // here, as we are changing location of the instruction.
1660       G->takeName(Alloca);
1661       Alloca->replaceAllUsesWith(G);
1662       Alloca->eraseFromParent();
1663     }
1664     return FramePtr;
1665   }
1666 
1667   // If we found any alloca, replace all of their remaining uses with GEP
1668   // instructions. To remain debugbility, we replace the uses of allocas for
1669   // dbg.declares and dbg.values with the reload from the frame.
1670   // Note: We cannot replace the alloca with GEP instructions indiscriminately,
1671   // as some of the uses may not be dominated by CoroBegin.
1672   Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
1673   SmallVector<Instruction *, 4> UsersToUpdate;
1674   for (const auto &A : FrameData.Allocas) {
1675     AllocaInst *Alloca = A.Alloca;
1676     UsersToUpdate.clear();
1677     for (User *U : Alloca->users()) {
1678       auto *I = cast<Instruction>(U);
1679       if (DT.dominates(CB, I))
1680         UsersToUpdate.push_back(I);
1681     }
1682     if (UsersToUpdate.empty())
1683       continue;
1684     auto *G = GetFramePointer(Alloca);
1685     G->setName(Alloca->getName() + Twine(".reload.addr"));
1686 
1687     SmallVector<DbgVariableIntrinsic *, 4> DIs;
1688     findDbgUsers(DIs, Alloca);
1689     for (auto *DVI : DIs)
1690       DVI->replaceUsesOfWith(Alloca, G);
1691 
1692     for (Instruction *I : UsersToUpdate)
1693       I->replaceUsesOfWith(Alloca, G);
1694   }
1695   Builder.SetInsertPoint(FramePtr->getNextNode());
1696   for (const auto &A : FrameData.Allocas) {
1697     AllocaInst *Alloca = A.Alloca;
1698     if (A.MayWriteBeforeCoroBegin) {
1699       // isEscaped really means potentially modified before CoroBegin.
1700       if (Alloca->isArrayAllocation())
1701         report_fatal_error(
1702             "Coroutines cannot handle copying of array allocas yet");
1703 
1704       auto *G = GetFramePointer(Alloca);
1705       auto *Value = Builder.CreateLoad(Alloca->getAllocatedType(), Alloca);
1706       Builder.CreateStore(Value, G);
1707     }
1708     // For each alias to Alloca created before CoroBegin but used after
1709     // CoroBegin, we recreate them after CoroBegin by appplying the offset
1710     // to the pointer in the frame.
1711     for (const auto &Alias : A.Aliases) {
1712       auto *FramePtr = GetFramePointer(Alloca);
1713       auto *FramePtrRaw =
1714           Builder.CreateBitCast(FramePtr, Type::getInt8PtrTy(C));
1715       auto *AliasPtr = Builder.CreateGEP(
1716           FramePtrRaw,
1717           ConstantInt::get(Type::getInt64Ty(C), Alias.second.getValue()));
1718       auto *AliasPtrTyped =
1719           Builder.CreateBitCast(AliasPtr, Alias.first->getType());
1720       Alias.first->replaceUsesWithIf(
1721           AliasPtrTyped, [&](Use &U) { return DT.dominates(CB, U); });
1722     }
1723   }
1724   return FramePtr;
1725 }
1726 
1727 // Moves the values in the PHIs in SuccBB that correspong to PredBB into a new
1728 // PHI in InsertedBB.
movePHIValuesToInsertedBlock(BasicBlock * SuccBB,BasicBlock * InsertedBB,BasicBlock * PredBB,PHINode * UntilPHI=nullptr)1729 static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB,
1730                                          BasicBlock *InsertedBB,
1731                                          BasicBlock *PredBB,
1732                                          PHINode *UntilPHI = nullptr) {
1733   auto *PN = cast<PHINode>(&SuccBB->front());
1734   do {
1735     int Index = PN->getBasicBlockIndex(InsertedBB);
1736     Value *V = PN->getIncomingValue(Index);
1737     PHINode *InputV = PHINode::Create(
1738         V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName(),
1739         &InsertedBB->front());
1740     InputV->addIncoming(V, PredBB);
1741     PN->setIncomingValue(Index, InputV);
1742     PN = dyn_cast<PHINode>(PN->getNextNode());
1743   } while (PN != UntilPHI);
1744 }
1745 
1746 // Rewrites the PHI Nodes in a cleanuppad.
rewritePHIsForCleanupPad(BasicBlock * CleanupPadBB,CleanupPadInst * CleanupPad)1747 static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
1748                                      CleanupPadInst *CleanupPad) {
1749   // For every incoming edge to a CleanupPad we will create a new block holding
1750   // all incoming values in single-value PHI nodes. We will then create another
1751   // block to act as a dispather (as all unwind edges for related EH blocks
1752   // must be the same).
1753   //
1754   // cleanuppad:
1755   //    %2 = phi i32[%0, %catchswitch], [%1, %catch.1]
1756   //    %3 = cleanuppad within none []
1757   //
1758   // It will create:
1759   //
1760   // cleanuppad.corodispatch
1761   //    %2 = phi i8[0, %catchswitch], [1, %catch.1]
1762   //    %3 = cleanuppad within none []
1763   //    switch i8 % 2, label %unreachable
1764   //            [i8 0, label %cleanuppad.from.catchswitch
1765   //             i8 1, label %cleanuppad.from.catch.1]
1766   // cleanuppad.from.catchswitch:
1767   //    %4 = phi i32 [%0, %catchswitch]
1768   //    br %label cleanuppad
1769   // cleanuppad.from.catch.1:
1770   //    %6 = phi i32 [%1, %catch.1]
1771   //    br %label cleanuppad
1772   // cleanuppad:
1773   //    %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
1774   //                 [%6, %cleanuppad.from.catch.1]
1775 
1776   // Unreachable BB, in case switching on an invalid value in the dispatcher.
1777   auto *UnreachBB = BasicBlock::Create(
1778       CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
1779   IRBuilder<> Builder(UnreachBB);
1780   Builder.CreateUnreachable();
1781 
1782   // Create a new cleanuppad which will be the dispatcher.
1783   auto *NewCleanupPadBB =
1784       BasicBlock::Create(CleanupPadBB->getContext(),
1785                          CleanupPadBB->getName() + Twine(".corodispatch"),
1786                          CleanupPadBB->getParent(), CleanupPadBB);
1787   Builder.SetInsertPoint(NewCleanupPadBB);
1788   auto *SwitchType = Builder.getInt8Ty();
1789   auto *SetDispatchValuePN =
1790       Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
1791   CleanupPad->removeFromParent();
1792   CleanupPad->insertAfter(SetDispatchValuePN);
1793   auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
1794                                                 pred_size(CleanupPadBB));
1795 
1796   int SwitchIndex = 0;
1797   SmallVector<BasicBlock *, 8> Preds(predecessors(CleanupPadBB));
1798   for (BasicBlock *Pred : Preds) {
1799     // Create a new cleanuppad and move the PHI values to there.
1800     auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
1801                                       CleanupPadBB->getName() +
1802                                           Twine(".from.") + Pred->getName(),
1803                                       CleanupPadBB->getParent(), CleanupPadBB);
1804     updatePhiNodes(CleanupPadBB, Pred, CaseBB);
1805     CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
1806                     Pred->getName());
1807     Builder.SetInsertPoint(CaseBB);
1808     Builder.CreateBr(CleanupPadBB);
1809     movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);
1810 
1811     // Update this Pred to the new unwind point.
1812     setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);
1813 
1814     // Setup the switch in the dispatcher.
1815     auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
1816     SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
1817     SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
1818     SwitchIndex++;
1819   }
1820 }
1821 
rewritePHIs(BasicBlock & BB)1822 static void rewritePHIs(BasicBlock &BB) {
1823   // For every incoming edge we will create a block holding all
1824   // incoming values in a single PHI nodes.
1825   //
1826   // loop:
1827   //    %n.val = phi i32[%n, %entry], [%inc, %loop]
1828   //
1829   // It will create:
1830   //
1831   // loop.from.entry:
1832   //    %n.loop.pre = phi i32 [%n, %entry]
1833   //    br %label loop
1834   // loop.from.loop:
1835   //    %inc.loop.pre = phi i32 [%inc, %loop]
1836   //    br %label loop
1837   //
1838   // After this rewrite, further analysis will ignore any phi nodes with more
1839   // than one incoming edge.
1840 
1841   // TODO: Simplify PHINodes in the basic block to remove duplicate
1842   // predecessors.
1843 
1844   // Special case for CleanupPad: all EH blocks must have the same unwind edge
1845   // so we need to create an additional "dispatcher" block.
1846   if (auto *CleanupPad =
1847           dyn_cast_or_null<CleanupPadInst>(BB.getFirstNonPHI())) {
1848     SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
1849     for (BasicBlock *Pred : Preds) {
1850       if (CatchSwitchInst *CS =
1851               dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
1852         // CleanupPad with a CatchSwitch predecessor: therefore this is an
1853         // unwind destination that needs to be handle specially.
1854         assert(CS->getUnwindDest() == &BB);
1855         (void)CS;
1856         rewritePHIsForCleanupPad(&BB, CleanupPad);
1857         return;
1858       }
1859     }
1860   }
1861 
1862   LandingPadInst *LandingPad = nullptr;
1863   PHINode *ReplPHI = nullptr;
1864   if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
1865     // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
1866     // We replace the original landing pad with a PHINode that will collect the
1867     // results from all of them.
1868     ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
1869     ReplPHI->takeName(LandingPad);
1870     LandingPad->replaceAllUsesWith(ReplPHI);
1871     // We will erase the original landing pad at the end of this function after
1872     // ehAwareSplitEdge cloned it in the transition blocks.
1873   }
1874 
1875   SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
1876   for (BasicBlock *Pred : Preds) {
1877     auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
1878     IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());
1879 
1880     // Stop the moving of values at ReplPHI, as this is either null or the PHI
1881     // that replaced the landing pad.
1882     movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
1883   }
1884 
1885   if (LandingPad) {
1886     // Calls to ehAwareSplitEdge function cloned the original lading pad.
1887     // No longer need it.
1888     LandingPad->eraseFromParent();
1889   }
1890 }
1891 
rewritePHIs(Function & F)1892 static void rewritePHIs(Function &F) {
1893   SmallVector<BasicBlock *, 8> WorkList;
1894 
1895   for (BasicBlock &BB : F)
1896     if (auto *PN = dyn_cast<PHINode>(&BB.front()))
1897       if (PN->getNumIncomingValues() > 1)
1898         WorkList.push_back(&BB);
1899 
1900   for (BasicBlock *BB : WorkList)
1901     rewritePHIs(*BB);
1902 }
1903 
1904 // Check for instructions that we can recreate on resume as opposed to spill
1905 // the result into a coroutine frame.
materializable(Instruction & V)1906 static bool materializable(Instruction &V) {
1907   return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
1908          isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
1909 }
1910 
1911 // Check for structural coroutine intrinsics that should not be spilled into
1912 // the coroutine frame.
isCoroutineStructureIntrinsic(Instruction & I)1913 static bool isCoroutineStructureIntrinsic(Instruction &I) {
1914   return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
1915          isa<CoroSuspendInst>(&I);
1916 }
1917 
1918 // For every use of the value that is across suspend point, recreate that value
1919 // after a suspend point.
rewriteMaterializableInstructions(IRBuilder<> & IRB,const SpillInfo & Spills)1920 static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
1921                                               const SpillInfo &Spills) {
1922   for (const auto &E : Spills) {
1923     Value *Def = E.first;
1924     BasicBlock *CurrentBlock = nullptr;
1925     Instruction *CurrentMaterialization = nullptr;
1926     for (Instruction *U : E.second) {
1927       // If we have not seen this block, materialize the value.
1928       if (CurrentBlock != U->getParent()) {
1929         CurrentBlock = U->getParent();
1930         CurrentMaterialization = cast<Instruction>(Def)->clone();
1931         CurrentMaterialization->setName(Def->getName());
1932         CurrentMaterialization->insertBefore(
1933             &*CurrentBlock->getFirstInsertionPt());
1934       }
1935       if (auto *PN = dyn_cast<PHINode>(U)) {
1936         assert(PN->getNumIncomingValues() == 1 &&
1937                "unexpected number of incoming "
1938                "values in the PHINode");
1939         PN->replaceAllUsesWith(CurrentMaterialization);
1940         PN->eraseFromParent();
1941         continue;
1942       }
1943       // Replace all uses of Def in the current instruction with the
1944       // CurrentMaterialization for the block.
1945       U->replaceUsesOfWith(Def, CurrentMaterialization);
1946     }
1947   }
1948 }
1949 
1950 // Splits the block at a particular instruction unless it is the first
1951 // instruction in the block with a single predecessor.
splitBlockIfNotFirst(Instruction * I,const Twine & Name)1952 static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
1953   auto *BB = I->getParent();
1954   if (&BB->front() == I) {
1955     if (BB->getSinglePredecessor()) {
1956       BB->setName(Name);
1957       return BB;
1958     }
1959   }
1960   return BB->splitBasicBlock(I, Name);
1961 }
1962 
1963 // Split above and below a particular instruction so that it
1964 // will be all alone by itself in a block.
splitAround(Instruction * I,const Twine & Name)1965 static void splitAround(Instruction *I, const Twine &Name) {
1966   splitBlockIfNotFirst(I, Name);
1967   splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
1968 }
1969 
isSuspendBlock(BasicBlock * BB)1970 static bool isSuspendBlock(BasicBlock *BB) {
1971   return isa<AnyCoroSuspendInst>(BB->front());
1972 }
1973 
1974 typedef SmallPtrSet<BasicBlock*, 8> VisitedBlocksSet;
1975 
1976 /// Does control flow starting at the given block ever reach a suspend
1977 /// instruction before reaching a block in VisitedOrFreeBBs?
isSuspendReachableFrom(BasicBlock * From,VisitedBlocksSet & VisitedOrFreeBBs)1978 static bool isSuspendReachableFrom(BasicBlock *From,
1979                                    VisitedBlocksSet &VisitedOrFreeBBs) {
1980   // Eagerly try to add this block to the visited set.  If it's already
1981   // there, stop recursing; this path doesn't reach a suspend before
1982   // either looping or reaching a freeing block.
1983   if (!VisitedOrFreeBBs.insert(From).second)
1984     return false;
1985 
1986   // We assume that we'll already have split suspends into their own blocks.
1987   if (isSuspendBlock(From))
1988     return true;
1989 
1990   // Recurse on the successors.
1991   for (auto Succ : successors(From)) {
1992     if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
1993       return true;
1994   }
1995 
1996   return false;
1997 }
1998 
1999 /// Is the given alloca "local", i.e. bounded in lifetime to not cross a
2000 /// suspend point?
isLocalAlloca(CoroAllocaAllocInst * AI)2001 static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
2002   // Seed the visited set with all the basic blocks containing a free
2003   // so that we won't pass them up.
2004   VisitedBlocksSet VisitedOrFreeBBs;
2005   for (auto User : AI->users()) {
2006     if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
2007       VisitedOrFreeBBs.insert(FI->getParent());
2008   }
2009 
2010   return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
2011 }
2012 
2013 /// After we split the coroutine, will the given basic block be along
2014 /// an obvious exit path for the resumption function?
willLeaveFunctionImmediatelyAfter(BasicBlock * BB,unsigned depth=3)2015 static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
2016                                               unsigned depth = 3) {
2017   // If we've bottomed out our depth count, stop searching and assume
2018   // that the path might loop back.
2019   if (depth == 0) return false;
2020 
2021   // If this is a suspend block, we're about to exit the resumption function.
2022   if (isSuspendBlock(BB)) return true;
2023 
2024   // Recurse into the successors.
2025   for (auto Succ : successors(BB)) {
2026     if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
2027       return false;
2028   }
2029 
2030   // If none of the successors leads back in a loop, we're on an exit/abort.
2031   return true;
2032 }
2033 
localAllocaNeedsStackSave(CoroAllocaAllocInst * AI)2034 static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
2035   // Look for a free that isn't sufficiently obviously followed by
2036   // either a suspend or a termination, i.e. something that will leave
2037   // the coro resumption frame.
2038   for (auto U : AI->users()) {
2039     auto FI = dyn_cast<CoroAllocaFreeInst>(U);
2040     if (!FI) continue;
2041 
2042     if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
2043       return true;
2044   }
2045 
2046   // If we never found one, we don't need a stack save.
2047   return false;
2048 }
2049 
2050 /// Turn each of the given local allocas into a normal (dynamic) alloca
2051 /// instruction.
lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst * > LocalAllocas,SmallVectorImpl<Instruction * > & DeadInsts)2052 static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst*> LocalAllocas,
2053                               SmallVectorImpl<Instruction*> &DeadInsts) {
2054   for (auto AI : LocalAllocas) {
2055     auto M = AI->getModule();
2056     IRBuilder<> Builder(AI);
2057 
2058     // Save the stack depth.  Try to avoid doing this if the stackrestore
2059     // is going to immediately precede a return or something.
2060     Value *StackSave = nullptr;
2061     if (localAllocaNeedsStackSave(AI))
2062       StackSave = Builder.CreateCall(
2063                             Intrinsic::getDeclaration(M, Intrinsic::stacksave));
2064 
2065     // Allocate memory.
2066     auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
2067     Alloca->setAlignment(Align(AI->getAlignment()));
2068 
2069     for (auto U : AI->users()) {
2070       // Replace gets with the allocation.
2071       if (isa<CoroAllocaGetInst>(U)) {
2072         U->replaceAllUsesWith(Alloca);
2073 
2074       // Replace frees with stackrestores.  This is safe because
2075       // alloca.alloc is required to obey a stack discipline, although we
2076       // don't enforce that structurally.
2077       } else {
2078         auto FI = cast<CoroAllocaFreeInst>(U);
2079         if (StackSave) {
2080           Builder.SetInsertPoint(FI);
2081           Builder.CreateCall(
2082                     Intrinsic::getDeclaration(M, Intrinsic::stackrestore),
2083                              StackSave);
2084         }
2085       }
2086       DeadInsts.push_back(cast<Instruction>(U));
2087     }
2088 
2089     DeadInsts.push_back(AI);
2090   }
2091 }
2092 
2093 /// Turn the given coro.alloca.alloc call into a dynamic allocation.
2094 /// This happens during the all-instructions iteration, so it must not
2095 /// delete the call.
lowerNonLocalAlloca(CoroAllocaAllocInst * AI,coro::Shape & Shape,SmallVectorImpl<Instruction * > & DeadInsts)2096 static Instruction *lowerNonLocalAlloca(CoroAllocaAllocInst *AI,
2097                                         coro::Shape &Shape,
2098                                    SmallVectorImpl<Instruction*> &DeadInsts) {
2099   IRBuilder<> Builder(AI);
2100   auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);
2101 
2102   for (User *U : AI->users()) {
2103     if (isa<CoroAllocaGetInst>(U)) {
2104       U->replaceAllUsesWith(Alloc);
2105     } else {
2106       auto FI = cast<CoroAllocaFreeInst>(U);
2107       Builder.SetInsertPoint(FI);
2108       Shape.emitDealloc(Builder, Alloc, nullptr);
2109     }
2110     DeadInsts.push_back(cast<Instruction>(U));
2111   }
2112 
2113   // Push this on last so that it gets deleted after all the others.
2114   DeadInsts.push_back(AI);
2115 
2116   // Return the new allocation value so that we can check for needed spills.
2117   return cast<Instruction>(Alloc);
2118 }
2119 
2120 /// Get the current swifterror value.
emitGetSwiftErrorValue(IRBuilder<> & Builder,Type * ValueTy,coro::Shape & Shape)2121 static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
2122                                      coro::Shape &Shape) {
2123   // Make a fake function pointer as a sort of intrinsic.
2124   auto FnTy = FunctionType::get(ValueTy, {}, false);
2125   auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
2126 
2127   auto Call = Builder.CreateCall(FnTy, Fn, {});
2128   Shape.SwiftErrorOps.push_back(Call);
2129 
2130   return Call;
2131 }
2132 
2133 /// Set the given value as the current swifterror value.
2134 ///
2135 /// Returns a slot that can be used as a swifterror slot.
emitSetSwiftErrorValue(IRBuilder<> & Builder,Value * V,coro::Shape & Shape)2136 static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
2137                                      coro::Shape &Shape) {
2138   // Make a fake function pointer as a sort of intrinsic.
2139   auto FnTy = FunctionType::get(V->getType()->getPointerTo(),
2140                                 {V->getType()}, false);
2141   auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
2142 
2143   auto Call = Builder.CreateCall(FnTy, Fn, { V });
2144   Shape.SwiftErrorOps.push_back(Call);
2145 
2146   return Call;
2147 }
2148 
2149 /// Set the swifterror value from the given alloca before a call,
2150 /// then put in back in the alloca afterwards.
2151 ///
2152 /// Returns an address that will stand in for the swifterror slot
2153 /// until splitting.
emitSetAndGetSwiftErrorValueAround(Instruction * Call,AllocaInst * Alloca,coro::Shape & Shape)2154 static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
2155                                                  AllocaInst *Alloca,
2156                                                  coro::Shape &Shape) {
2157   auto ValueTy = Alloca->getAllocatedType();
2158   IRBuilder<> Builder(Call);
2159 
2160   // Load the current value from the alloca and set it as the
2161   // swifterror value.
2162   auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
2163   auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);
2164 
2165   // Move to after the call.  Since swifterror only has a guaranteed
2166   // value on normal exits, we can ignore implicit and explicit unwind
2167   // edges.
2168   if (isa<CallInst>(Call)) {
2169     Builder.SetInsertPoint(Call->getNextNode());
2170   } else {
2171     auto Invoke = cast<InvokeInst>(Call);
2172     Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
2173   }
2174 
2175   // Get the current swifterror value and store it to the alloca.
2176   auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
2177   Builder.CreateStore(ValueAfterCall, Alloca);
2178 
2179   return Addr;
2180 }
2181 
2182 /// Eliminate a formerly-swifterror alloca by inserting the get/set
2183 /// intrinsics and attempting to MemToReg the alloca away.
eliminateSwiftErrorAlloca(Function & F,AllocaInst * Alloca,coro::Shape & Shape)2184 static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
2185                                       coro::Shape &Shape) {
2186   for (auto UI = Alloca->use_begin(), UE = Alloca->use_end(); UI != UE; ) {
2187     // We're likely changing the use list, so use a mutation-safe
2188     // iteration pattern.
2189     auto &Use = *UI;
2190     ++UI;
2191 
2192     // swifterror values can only be used in very specific ways.
2193     // We take advantage of that here.
2194     auto User = Use.getUser();
2195     if (isa<LoadInst>(User) || isa<StoreInst>(User))
2196       continue;
2197 
2198     assert(isa<CallInst>(User) || isa<InvokeInst>(User));
2199     auto Call = cast<Instruction>(User);
2200 
2201     auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);
2202 
2203     // Use the returned slot address as the call argument.
2204     Use.set(Addr);
2205   }
2206 
2207   // All the uses should be loads and stores now.
2208   assert(isAllocaPromotable(Alloca));
2209 }
2210 
2211 /// "Eliminate" a swifterror argument by reducing it to the alloca case
2212 /// and then loading and storing in the prologue and epilog.
2213 ///
2214 /// The argument keeps the swifterror flag.
eliminateSwiftErrorArgument(Function & F,Argument & Arg,coro::Shape & Shape,SmallVectorImpl<AllocaInst * > & AllocasToPromote)2215 static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
2216                                         coro::Shape &Shape,
2217                              SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
2218   IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
2219 
2220   auto ArgTy = cast<PointerType>(Arg.getType());
2221   auto ValueTy = ArgTy->getElementType();
2222 
2223   // Reduce to the alloca case:
2224 
2225   // Create an alloca and replace all uses of the arg with it.
2226   auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
2227   Arg.replaceAllUsesWith(Alloca);
2228 
2229   // Set an initial value in the alloca.  swifterror is always null on entry.
2230   auto InitialValue = Constant::getNullValue(ValueTy);
2231   Builder.CreateStore(InitialValue, Alloca);
2232 
2233   // Find all the suspends in the function and save and restore around them.
2234   for (auto Suspend : Shape.CoroSuspends) {
2235     (void) emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
2236   }
2237 
2238   // Find all the coro.ends in the function and restore the error value.
2239   for (auto End : Shape.CoroEnds) {
2240     Builder.SetInsertPoint(End);
2241     auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
2242     (void) emitSetSwiftErrorValue(Builder, FinalValue, Shape);
2243   }
2244 
2245   // Now we can use the alloca logic.
2246   AllocasToPromote.push_back(Alloca);
2247   eliminateSwiftErrorAlloca(F, Alloca, Shape);
2248 }
2249 
2250 /// Eliminate all problematic uses of swifterror arguments and allocas
2251 /// from the function.  We'll fix them up later when splitting the function.
eliminateSwiftError(Function & F,coro::Shape & Shape)2252 static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
2253   SmallVector<AllocaInst*, 4> AllocasToPromote;
2254 
2255   // Look for a swifterror argument.
2256   for (auto &Arg : F.args()) {
2257     if (!Arg.hasSwiftErrorAttr()) continue;
2258 
2259     eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
2260     break;
2261   }
2262 
2263   // Look for swifterror allocas.
2264   for (auto &Inst : F.getEntryBlock()) {
2265     auto Alloca = dyn_cast<AllocaInst>(&Inst);
2266     if (!Alloca || !Alloca->isSwiftError()) continue;
2267 
2268     // Clear the swifterror flag.
2269     Alloca->setSwiftError(false);
2270 
2271     AllocasToPromote.push_back(Alloca);
2272     eliminateSwiftErrorAlloca(F, Alloca, Shape);
2273   }
2274 
2275   // If we have any allocas to promote, compute a dominator tree and
2276   // promote them en masse.
2277   if (!AllocasToPromote.empty()) {
2278     DominatorTree DT(F);
2279     PromoteMemToReg(AllocasToPromote, DT);
2280   }
2281 }
2282 
2283 /// retcon and retcon.once conventions assume that all spill uses can be sunk
2284 /// after the coro.begin intrinsic.
sinkSpillUsesAfterCoroBegin(Function & F,const FrameDataInfo & FrameData,CoroBeginInst * CoroBegin)2285 static void sinkSpillUsesAfterCoroBegin(Function &F,
2286                                         const FrameDataInfo &FrameData,
2287                                         CoroBeginInst *CoroBegin) {
2288   DominatorTree Dom(F);
2289 
2290   SmallSetVector<Instruction *, 32> ToMove;
2291   SmallVector<Instruction *, 32> Worklist;
2292 
2293   // Collect all users that precede coro.begin.
2294   for (auto *Def : FrameData.getAllDefs()) {
2295     for (User *U : Def->users()) {
2296       auto Inst = cast<Instruction>(U);
2297       if (Inst->getParent() != CoroBegin->getParent() ||
2298           Dom.dominates(CoroBegin, Inst))
2299         continue;
2300       if (ToMove.insert(Inst))
2301         Worklist.push_back(Inst);
2302     }
2303   }
2304   // Recursively collect users before coro.begin.
2305   while (!Worklist.empty()) {
2306     auto *Def = Worklist.pop_back_val();
2307     for (User *U : Def->users()) {
2308       auto Inst = cast<Instruction>(U);
2309       if (Dom.dominates(CoroBegin, Inst))
2310         continue;
2311       if (ToMove.insert(Inst))
2312         Worklist.push_back(Inst);
2313     }
2314   }
2315 
2316   // Sort by dominance.
2317   SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
2318   llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
2319     // If a dominates b it should preceed (<) b.
2320     return Dom.dominates(A, B);
2321   });
2322 
2323   Instruction *InsertPt = CoroBegin->getNextNode();
2324   for (Instruction *Inst : InsertionList)
2325     Inst->moveBefore(InsertPt);
2326 }
2327 
2328 /// For each local variable that all of its user are only used inside one of
2329 /// suspended region, we sink their lifetime.start markers to the place where
2330 /// after the suspend block. Doing so minimizes the lifetime of each variable,
2331 /// hence minimizing the amount of data we end up putting on the frame.
sinkLifetimeStartMarkers(Function & F,coro::Shape & Shape,SuspendCrossingInfo & Checker)2332 static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape,
2333                                      SuspendCrossingInfo &Checker) {
2334   DominatorTree DT(F);
2335 
2336   // Collect all possible basic blocks which may dominate all uses of allocas.
2337   SmallPtrSet<BasicBlock *, 4> DomSet;
2338   DomSet.insert(&F.getEntryBlock());
2339   for (auto *CSI : Shape.CoroSuspends) {
2340     BasicBlock *SuspendBlock = CSI->getParent();
2341     assert(isSuspendBlock(SuspendBlock) && SuspendBlock->getSingleSuccessor() &&
2342            "should have split coro.suspend into its own block");
2343     DomSet.insert(SuspendBlock->getSingleSuccessor());
2344   }
2345 
2346   for (Instruction &I : instructions(F)) {
2347     AllocaInst* AI = dyn_cast<AllocaInst>(&I);
2348     if (!AI)
2349       continue;
2350 
2351     for (BasicBlock *DomBB : DomSet) {
2352       bool Valid = true;
2353       SmallVector<Instruction *, 1> Lifetimes;
2354 
2355       auto isLifetimeStart = [](Instruction* I) {
2356         if (auto* II = dyn_cast<IntrinsicInst>(I))
2357           return II->getIntrinsicID() == Intrinsic::lifetime_start;
2358         return false;
2359       };
2360 
2361       auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
2362         if (isLifetimeStart(U)) {
2363           Lifetimes.push_back(U);
2364           return true;
2365         }
2366         if (!U->hasOneUse() || U->stripPointerCasts() != AI)
2367           return false;
2368         if (isLifetimeStart(U->user_back())) {
2369           Lifetimes.push_back(U->user_back());
2370           return true;
2371         }
2372         return false;
2373       };
2374 
2375       for (User *U : AI->users()) {
2376         Instruction *UI = cast<Instruction>(U);
2377         // For all users except lifetime.start markers, if they are all
2378         // dominated by one of the basic blocks and do not cross
2379         // suspend points as well, then there is no need to spill the
2380         // instruction.
2381         if (!DT.dominates(DomBB, UI->getParent()) ||
2382             Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
2383           // Skip lifetime.start, GEP and bitcast used by lifetime.start
2384           // markers.
2385           if (collectLifetimeStart(UI, AI))
2386             continue;
2387           Valid = false;
2388           break;
2389         }
2390       }
2391       // Sink lifetime.start markers to dominate block when they are
2392       // only used outside the region.
2393       if (Valid && Lifetimes.size() != 0) {
2394         // May be AI itself, when the type of AI is i8*
2395         auto *NewBitCast = [&](AllocaInst *AI) -> Value* {
2396           if (isa<AllocaInst>(Lifetimes[0]->getOperand(1)))
2397             return AI;
2398           auto *Int8PtrTy = Type::getInt8PtrTy(F.getContext());
2399           return CastInst::Create(Instruction::BitCast, AI, Int8PtrTy, "",
2400                                   DomBB->getTerminator());
2401         }(AI);
2402 
2403         auto *NewLifetime = Lifetimes[0]->clone();
2404         NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), NewBitCast);
2405         NewLifetime->insertBefore(DomBB->getTerminator());
2406 
2407         // All the outsided lifetime.start markers are no longer necessary.
2408         for (Instruction *S : Lifetimes)
2409           S->eraseFromParent();
2410 
2411         break;
2412       }
2413     }
2414   }
2415 }
2416 
collectFrameAllocas(Function & F,coro::Shape & Shape,const SuspendCrossingInfo & Checker,SmallVectorImpl<AllocaInfo> & Allocas)2417 static void collectFrameAllocas(Function &F, coro::Shape &Shape,
2418                                 const SuspendCrossingInfo &Checker,
2419                                 SmallVectorImpl<AllocaInfo> &Allocas) {
2420   for (Instruction &I : instructions(F)) {
2421     auto *AI = dyn_cast<AllocaInst>(&I);
2422     if (!AI)
2423       continue;
2424     // The PromiseAlloca will be specially handled since it needs to be in a
2425     // fixed position in the frame.
2426     if (AI == Shape.SwitchLowering.PromiseAlloca) {
2427       continue;
2428     }
2429     DominatorTree DT(F);
2430     AllocaUseVisitor Visitor{F.getParent()->getDataLayout(), DT,
2431                              *Shape.CoroBegin, Checker};
2432     Visitor.visitPtr(*AI);
2433     if (!Visitor.getShouldLiveOnFrame())
2434       continue;
2435     Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
2436                          Visitor.getMayWriteBeforeCoroBegin());
2437   }
2438 }
2439 
salvageDebugInfo(SmallDenseMap<llvm::Value *,llvm::AllocaInst *,4> & DbgPtrAllocaCache,DbgVariableIntrinsic * DVI,bool ReuseFrameSlot)2440 void coro::salvageDebugInfo(
2441     SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> &DbgPtrAllocaCache,
2442     DbgVariableIntrinsic *DVI, bool ReuseFrameSlot) {
2443   Function *F = DVI->getFunction();
2444   IRBuilder<> Builder(F->getContext());
2445   auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
2446   while (isa<IntrinsicInst>(InsertPt))
2447     ++InsertPt;
2448   Builder.SetInsertPoint(&F->getEntryBlock(), InsertPt);
2449   DIExpression *Expr = DVI->getExpression();
2450   // Follow the pointer arithmetic all the way to the incoming
2451   // function argument and convert into a DIExpression.
2452   bool OutermostLoad = true;
2453   Value *Storage = DVI->getVariableLocationOp(0);
2454   Value *OriginalStorage = Storage;
2455   while (Storage) {
2456     if (auto *LdInst = dyn_cast<LoadInst>(Storage)) {
2457       Storage = LdInst->getOperand(0);
2458       // FIXME: This is a heuristic that works around the fact that
2459       // LLVM IR debug intrinsics cannot yet distinguish between
2460       // memory and value locations: Because a dbg.declare(alloca) is
2461       // implicitly a memory location no DW_OP_deref operation for the
2462       // last direct load from an alloca is necessary.  This condition
2463       // effectively drops the *last* DW_OP_deref in the expression.
2464       if (!OutermostLoad)
2465         Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2466       OutermostLoad = false;
2467     } else if (auto *StInst = dyn_cast<StoreInst>(Storage)) {
2468       Storage = StInst->getOperand(0);
2469     } else if (auto *GEPInst = dyn_cast<GetElementPtrInst>(Storage)) {
2470       SmallVector<Value *> AdditionalValues;
2471       DIExpression *SalvagedExpr = llvm::salvageDebugInfoImpl(
2472           *GEPInst, Expr,
2473           /*WithStackValue=*/false, 0, AdditionalValues);
2474       // Debug declares cannot currently handle additional location
2475       // operands.
2476       if (!SalvagedExpr || !AdditionalValues.empty())
2477         break;
2478       Expr = SalvagedExpr;
2479       Storage = GEPInst->getOperand(0);
2480     } else if (auto *BCInst = dyn_cast<llvm::BitCastInst>(Storage))
2481       Storage = BCInst->getOperand(0);
2482     else
2483       break;
2484   }
2485   if (!Storage)
2486     return;
2487 
2488   // Store a pointer to the coroutine frame object in an alloca so it
2489   // is available throughout the function when producing unoptimized
2490   // code. Extending the lifetime this way is correct because the
2491   // variable has been declared by a dbg.declare intrinsic.
2492   //
2493   // Avoid to create the alloca would be eliminated by optimization
2494   // passes and the corresponding dbg.declares would be invalid.
2495   if (!ReuseFrameSlot && !EnableReuseStorageInFrame)
2496     if (auto *Arg = dyn_cast<llvm::Argument>(Storage)) {
2497       auto &Cached = DbgPtrAllocaCache[Storage];
2498       if (!Cached) {
2499         Cached = Builder.CreateAlloca(Storage->getType(), 0, nullptr,
2500                                       Arg->getName() + ".debug");
2501         Builder.CreateStore(Storage, Cached);
2502       }
2503       Storage = Cached;
2504       // FIXME: LLVM lacks nuanced semantics to differentiate between
2505       // memory and direct locations at the IR level. The backend will
2506       // turn a dbg.declare(alloca, ..., DIExpression()) into a memory
2507       // location. Thus, if there are deref and offset operations in the
2508       // expression, we need to add a DW_OP_deref at the *start* of the
2509       // expression to first load the contents of the alloca before
2510       // adjusting it with the expression.
2511       if (Expr && Expr->isComplex())
2512         Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2513     }
2514 
2515   DVI->replaceVariableLocationOp(OriginalStorage, Storage);
2516   DVI->setExpression(Expr);
2517   /// It makes no sense to move the dbg.value intrinsic.
2518   if (!isa<DbgValueInst>(DVI)) {
2519     if (auto *InsertPt = dyn_cast<Instruction>(Storage))
2520       DVI->moveAfter(InsertPt);
2521     else if (isa<Argument>(Storage))
2522       DVI->moveAfter(F->getEntryBlock().getFirstNonPHI());
2523   }
2524 }
2525 
buildCoroutineFrame(Function & F,Shape & Shape)2526 void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
2527   // Don't eliminate swifterror in async functions that won't be split.
2528   if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
2529     eliminateSwiftError(F, Shape);
2530 
2531   if (Shape.ABI == coro::ABI::Switch &&
2532       Shape.SwitchLowering.PromiseAlloca) {
2533     Shape.getSwitchCoroId()->clearPromise();
2534   }
2535 
2536   // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
2537   // intrinsics are in their own blocks to simplify the logic of building up
2538   // SuspendCrossing data.
2539   for (auto *CSI : Shape.CoroSuspends) {
2540     if (auto *Save = CSI->getCoroSave())
2541       splitAround(Save, "CoroSave");
2542     splitAround(CSI, "CoroSuspend");
2543   }
2544 
2545   // Put CoroEnds into their own blocks.
2546   for (AnyCoroEndInst *CE : Shape.CoroEnds) {
2547     splitAround(CE, "CoroEnd");
2548 
2549     // Emit the musttail call function in a new block before the CoroEnd.
2550     // We do this here so that the right suspend crossing info is computed for
2551     // the uses of the musttail call function call. (Arguments to the coro.end
2552     // instructions would be ignored)
2553     if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
2554       auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
2555       if (!MustTailCallFn)
2556         continue;
2557       IRBuilder<> Builder(AsyncEnd);
2558       SmallVector<Value *, 8> Args(AsyncEnd->args());
2559       auto Arguments = ArrayRef<Value *>(Args).drop_front(3);
2560       auto *Call = createMustTailCall(AsyncEnd->getDebugLoc(), MustTailCallFn,
2561                                       Arguments, Builder);
2562       splitAround(Call, "MustTailCall.Before.CoroEnd");
2563     }
2564   }
2565 
2566   // Transforms multi-edge PHI Nodes, so that any value feeding into a PHI will
2567   // never has its definition separated from the PHI by the suspend point.
2568   rewritePHIs(F);
2569 
2570   // Build suspend crossing info.
2571   SuspendCrossingInfo Checker(F, Shape);
2572 
2573   IRBuilder<> Builder(F.getContext());
2574   FrameDataInfo FrameData;
2575   SmallVector<CoroAllocaAllocInst*, 4> LocalAllocas;
2576   SmallVector<Instruction*, 4> DeadInstructions;
2577 
2578   {
2579     SpillInfo Spills;
2580     for (int Repeat = 0; Repeat < 4; ++Repeat) {
2581       // See if there are materializable instructions across suspend points.
2582       for (Instruction &I : instructions(F))
2583         if (materializable(I)) {
2584           for (User *U : I.users())
2585             if (Checker.isDefinitionAcrossSuspend(I, U))
2586               Spills[&I].push_back(cast<Instruction>(U));
2587 
2588           // Manually add dbg.value metadata uses of I.
2589           SmallVector<DbgValueInst *, 16> DVIs;
2590           findDbgValues(DVIs, &I);
2591           for (auto *DVI : DVIs)
2592             if (Checker.isDefinitionAcrossSuspend(I, DVI))
2593               Spills[&I].push_back(DVI);
2594         }
2595 
2596       if (Spills.empty())
2597         break;
2598 
2599       // Rewrite materializable instructions to be materialized at the use
2600       // point.
2601       LLVM_DEBUG(dumpSpills("Materializations", Spills));
2602       rewriteMaterializableInstructions(Builder, Spills);
2603       Spills.clear();
2604     }
2605   }
2606 
2607   sinkLifetimeStartMarkers(F, Shape, Checker);
2608   if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
2609     collectFrameAllocas(F, Shape, Checker, FrameData.Allocas);
2610   LLVM_DEBUG(dumpAllocas(FrameData.Allocas));
2611 
2612   // Collect the spills for arguments and other not-materializable values.
2613   for (Argument &A : F.args())
2614     for (User *U : A.users())
2615       if (Checker.isDefinitionAcrossSuspend(A, U))
2616         FrameData.Spills[&A].push_back(cast<Instruction>(U));
2617 
2618   for (Instruction &I : instructions(F)) {
2619     // Values returned from coroutine structure intrinsics should not be part
2620     // of the Coroutine Frame.
2621     if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
2622       continue;
2623 
2624     // The Coroutine Promise always included into coroutine frame, no need to
2625     // check for suspend crossing.
2626     if (Shape.ABI == coro::ABI::Switch &&
2627         Shape.SwitchLowering.PromiseAlloca == &I)
2628       continue;
2629 
2630     // Handle alloca.alloc specially here.
2631     if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
2632       // Check whether the alloca's lifetime is bounded by suspend points.
2633       if (isLocalAlloca(AI)) {
2634         LocalAllocas.push_back(AI);
2635         continue;
2636       }
2637 
2638       // If not, do a quick rewrite of the alloca and then add spills of
2639       // the rewritten value.  The rewrite doesn't invalidate anything in
2640       // Spills because the other alloca intrinsics have no other operands
2641       // besides AI, and it doesn't invalidate the iteration because we delay
2642       // erasing AI.
2643       auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);
2644 
2645       for (User *U : Alloc->users()) {
2646         if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
2647           FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
2648       }
2649       continue;
2650     }
2651 
2652     // Ignore alloca.get; we process this as part of coro.alloca.alloc.
2653     if (isa<CoroAllocaGetInst>(I))
2654       continue;
2655 
2656     if (isa<AllocaInst>(I))
2657       continue;
2658 
2659     for (User *U : I.users())
2660       if (Checker.isDefinitionAcrossSuspend(I, U)) {
2661         // We cannot spill a token.
2662         if (I.getType()->isTokenTy())
2663           report_fatal_error(
2664               "token definition is separated from the use by a suspend point");
2665         FrameData.Spills[&I].push_back(cast<Instruction>(U));
2666       }
2667   }
2668 
2669   // We don't want the layout of coroutine frame to be affected
2670   // by debug information. So we only choose to salvage DbgValueInst for
2671   // whose value is already in the frame.
2672   // We would handle the dbg.values for allocas specially
2673   for (auto &Iter : FrameData.Spills) {
2674     auto *V = Iter.first;
2675     SmallVector<DbgValueInst *, 16> DVIs;
2676     findDbgValues(DVIs, V);
2677     llvm::for_each(DVIs, [&](DbgValueInst *DVI) {
2678       if (Checker.isDefinitionAcrossSuspend(*V, DVI))
2679         FrameData.Spills[V].push_back(DVI);
2680     });
2681   }
2682 
2683   LLVM_DEBUG(dumpSpills("Spills", FrameData.Spills));
2684   if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
2685       Shape.ABI == coro::ABI::Async)
2686     sinkSpillUsesAfterCoroBegin(F, FrameData, Shape.CoroBegin);
2687   Shape.FrameTy = buildFrameType(F, Shape, FrameData);
2688   createFramePtr(Shape);
2689   // For now, this works for C++ programs only.
2690   buildFrameDebugInfo(F, Shape, FrameData);
2691   insertSpills(FrameData, Shape);
2692   lowerLocalAllocas(LocalAllocas, DeadInstructions);
2693 
2694   for (auto I : DeadInstructions)
2695     I->eraseFromParent();
2696 }
2697