1 //===- CoroInternal.h - Internal Coroutine interfaces ---------*- C++ -*---===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 // Common definitions/declarations used internally by coroutine lowering passes.
9 //===----------------------------------------------------------------------===//
10 
11 #ifndef LLVM_LIB_TRANSFORMS_COROUTINES_COROINTERNAL_H
12 #define LLVM_LIB_TRANSFORMS_COROUTINES_COROINTERNAL_H
13 
14 #include "CoroInstr.h"
15 #include "llvm/IR/IRBuilder.h"
16 #include "llvm/Transforms/Coroutines.h"
17 
18 namespace llvm {
19 
20 class CallGraph;
21 class CallGraphSCC;
22 class PassRegistry;
23 
24 void initializeCoroEarlyLegacyPass(PassRegistry &);
25 void initializeCoroSplitLegacyPass(PassRegistry &);
26 void initializeCoroElideLegacyPass(PassRegistry &);
27 void initializeCoroCleanupLegacyPass(PassRegistry &);
28 
29 // CoroEarly pass marks every function that has coro.begin with a string
30 // attribute "coroutine.presplit"="0". CoroSplit pass processes the coroutine
31 // twice. First, it lets it go through complete IPO optimization pipeline as a
32 // single function. It forces restart of the pipeline by inserting an indirect
33 // call to an empty function "coro.devirt.trigger" which is devirtualized by
34 // CoroElide pass that triggers a restart of the pipeline by CGPassManager.
35 // When CoroSplit pass sees the same coroutine the second time, it splits it up,
36 // adds coroutine subfunctions to the SCC to be processed by IPO pipeline.
// Async lowering similarly triggers a restart of the pipeline after it has
38 // split the coroutine.
39 #define CORO_PRESPLIT_ATTR "coroutine.presplit"
40 #define UNPREPARED_FOR_SPLIT "0"
41 #define PREPARED_FOR_SPLIT "1"
42 #define ASYNC_RESTART_AFTER_SPLIT "2"
43 
44 #define CORO_DEVIRT_TRIGGER_FN "coro.devirt.trigger"
45 
46 namespace coro {
47 
48 bool declaresIntrinsics(const Module &M,
49                         const std::initializer_list<StringRef>);
50 void replaceCoroFree(CoroIdInst *CoroId, bool Elide);
51 void updateCallGraph(Function &Caller, ArrayRef<Function *> Funcs,
52                      CallGraph &CG, CallGraphSCC &SCC);
53 /// Recover a dbg.declare prepared by the frontend and emit an alloca
54 /// holding a pointer to the coroutine frame.
55 void salvageDebugInfo(
56     SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> &DbgPtrAllocaCache,
57     DbgVariableIntrinsic *DVI, bool ReuseFrameSlot);
58 
// Keeps data and helper functions for lowering coroutine intrinsics.
struct LowererBase {
  Module &TheModule;    // Module whose coroutine intrinsics are being lowered.
  LLVMContext &Context; // Context of TheModule.
  // Cached i8* type used when building sub-function calls.
  PointerType *const Int8Ptr;
  // Cached function type of the coroutine sub-functions (resume/destroy).
  FunctionType *const ResumeFnType;
  // Cached null pointer constant (presumably of Int8Ptr type -- see ctor).
  ConstantPointerNull *const NullPtr;

  // Initializes the cached types/constants above from M.
  LowererBase(Module &M);

  // Creates IR, inserted before InsertPt, that retrieves the coroutine
  // sub-function with the given Index for the coroutine handle Arg, and
  // returns the resulting value.
  Value *makeSubFnCall(Value *Arg, int Index, Instruction *InsertPt);
};
70 
/// The coroutine lowering strategies ("ABIs") supported by these passes.
enum class ABI {
  /// The "resume-switch" lowering, where there are separate resume and
  /// destroy functions that are shared between all suspend points.  The
  /// coroutine frame implicitly stores the resume and destroy functions,
  /// the current index, and any promise value.
  Switch,

  /// The "returned-continuation" lowering, where each suspend point creates a
  /// single continuation function that is used for both resuming and
  /// destroying.  Does not support promises.
  Retcon,

  /// The "unique returned-continuation" lowering, where each suspend point
  /// creates a single continuation function that is used for both resuming
  /// and destroying.  Does not support promises.  The function is known to
  /// suspend at most once during its execution, and the return value of
  /// the continuation is void.
  RetconOnce,

  /// The "async continuation" lowering, where each suspend point creates a
  /// single continuation function. The continuation function is available as an
  /// intrinsic.
  Async,
};
95 
96 // Holds structural Coroutine Intrinsics for a particular function and other
97 // values used during CoroSplit pass.
98 struct LLVM_LIBRARY_VISIBILITY Shape {
99   CoroBeginInst *CoroBegin;
100   SmallVector<AnyCoroEndInst *, 4> CoroEnds;
101   SmallVector<CoroSizeInst *, 2> CoroSizes;
102   SmallVector<AnyCoroSuspendInst *, 4> CoroSuspends;
103   SmallVector<CallInst*, 2> SwiftErrorOps;
104 
105   // Field indexes for special fields in the switch lowering.
106   struct SwitchFieldIndex {
107     enum {
108       Resume,
109       Destroy
110 
111       // The promise field is always at a fixed offset from the start of
112       // frame given its type, but the index isn't a constant for all
113       // possible frames.
114 
115       // The switch-index field isn't at a fixed offset or index, either;
116       // we just work it in where it fits best.
117     };
118   };
119 
120   coro::ABI ABI;
121 
122   StructType *FrameTy;
123   Align FrameAlign;
124   uint64_t FrameSize;
125   Instruction *FramePtr;
126   BasicBlock *AllocaSpillBlock;
127 
128   /// This would only be true if optimization are enabled.
129   bool ReuseFrameSlot;
130 
131   struct SwitchLoweringStorage {
132     SwitchInst *ResumeSwitch;
133     AllocaInst *PromiseAlloca;
134     BasicBlock *ResumeEntryBlock;
135     unsigned IndexField;
136     unsigned IndexAlign;
137     unsigned IndexOffset;
138     bool HasFinalSuspend;
139   };
140 
141   struct RetconLoweringStorage {
142     Function *ResumePrototype;
143     Function *Alloc;
144     Function *Dealloc;
145     BasicBlock *ReturnBlock;
146     bool IsFrameInlineInStorage;
147   };
148 
149   struct AsyncLoweringStorage {
150     FunctionType *AsyncFuncTy;
151     Value *Context;
152     CallingConv::ID AsyncCC;
153     unsigned ContextArgNo;
154     uint64_t ContextHeaderSize;
155     uint64_t ContextAlignment;
156     uint64_t FrameOffset; // Start of the frame.
157     uint64_t ContextSize; // Includes frame size.
158     GlobalVariable *AsyncFuncPointer;
159 
getContextAlignmentShape::AsyncLoweringStorage160     Align getContextAlignment() const { return Align(ContextAlignment); }
161   };
162 
163   union {
164     SwitchLoweringStorage SwitchLowering;
165     RetconLoweringStorage RetconLowering;
166     AsyncLoweringStorage AsyncLowering;
167   };
168 
getSwitchCoroIdShape169   CoroIdInst *getSwitchCoroId() const {
170     assert(ABI == coro::ABI::Switch);
171     return cast<CoroIdInst>(CoroBegin->getId());
172   }
173 
getRetconCoroIdShape174   AnyCoroIdRetconInst *getRetconCoroId() const {
175     assert(ABI == coro::ABI::Retcon ||
176            ABI == coro::ABI::RetconOnce);
177     return cast<AnyCoroIdRetconInst>(CoroBegin->getId());
178   }
179 
getAsyncCoroIdShape180   CoroIdAsyncInst *getAsyncCoroId() const {
181     assert(ABI == coro::ABI::Async);
182     return cast<CoroIdAsyncInst>(CoroBegin->getId());
183   }
184 
getSwitchIndexFieldShape185   unsigned getSwitchIndexField() const {
186     assert(ABI == coro::ABI::Switch);
187     assert(FrameTy && "frame type not assigned");
188     return SwitchLowering.IndexField;
189   }
getIndexTypeShape190   IntegerType *getIndexType() const {
191     assert(ABI == coro::ABI::Switch);
192     assert(FrameTy && "frame type not assigned");
193     return cast<IntegerType>(FrameTy->getElementType(getSwitchIndexField()));
194   }
getIndexShape195   ConstantInt *getIndex(uint64_t Value) const {
196     return ConstantInt::get(getIndexType(), Value);
197   }
198 
getSwitchResumePointerTypeShape199   PointerType *getSwitchResumePointerType() const {
200     assert(ABI == coro::ABI::Switch);
201   assert(FrameTy && "frame type not assigned");
202   return cast<PointerType>(FrameTy->getElementType(SwitchFieldIndex::Resume));
203   }
204 
getResumeFunctionTypeShape205   FunctionType *getResumeFunctionType() const {
206     switch (ABI) {
207     case coro::ABI::Switch: {
208       auto *FnPtrTy = getSwitchResumePointerType();
209       return cast<FunctionType>(FnPtrTy->getPointerElementType());
210     }
211     case coro::ABI::Retcon:
212     case coro::ABI::RetconOnce:
213       return RetconLowering.ResumePrototype->getFunctionType();
214     case coro::ABI::Async:
215       // Not used. The function type depends on the active suspend.
216       return nullptr;
217     }
218 
219     llvm_unreachable("Unknown coro::ABI enum");
220   }
221 
getRetconResultTypesShape222   ArrayRef<Type*> getRetconResultTypes() const {
223     assert(ABI == coro::ABI::Retcon ||
224            ABI == coro::ABI::RetconOnce);
225     auto FTy = CoroBegin->getFunction()->getFunctionType();
226 
227     // The safety of all this is checked by checkWFRetconPrototype.
228     if (auto STy = dyn_cast<StructType>(FTy->getReturnType())) {
229       return STy->elements().slice(1);
230     } else {
231       return ArrayRef<Type*>();
232     }
233   }
234 
getRetconResumeTypesShape235   ArrayRef<Type*> getRetconResumeTypes() const {
236     assert(ABI == coro::ABI::Retcon ||
237            ABI == coro::ABI::RetconOnce);
238 
239     // The safety of all this is checked by checkWFRetconPrototype.
240     auto FTy = RetconLowering.ResumePrototype->getFunctionType();
241     return FTy->params().slice(1);
242   }
243 
getResumeFunctionCCShape244   CallingConv::ID getResumeFunctionCC() const {
245     switch (ABI) {
246     case coro::ABI::Switch:
247       return CallingConv::Fast;
248 
249     case coro::ABI::Retcon:
250     case coro::ABI::RetconOnce:
251       return RetconLowering.ResumePrototype->getCallingConv();
252     case coro::ABI::Async:
253       return AsyncLowering.AsyncCC;
254     }
255     llvm_unreachable("Unknown coro::ABI enum");
256   }
257 
getPromiseAllocaShape258   AllocaInst *getPromiseAlloca() const {
259     if (ABI == coro::ABI::Switch)
260       return SwitchLowering.PromiseAlloca;
261     return nullptr;
262   }
263 
264   /// Allocate memory according to the rules of the active lowering.
265   ///
266   /// \param CG - if non-null, will be updated for the new call
267   Value *emitAlloc(IRBuilder<> &Builder, Value *Size, CallGraph *CG) const;
268 
269   /// Deallocate memory according to the rules of the active lowering.
270   ///
271   /// \param CG - if non-null, will be updated for the new call
272   void emitDealloc(IRBuilder<> &Builder, Value *Ptr, CallGraph *CG) const;
273 
274   Shape() = default;
275   explicit Shape(Function &F, bool ReuseFrameSlot = false)
ReuseFrameSlotShape276       : ReuseFrameSlot(ReuseFrameSlot) {
277     buildFrom(F);
278   }
279   void buildFrom(Function &F);
280 };
281 
282 void buildCoroutineFrame(Function &F, Shape &Shape);
283 CallInst *createMustTailCall(DebugLoc Loc, Function *MustTailCallFn,
284                              ArrayRef<Value *> Arguments, IRBuilder<> &);
285 } // End namespace coro.
286 } // End namespace llvm
287 
288 #endif
289