//===- CoroInternal.h - Internal Coroutine interfaces ---------*- C++ -*---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// Common definitions/declarations used internally by coroutine lowering passes.
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TRANSFORMS_COROUTINES_COROINTERNAL_H
#define LLVM_LIB_TRANSFORMS_COROUTINES_COROINTERNAL_H

#include "CoroInstr.h"
#include "llvm/IR/IRBuilder.h"

namespace llvm {

class CallGraph;
class CallGraphSCC;
class PassRegistry;

namespace coro {

/// Returns true if the module declares any of the coroutine intrinsics.
bool declaresAnyIntrinsic(const Module &M);
/// Returns true if the module declares any intrinsic from the given
/// name list.
bool declaresIntrinsics(const Module &M,
                        const std::initializer_list<StringRef>);
/// Replaces all coro.free intrinsics associated with \p CoroId; if \p Elide
/// is set the frees are elided (frame allocation was elided).
void replaceCoroFree(CoroIdInst *CoroId, bool Elide);

/// Recover a dbg.declare prepared by the frontend and emit an alloca
/// holding a pointer to the coroutine frame.
void salvageDebugInfo(
    SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> &DbgPtrAllocaCache,
    DbgVariableIntrinsic *DVI, bool OptimizeFrame);

// Keeps data and helper functions for lowering coroutine intrinsics.
struct LowererBase {
  Module &TheModule;
  LLVMContext &Context;
  PointerType *const Int8Ptr;
  FunctionType *const ResumeFnType;
  ConstantPointerNull *const NullPtr;

  LowererBase(Module &M);
  // Emits a call that selects the resume/destroy sub-function for \p Arg at
  // index \p Index, inserted before \p InsertPt.
  Value *makeSubFnCall(Value *Arg, int Index, Instruction *InsertPt);
};

/// The coroutine lowering strategies supported by these passes. The active
/// ABI selects which member of Shape's union below is valid.
enum class ABI {
  /// The "resume-switch" lowering, where there are separate resume and
  /// destroy functions that are shared between all suspend points. The
  /// coroutine frame implicitly stores the resume and destroy functions,
  /// the current index, and any promise value.
  Switch,

  /// The "returned-continuation" lowering, where each suspend point creates a
  /// single continuation function that is used for both resuming and
  /// destroying. Does not support promises.
  Retcon,

  /// The "unique returned-continuation" lowering, where each suspend point
  /// creates a single continuation function that is used for both resuming
  /// and destroying. Does not support promises. The function is known to
  /// suspend at most once during its execution, and the return value of
  /// the continuation is void.
  RetconOnce,

  /// The "async continuation" lowering, where each suspend point creates a
  /// single continuation function. The continuation function is available as an
  /// intrinsic.
  Async,
};

// Holds structural Coroutine Intrinsics for a particular function and other
// values used during CoroSplit pass.
struct LLVM_LIBRARY_VISIBILITY Shape {
  // The single coro.begin of the function, plus all of its structural
  // companions collected by buildFrom().
  CoroBeginInst *CoroBegin;
  SmallVector<AnyCoroEndInst *, 4> CoroEnds;
  SmallVector<CoroSizeInst *, 2> CoroSizes;
  SmallVector<CoroAlignInst *, 2> CoroAligns;
  SmallVector<AnyCoroSuspendInst *, 4> CoroSuspends;
  SmallVector<CallInst*, 2> SwiftErrorOps;

  // Field indexes for special fields in the switch lowering.
  struct SwitchFieldIndex {
    enum {
      Resume,
      Destroy

      // The promise field is always at a fixed offset from the start of
      // frame given its type, but the index isn't a constant for all
      // possible frames.

      // The switch-index field isn't at a fixed offset or index, either;
      // we just work it in where it fits best.
    };
  };

  // Which lowering is in effect; gates which union member below may be read.
  coro::ABI ABI;

  // The computed coroutine frame: its LLVM type, layout, and the value that
  // points at it inside the function being split.
  StructType *FrameTy;
  Align FrameAlign;
  uint64_t FrameSize;
  Value *FramePtr;
  // Block into which spills of allocas to the frame are emitted.
  BasicBlock *AllocaSpillBlock;

  /// This would only be true if optimization are enabled.
  bool OptimizeFrame;

  // State specific to the switch (resume-switch) lowering.
  struct SwitchLoweringStorage {
    SwitchInst *ResumeSwitch;
    AllocaInst *PromiseAlloca;
    BasicBlock *ResumeEntryBlock;
    unsigned IndexField;
    unsigned IndexAlign;
    unsigned IndexOffset;
    bool HasFinalSuspend;
  };

  // State specific to the retcon / retcon.once lowerings.
  struct RetconLoweringStorage {
    Function *ResumePrototype;
    Function *Alloc;
    Function *Dealloc;
    BasicBlock *ReturnBlock;
    bool IsFrameInlineInStorage;
  };

  // State specific to the async lowering.
  struct AsyncLoweringStorage {
    FunctionType *AsyncFuncTy;
    Value *Context;
    CallingConv::ID AsyncCC;
    unsigned ContextArgNo;
    uint64_t ContextHeaderSize;
    uint64_t ContextAlignment;
    uint64_t FrameOffset; // Start of the frame.
    uint64_t ContextSize; // Includes frame size.
    GlobalVariable *AsyncFuncPointer;

    Align getContextAlignment() const { return Align(ContextAlignment); }
  };

  // Exactly one member is active, selected by the ABI field above; the
  // accessors below assert the matching ABI before touching it.
  union {
    SwitchLoweringStorage SwitchLowering;
    RetconLoweringStorage RetconLowering;
    AsyncLoweringStorage AsyncLowering;
  };

  // ABI-checked accessors for the coro.id instruction behind coro.begin.
  CoroIdInst *getSwitchCoroId() const {
    assert(ABI == coro::ABI::Switch);
    return cast<CoroIdInst>(CoroBegin->getId());
  }

  AnyCoroIdRetconInst *getRetconCoroId() const {
    assert(ABI == coro::ABI::Retcon ||
           ABI == coro::ABI::RetconOnce);
    return cast<AnyCoroIdRetconInst>(CoroBegin->getId());
  }

  CoroIdAsyncInst *getAsyncCoroId() const {
    assert(ABI == coro::ABI::Async);
    return cast<CoroIdAsyncInst>(CoroBegin->getId());
  }

  // Switch lowering: index field of the frame and helpers built on it.
  // All require FrameTy to have been computed already.
  unsigned getSwitchIndexField() const {
    assert(ABI == coro::ABI::Switch);
    assert(FrameTy && "frame type not assigned");
    return SwitchLowering.IndexField;
  }
  IntegerType *getIndexType() const {
    assert(ABI == coro::ABI::Switch);
    assert(FrameTy && "frame type not assigned");
    return cast<IntegerType>(FrameTy->getElementType(getSwitchIndexField()));
  }
  ConstantInt *getIndex(uint64_t Value) const {
    return ConstantInt::get(getIndexType(), Value);
  }

  PointerType *getSwitchResumePointerType() const {
    assert(ABI == coro::ABI::Switch);
    assert(FrameTy && "frame type not assigned");
    return cast<PointerType>(FrameTy->getElementType(SwitchFieldIndex::Resume));
  }

  // Type of the resume function for the active ABI; null for Async, whose
  // continuation type depends on the active suspend point.
  FunctionType *getResumeFunctionType() const {
    switch (ABI) {
    case coro::ABI::Switch:
      return FunctionType::get(Type::getVoidTy(FrameTy->getContext()),
                               FrameTy->getPointerTo(), /*IsVarArg*/false);
    case coro::ABI::Retcon:
    case coro::ABI::RetconOnce:
      return RetconLowering.ResumePrototype->getFunctionType();
    case coro::ABI::Async:
      // Not used. The function type depends on the active suspend.
      return nullptr;
    }

    llvm_unreachable("Unknown coro::ABI enum");
  }

  // Retcon ABIs: result types of the ramp function, i.e. the elements of its
  // struct return after the leading continuation pointer (hence slice(1)).
  ArrayRef<Type*> getRetconResultTypes() const {
    assert(ABI == coro::ABI::Retcon ||
           ABI == coro::ABI::RetconOnce);
    auto FTy = CoroBegin->getFunction()->getFunctionType();

    // The safety of all this is checked by checkWFRetconPrototype.
    if (auto STy = dyn_cast<StructType>(FTy->getReturnType())) {
      return STy->elements().slice(1);
    } else {
      return ArrayRef<Type*>();
    }
  }

  // Retcon ABIs: parameter types a continuation is resumed with, i.e. the
  // prototype's parameters after the leading frame pointer (hence slice(1)).
  ArrayRef<Type*> getRetconResumeTypes() const {
    assert(ABI == coro::ABI::Retcon ||
           ABI == coro::ABI::RetconOnce);

    // The safety of all this is checked by checkWFRetconPrototype.
    auto FTy = RetconLowering.ResumePrototype->getFunctionType();
    return FTy->params().slice(1);
  }

  // Calling convention used for the resume/continuation functions of the
  // active ABI.
  CallingConv::ID getResumeFunctionCC() const {
    switch (ABI) {
    case coro::ABI::Switch:
      return CallingConv::Fast;

    case coro::ABI::Retcon:
    case coro::ABI::RetconOnce:
      return RetconLowering.ResumePrototype->getCallingConv();
    case coro::ABI::Async:
      return AsyncLowering.AsyncCC;
    }
    llvm_unreachable("Unknown coro::ABI enum");
  }

  // Only the switch lowering carries a promise alloca; null otherwise.
  AllocaInst *getPromiseAlloca() const {
    if (ABI == coro::ABI::Switch)
      return SwitchLowering.PromiseAlloca;
    return nullptr;
  }

  // First point at which the frame pointer is known to be valid: right after
  // its defining instruction, or the start of the entry block when FramePtr
  // is a function argument.
  Instruction *getInsertPtAfterFramePtr() const {
    if (auto *I = dyn_cast<Instruction>(FramePtr))
      return I->getNextNode();
    return &cast<Argument>(FramePtr)->getParent()->getEntryBlock().front();
  }

  /// Allocate memory according to the rules of the active lowering.
  ///
  /// \param CG - if non-null, will be updated for the new call
  Value *emitAlloc(IRBuilder<> &Builder, Value *Size, CallGraph *CG) const;

  /// Deallocate memory according to the rules of the active lowering.
  ///
  /// \param CG - if non-null, will be updated for the new call
  void emitDealloc(IRBuilder<> &Builder, Value *Ptr, CallGraph *CG) const;

  Shape() = default;
  explicit Shape(Function &F, bool OptimizeFrame = false)
      : OptimizeFrame(OptimizeFrame) {
    buildFrom(F);
  }
  void buildFrom(Function &F);
};

/// Computes the frame layout for \p F and rewrites accesses to use it,
/// filling in the frame-related members of \p Shape.
void buildCoroutineFrame(Function &F, Shape &Shape);
/// Creates a musttail call to \p MustTailCallFn with \p Arguments at the
/// builder's insertion point, carrying \p Loc as its debug location.
CallInst *createMustTailCall(DebugLoc Loc, Function *MustTailCallFn,
                             ArrayRef<Value *> Arguments, IRBuilder<> &);
} // End namespace coro.
} // End namespace llvm

#endif