//===- CoroInternal.h - Internal Coroutine interfaces ---------*- C++ -*---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// Common definitions/declarations used internally by coroutine lowering passes.
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TRANSFORMS_COROUTINES_COROINTERNAL_H
#define LLVM_LIB_TRANSFORMS_COROUTINES_COROINTERNAL_H

#include "CoroInstr.h"
#include "llvm/IR/IRBuilder.h"

namespace llvm {

class CallGraph;

namespace coro {

/// Returns true if \p M declares any coroutine intrinsic.
bool declaresAnyIntrinsic(const Module &M);

/// Returns true if \p M declares intrinsics from the given name list.
/// NOTE(review): whether this means "any of" or "all of" the listed names is
/// decided in the implementation file — confirm there before relying on it.
bool declaresIntrinsics(const Module &M,
                        const std::initializer_list<StringRef>);

/// Rewrites the llvm.coro.free intrinsics associated with \p CoroId.
/// \p Elide presumably selects the elided (heap-allocation-free) form —
/// confirm against the implementation.
void replaceCoroFree(CoroIdInst *CoroId, bool Elide);

/// Recover a dbg.declare prepared by the frontend and emit an alloca
/// holding a pointer to the coroutine frame.
///
/// \param DbgPtrAllocaCache - cache of frame-pointer allocas already emitted,
///        keyed by the salvaged value, to avoid duplicates.
/// \param DVI - the debug intrinsic to salvage.
/// \param OptimizeFrame - mirrors Shape::OptimizeFrame below.
void salvageDebugInfo(
    SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> &DbgPtrAllocaCache,
    DbgVariableIntrinsic *DVI, bool OptimizeFrame);

// Keeps data and helper functions for lowering coroutine intrinsics.
struct LowererBase {
  Module &TheModule;                  // Module being transformed.
  LLVMContext &Context;               // Context of TheModule.
  PointerType *const Int8Ptr;         // Cached i8* type (per the name; set in ctor).
  FunctionType *const ResumeFnType;   // Common type of resume/destroy functions.
  ConstantPointerNull *const NullPtr; // Cached null-pointer constant.

  LowererBase(Module &M);

  /// Create a call selecting the \p Index'th sub-function (e.g. resume or
  /// destroy) of the coroutine denoted by \p Arg, inserted before
  /// \p InsertPt. Exact call shape is defined in the implementation.
  Value *makeSubFnCall(Value *Arg, int Index, Instruction *InsertPt);
};

enum class ABI {
  /// The "resume-switch" lowering, where there are separate resume and
  /// destroy functions that are shared between all suspend points. The
  /// coroutine frame implicitly stores the resume and destroy functions,
  /// the current index, and any promise value.
  Switch,

  /// The "returned-continuation" lowering, where each suspend point creates a
  /// single continuation function that is used for both resuming and
  /// destroying. Does not support promises.
  Retcon,

  /// The "unique returned-continuation" lowering, where each suspend point
  /// creates a single continuation function that is used for both resuming
  /// and destroying. Does not support promises. The function is known to
  /// suspend at most once during its execution, and the return value of
  /// the continuation is void.
  RetconOnce,

  /// The "async continuation" lowering, where each suspend point creates a
  /// single continuation function. The continuation function is available as an
  /// intrinsic.
  Async,
};

// Holds structural Coroutine Intrinsics for a particular function and other
// values used during CoroSplit pass.
struct LLVM_LIBRARY_VISIBILITY Shape {
  // Coroutine intrinsics collected from the function (populated by buildFrom).
  CoroBeginInst *CoroBegin;                          // The coro.begin call.
  SmallVector<AnyCoroEndInst *, 4> CoroEnds;         // All coro.end markers.
  SmallVector<CoroSizeInst *, 2> CoroSizes;          // All coro.size calls.
  SmallVector<CoroAlignInst *, 2> CoroAligns;        // All coro.align calls.
  SmallVector<AnyCoroSuspendInst *, 4> CoroSuspends; // All suspend points.
  // Calls involving swifterror — NOTE(review): exact membership criteria are
  // established where this is populated; confirm in the implementation.
  SmallVector<CallInst*, 2> SwiftErrorOps;

  // Field indexes for special fields in the switch lowering.
  struct SwitchFieldIndex {
    enum {
      Resume,
      Destroy

      // The promise field is always at a fixed offset from the start of
      // frame given its type, but the index isn't a constant for all
      // possible frames.

      // The switch-index field isn't at a fixed offset or index, either;
      // we just work it in where it fits best.
    };
  };

  // Active lowering strategy; selects which member of the union below is
  // valid (see the assertions in the accessors further down).
  coro::ABI ABI;

  StructType *FrameTy;          // Type of the coroutine frame.
  Align FrameAlign;             // Alignment of the frame.
  uint64_t FrameSize;           // Size of the frame, in bytes.
  Value *FramePtr;              // Frame pointer: an Instruction or an Argument
                                // (see getInsertPtAfterFramePtr below).
  BasicBlock *AllocaSpillBlock; // Block holding spilled allocas — confirm
                                // exact role where it is assigned.

  /// This would only be true if optimizations are enabled.
  bool OptimizeFrame;

  // State specific to ABI::Switch.
  struct SwitchLoweringStorage {
    SwitchInst *ResumeSwitch;
    AllocaInst *PromiseAlloca;
    BasicBlock *ResumeEntryBlock;
    unsigned IndexField;   // Index of the suspend-index field in FrameTy.
    unsigned IndexAlign;
    unsigned IndexOffset;
    bool HasFinalSuspend;  // Whether the coroutine has a final suspend point.
  };

  // State specific to ABI::Retcon / ABI::RetconOnce.
  struct RetconLoweringStorage {
    Function *ResumePrototype;     // Prototype whose type/CC continuations copy.
    Function *Alloc;               // Frame allocation function.
    Function *Dealloc;             // Frame deallocation function.
    BasicBlock *ReturnBlock;
    bool IsFrameInlineInStorage;   // Frame fits in caller-provided storage.
  };

  // State specific to ABI::Async.
  struct AsyncLoweringStorage {
    FunctionType *AsyncFuncTy;
    Value *Context;                // The async context value.
    CallingConv::ID AsyncCC;       // Calling convention of continuations.
    unsigned ContextArgNo;         // Index of the context argument.
    uint64_t ContextHeaderSize;
    uint64_t ContextAlignment;
    uint64_t FrameOffset; // Start of the frame.
    uint64_t ContextSize; // Includes frame size.
    GlobalVariable *AsyncFuncPointer;

    // ContextAlignment as an Align; Align's ctor requires a power of two.
    Align getContextAlignment() const { return Align(ContextAlignment); }
  };

  // Exactly one member is active, chosen by the ABI field above.
  union {
    SwitchLoweringStorage SwitchLowering;
    RetconLoweringStorage RetconLowering;
    AsyncLoweringStorage AsyncLowering;
  };

  /// The coro.id of a switch-lowered coroutine (asserts ABI == Switch).
  CoroIdInst *getSwitchCoroId() const {
    assert(ABI == coro::ABI::Switch);
    return cast<CoroIdInst>(CoroBegin->getId());
  }

  /// The coro.id of a retcon/retcon-once coroutine.
  AnyCoroIdRetconInst *getRetconCoroId() const {
    assert(ABI == coro::ABI::Retcon ||
           ABI == coro::ABI::RetconOnce);
    return cast<AnyCoroIdRetconInst>(CoroBegin->getId());
  }

  /// The coro.id of an async coroutine (asserts ABI == Async).
  CoroIdAsyncInst *getAsyncCoroId() const {
    assert(ABI == coro::ABI::Async);
    return cast<CoroIdAsyncInst>(CoroBegin->getId());
  }

  /// Index of the suspend-index field in FrameTy. Only valid after the frame
  /// type has been built (switch lowering only).
  unsigned getSwitchIndexField() const {
    assert(ABI == coro::ABI::Switch);
    assert(FrameTy && "frame type not assigned");
    return SwitchLowering.IndexField;
  }
  /// Integer type of the suspend-index frame field (switch lowering only).
  IntegerType *getIndexType() const {
    assert(ABI == coro::ABI::Switch);
    assert(FrameTy && "frame type not assigned");
    return cast<IntegerType>(FrameTy->getElementType(getSwitchIndexField()));
  }
  /// \p Value as a constant of the suspend-index field's type.
  ConstantInt *getIndex(uint64_t Value) const {
    return ConstantInt::get(getIndexType(), Value);
  }

  /// Pointer type of the resume-function frame field (switch lowering only).
  PointerType *getSwitchResumePointerType() const {
    assert(ABI == coro::ABI::Switch);
    assert(FrameTy && "frame type not assigned");
    return cast<PointerType>(FrameTy->getElementType(SwitchFieldIndex::Resume));
  }

  /// Function type of the resume function for the active lowering.
  /// Returns null for ABI::Async (see below).
  FunctionType *getResumeFunctionType() const {
    switch (ABI) {
    case coro::ABI::Switch:
      // void(FrameTy*): resume/destroy take only the frame pointer.
      return FunctionType::get(Type::getVoidTy(FrameTy->getContext()),
                               FrameTy->getPointerTo(), /*IsVarArg*/false);
    case coro::ABI::Retcon:
    case coro::ABI::RetconOnce:
      return RetconLowering.ResumePrototype->getFunctionType();
    case coro::ABI::Async:
      // Not used. The function type depends on the active suspend.
      return nullptr;
    }

    llvm_unreachable("Unknown coro::ABI enum");
  }

  /// The non-continuation result types of the coroutine's return struct
  /// (everything after the leading continuation element), or empty if the
  /// coroutine does not return a struct.
  ArrayRef<Type*> getRetconResultTypes() const {
    assert(ABI == coro::ABI::Retcon ||
           ABI == coro::ABI::RetconOnce);
    auto FTy = CoroBegin->getFunction()->getFunctionType();

    // The safety of all this is checked by checkWFRetconPrototype.
    if (auto STy = dyn_cast<StructType>(FTy->getReturnType())) {
      return STy->elements().slice(1);
    } else {
      return ArrayRef<Type*>();
    }
  }

  /// The parameter types a continuation receives on resume, i.e. the
  /// resume prototype's parameters minus its leading (frame) parameter.
  ArrayRef<Type*> getRetconResumeTypes() const {
    assert(ABI == coro::ABI::Retcon ||
           ABI == coro::ABI::RetconOnce);

    // The safety of all this is checked by checkWFRetconPrototype.
    auto FTy = RetconLowering.ResumePrototype->getFunctionType();
    return FTy->params().slice(1);
  }

  /// Calling convention used for resume/destroy/continuation functions
  /// under the active lowering.
  CallingConv::ID getResumeFunctionCC() const {
    switch (ABI) {
    case coro::ABI::Switch:
      return CallingConv::Fast;

    case coro::ABI::Retcon:
    case coro::ABI::RetconOnce:
      return RetconLowering.ResumePrototype->getCallingConv();
    case coro::ABI::Async:
      return AsyncLowering.AsyncCC;
    }
    llvm_unreachable("Unknown coro::ABI enum");
  }

  /// The promise alloca, if any. Only the switch lowering supports promises;
  /// all other ABIs return null.
  AllocaInst *getPromiseAlloca() const {
    if (ABI == coro::ABI::Switch)
      return SwitchLowering.PromiseAlloca;
    return nullptr;
  }

  /// First valid insertion point after the frame pointer is available:
  /// the instruction after FramePtr's definition, or the front of the
  /// entry block when FramePtr is a function argument.
  Instruction *getInsertPtAfterFramePtr() const {
    if (auto *I = dyn_cast<Instruction>(FramePtr))
      return I->getNextNode();
    return &cast<Argument>(FramePtr)->getParent()->getEntryBlock().front();
  }

  /// Allocate memory according to the rules of the active lowering.
  ///
  /// \param CG - if non-null, will be updated for the new call
  Value *emitAlloc(IRBuilder<> &Builder, Value *Size, CallGraph *CG) const;

  /// Deallocate memory according to the rules of the active lowering.
  ///
  /// \param CG - if non-null, will be updated for the new call
  void emitDealloc(IRBuilder<> &Builder, Value *Ptr, CallGraph *CG) const;

  // Default construction leaves all fields uninitialized; callers must
  // run buildFrom (or use the Function constructor) before reading them.
  Shape() = default;
  explicit Shape(Function &F, bool OptimizeFrame = false)
      : OptimizeFrame(OptimizeFrame) {
    buildFrom(F);
  }
  /// Scan \p F and populate this Shape from its coroutine intrinsics.
  void buildFrom(Function &F);
};

/// Build the coroutine frame for \p F, spilling values that live across
/// suspend points into \p Shape's frame (details in the implementation).
void buildCoroutineFrame(Function &F, Shape &Shape);

/// Create a must-tail call to \p MustTailCallFn with \p Arguments at the
/// builder's insertion point, carrying debug location \p Loc.
CallInst *createMustTailCall(DebugLoc Loc, Function *MustTailCallFn,
                             ArrayRef<Value *> Arguments, IRBuilder<> &);
} // End namespace coro.
} // End namespace llvm

#endif