//===- CoroElide.cpp - Coroutine Frame Allocation Elision Pass ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Coroutines/CoroElide.h"
#include "CoroInternal.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include <optional>

using namespace llvm;

#define DEBUG_TYPE "coro-elide"

STATISTIC(NumOfCoroElided, "The # of coroutines elided.");

#ifndef NDEBUG
static cl::opt<std::string> CoroElideInfoOutputFilename(
    "coro-elide-info-output-file", cl::value_desc("filename"),
    cl::desc("File to record the coroutines that got elided"), cl::Hidden);
#endif

namespace {
// Created on demand if the coro-elide pass has work to do.
struct Lowerer : coro::LowererBase {
  SmallVector<CoroIdInst *, 4> CoroIds;
  SmallVector<CoroBeginInst *, 1> CoroBegins;
  SmallVector<CoroAllocInst *, 1> CoroAllocs;
  SmallVector<CoroSubFnInst *, 4> ResumeAddr;
  DenseMap<CoroBeginInst *, SmallVector<CoroSubFnInst *, 4>> DestroyAddr;
  SmallPtrSet<const SwitchInst *, 4> CoroSuspendSwitches;

  Lowerer(Module &M) : LowererBase(M) {}

  void elideHeapAllocations(Function *F, uint64_t FrameSize, Align FrameAlign,
                            AAResults &AA);
  bool shouldElide(Function *F, DominatorTree &DT) const;
  void collectPostSplitCoroIds(Function *F);
  bool processCoroId(CoroIdInst *, AAResults &AA, DominatorTree &DT,
                     OptimizationRemarkEmitter &ORE);
  bool hasEscapePath(const CoroBeginInst *,
                     const SmallPtrSetImpl<BasicBlock *> &) const;
};
} // end anonymous namespace

// Go through the list of coro.subfn.addr intrinsics and replace them with the
// provided constant.
static void replaceWithConstant(Constant *Value,
                                SmallVectorImpl<CoroSubFnInst *> &Users) {
  if (Users.empty())
    return;

  // See if we need to bitcast the constant to match the type of the intrinsic
  // being replaced. Note: All coro.subfn.addr intrinsics return the same type,
  // so we only need to examine the type of the first one in the list.
  Type *IntrTy = Users.front()->getType();
  Type *ValueTy = Value->getType();
  if (ValueTy != IntrTy) {
    // May need to tweak the function type to match the type expected at the
    // use site.
    assert(ValueTy->isPointerTy() && IntrTy->isPointerTy());
    Value = ConstantExpr::getBitCast(Value, IntrTy);
  }

  // Now the value type matches the type of the intrinsic. Replace them all!
  for (CoroSubFnInst *I : Users)
    replaceAndRecursivelySimplify(I, Value);
}

// See if any operand of the call instruction references the coroutine frame.
static bool operandReferences(CallInst *CI, AllocaInst *Frame, AAResults &AA) {
  for (Value *Op : CI->operand_values())
    if (!AA.isNoAlias(Op, Frame))
      return true;
  return false;
}

// Look for any tail calls referencing the coroutine frame and remove the tail
// attribute from them, since the coroutine frame now resides on the stack and
// a tail call implies that the function does not reference anything on the
// stack. However, if it's a musttail call, we cannot remove the tail-call
// attribute. It's safe to keep it there, as the musttail call is used for
// symmetric transfer, and by that point the frame should have been destroyed
// and hence cannot interfere with the operands.
static void removeTailCallAttribute(AllocaInst *Frame, AAResults &AA) {
  Function &F = *Frame->getFunction();
  for (Instruction &I : instructions(F))
    if (auto *Call = dyn_cast<CallInst>(&I))
      if (Call->isTailCall() && operandReferences(Call, Frame, AA) &&
          !Call->isMustTailCall())
        Call->setTailCall(false);
}

// Given a resume function @f.resume(%f.frame* %frame), returns the size
// and expected alignment of the %f.frame type.
static std::optional<std::pair<uint64_t, Align>>
getFrameLayout(Function *Resume) {
  // Pull information from the function attributes.
  auto Size = Resume->getParamDereferenceableBytes(0);
  if (!Size)
    return std::nullopt;
  return std::make_pair(Size, Resume->getParamAlign(0).valueOrOne());
}

// Finds the first non-alloca instruction in the entry block of a function.
static Instruction *getFirstNonAllocaInTheEntryBlock(Function *F) {
  for (Instruction &I : F->getEntryBlock())
    if (!isa<AllocaInst>(&I))
      return &I;
  llvm_unreachable("no terminator in the entry block");
}

#ifndef NDEBUG
static std::unique_ptr<raw_fd_ostream> getOrCreateLogFile() {
  assert(!CoroElideInfoOutputFilename.empty() &&
         "coro-elide-info-output-file shouldn't be empty");
  std::error_code EC;
  auto Result = std::make_unique<raw_fd_ostream>(CoroElideInfoOutputFilename,
                                                 EC, sys::fs::OF_Append);
  if (!EC)
    return Result;
  llvm::errs() << "Error opening coro-elide-info-output-file '"
               << CoroElideInfoOutputFilename << "' for appending!\n";
  return std::make_unique<raw_fd_ostream>(2, false); // stderr.
}
#endif

// To elide heap allocations we need to suppress code blocks guarded by
// llvm.coro.alloc and llvm.coro.free instructions.
void Lowerer::elideHeapAllocations(Function *F, uint64_t FrameSize,
                                   Align FrameAlign, AAResults &AA) {
  LLVMContext &C = F->getContext();
  auto *InsertPt =
      getFirstNonAllocaInTheEntryBlock(CoroIds.front()->getFunction());

  // Replacing llvm.coro.alloc with false will suppress the dynamic
  // allocation, as the frontend is expected to generate code that looks like:
  //   id = coro.id(...)
  //   mem = coro.alloc(id) ? malloc(coro.size()) : 0;
  //   coro.begin(id, mem)
  auto *False = ConstantInt::getFalse(C);
  for (auto *CA : CoroAllocs) {
    CA->replaceAllUsesWith(False);
    CA->eraseFromParent();
  }

  // FIXME: Design how to transmit alignment information for every alloca that
  // is spilled into the coroutine frame and recreate the alignment information
  // here. Possibly we will need to do a mini SROA here and break the coroutine
  // frame into individual AllocaInsts, recreating the original alignment.
  const DataLayout &DL = F->getParent()->getDataLayout();
  auto FrameTy = ArrayType::get(Type::getInt8Ty(C), FrameSize);
  auto *Frame = new AllocaInst(FrameTy, DL.getAllocaAddrSpace(), "", InsertPt);
  Frame->setAlignment(FrameAlign);
  auto *FrameVoidPtr =
      new BitCastInst(Frame, Type::getInt8PtrTy(C), "vFrame", InsertPt);

  for (auto *CB : CoroBegins) {
    CB->replaceAllUsesWith(FrameVoidPtr);
    CB->eraseFromParent();
  }

  // Since the coroutine frame now lives on the stack, we need to make sure
  // that any tail call referencing it is made a non-tail call.
  removeTailCallAttribute(Frame, AA);
}

bool Lowerer::hasEscapePath(const CoroBeginInst *CB,
                            const SmallPtrSetImpl<BasicBlock *> &TIs) const {
  const auto &It = DestroyAddr.find(CB);
  assert(It != DestroyAddr.end());

  // Limit the number of blocks we visit.
  unsigned Limit = 32 * (1 + It->second.size());

  SmallVector<const BasicBlock *, 32> Worklist;
  Worklist.push_back(CB->getParent());

  SmallPtrSet<const BasicBlock *, 32> Visited;
  // Consider the basic blocks of the coro.destroys as already visited, so
  // that we skip paths passing through a coro.destroy.
  for (auto *DA : It->second)
    Visited.insert(DA->getParent());

  SmallPtrSet<const BasicBlock *, 32> EscapingBBs;
  for (auto *U : CB->users()) {
    // Uses from coroutine intrinsics are not a problem.
    if (isa<CoroFreeInst, CoroSubFnInst, CoroSaveInst>(U))
      continue;

    // Conservatively treat every other use as a potential escape.
    //
    // Note that the major user of the switch-ABI coroutine (C++) stores
    // resume.fn, destroy.fn and the index into the coroutine frame
    // immediately, so the parent block of the coro.begin in C++ will always
    // be escaping. Improving the precision of this method therefore buys no
    // performance for C++.
    //
    // The reason we still perform this analysis is that we want LLVM
    // coroutines with the switch ABI to be as self-contained as possible,
    // rather than a by-product of C++20 coroutines.
    EscapingBBs.insert(cast<Instruction>(U)->getParent());
  }

  bool PotentiallyEscaped = false;

  do {
    const auto *BB = Worklist.pop_back_val();
    if (!Visited.insert(BB).second)
      continue;

    // A path-insensitive marker to test whether the coro.begin escapes.
    // It is intentionally path insensitive, even though that may lose
    // precision, because we don't want the analysis to be too slow.
    PotentiallyEscaped |= EscapingBBs.count(BB);

    if (TIs.count(BB)) {
      if (!BB->getTerminator()->isExceptionalTerminator() || PotentiallyEscaped)
        return true;

      // If the function exits through an exceptional terminator, the memory
      // used by the coroutine frame can be released automatically by stack
      // unwinding. So we can consider the coro.begin as not escaping if it
      // exits the function via an exceptional terminator.

      continue;
    }

    // Conservatively say that there is potentially a path.
    if (!--Limit)
      return true;

    auto TI = BB->getTerminator();
    // Although the default destination of a coro.suspend switch is the
    // suspend block, which constitutes an escape path to a normal terminator,
    // it is reasonable to skip it since the coroutine frame doesn't change
    // outside the coroutine body.
    if (isa<SwitchInst>(TI) &&
        CoroSuspendSwitches.count(cast<SwitchInst>(TI))) {
      Worklist.push_back(cast<SwitchInst>(TI)->getSuccessor(1));
      Worklist.push_back(cast<SwitchInst>(TI)->getSuccessor(2));
    } else
      Worklist.append(succ_begin(BB), succ_end(BB));

  } while (!Worklist.empty());

  // We have exhausted all possible paths and are certain that coro.begin
  // cannot reach any of the terminators.
  return false;
}

bool Lowerer::shouldElide(Function *F, DominatorTree &DT) const {
  // If there are no CoroAllocs, we cannot suppress allocation, so elision is
  // not possible.
  if (CoroAllocs.empty())
    return false;

  // Check that for every coro.begin there is at least one coro.destroy
  // directly referencing the SSA value of that coro.begin along each
  // non-exceptional path.
  // If the value escaped, then coro.destroy would have been referencing a
  // memory location storing that value and not the virtual register.

  SmallPtrSet<BasicBlock *, 8> Terminators;
  // First gather all of the terminators for the function.
  // Consider the final coro.suspend as the real terminator when the current
  // function is a coroutine.
  for (BasicBlock &B : *F) {
    auto *TI = B.getTerminator();

    if (TI->getNumSuccessors() != 0 || isa<UnreachableInst>(TI))
      continue;

    Terminators.insert(&B);
  }

  // Filter out the coro.destroys that lie along exceptional paths.
  SmallPtrSet<CoroBeginInst *, 8> ReferencedCoroBegins;
  for (const auto &It : DestroyAddr) {
    // If every terminator is dominated by a coro.destroy, we know the
    // corresponding coro.begin cannot escape.
    //
    // Otherwise, hasEscapePath decides whether there is any path from
    // coro.begin to Terminators that does not pass through any of the
    // coro.destroys.
    //
    // hasEscapePath is relatively slow, so we avoid running it whenever
    // possible.
    if (llvm::all_of(Terminators,
                     [&](auto *TI) {
                       return llvm::any_of(It.second, [&](auto *DA) {
                         return DT.dominates(DA, TI->getTerminator());
                       });
                     }) ||
        !hasEscapePath(It.first, Terminators))
      ReferencedCoroBegins.insert(It.first);
  }

  // If the size of the set equals the total number of coro.begins, that means
  // we found a coro.free or coro.destroy referencing each coro.begin, so we
  // can perform heap elision.
  return ReferencedCoroBegins.size() == CoroBegins.size();
}

void Lowerer::collectPostSplitCoroIds(Function *F) {
  CoroIds.clear();
  CoroSuspendSwitches.clear();
  for (auto &I : instructions(F)) {
    if (auto *CII = dyn_cast<CoroIdInst>(&I))
      if (CII->getInfo().isPostSplit())
        // If it is the coroutine itself, don't touch it.
        if (CII->getCoroutine() != CII->getFunction())
          CoroIds.push_back(CII);

    // Consider a case like:
    //   %0 = call i8 @llvm.coro.suspend(...)
    //   switch i8 %0, label %suspend [i8 0, label %resume
    //                                 i8 1, label %cleanup]
    // and collect the SwitchInsts which are used by escape analysis later.
    if (auto *CSI = dyn_cast<CoroSuspendInst>(&I))
      if (CSI->hasOneUse() && isa<SwitchInst>(CSI->use_begin()->getUser())) {
        SwitchInst *SWI = cast<SwitchInst>(CSI->use_begin()->getUser());
        if (SWI->getNumCases() == 2)
          CoroSuspendSwitches.insert(SWI);
      }
  }
}

bool Lowerer::processCoroId(CoroIdInst *CoroId, AAResults &AA,
                            DominatorTree &DT, OptimizationRemarkEmitter &ORE) {
  CoroBegins.clear();
  CoroAllocs.clear();
  ResumeAddr.clear();
  DestroyAddr.clear();

  // Collect all coro.begins and coro.allocs associated with this coro.id.
  for (User *U : CoroId->users()) {
    if (auto *CB = dyn_cast<CoroBeginInst>(U))
      CoroBegins.push_back(CB);
    else if (auto *CA = dyn_cast<CoroAllocInst>(U))
      CoroAllocs.push_back(CA);
  }

  // Collect all coro.subfn.addrs associated with coro.begin.
  // Note, we only devirtualize the calls if their coro.subfn.addr refers to
  // coro.begin directly. If we run into cases where this check is too
  // conservative, we can consider relaxing the check.
  for (CoroBeginInst *CB : CoroBegins) {
    for (User *U : CB->users())
      if (auto *II = dyn_cast<CoroSubFnInst>(U))
        switch (II->getIndex()) {
        case CoroSubFnInst::ResumeIndex:
          ResumeAddr.push_back(II);
          break;
        case CoroSubFnInst::DestroyIndex:
          DestroyAddr[CB].push_back(II);
          break;
        default:
          llvm_unreachable("unexpected coro.subfn.addr constant");
        }
  }

  // A post-split coro.id refers to an array of subfunctions in its Info
  // argument.
  ConstantArray *Resumers = CoroId->getInfo().Resumers;
  assert(Resumers && "PostSplit coro.id Info argument must refer to an array "
                     "of coroutine subfunctions");
  auto *ResumeAddrConstant =
      Resumers->getAggregateElement(CoroSubFnInst::ResumeIndex);

  replaceWithConstant(ResumeAddrConstant, ResumeAddr);

  bool ShouldElide = shouldElide(CoroId->getFunction(), DT);
  if (!ShouldElide)
    ORE.emit([&]() {
      if (auto FrameSizeAndAlign =
              getFrameLayout(cast<Function>(ResumeAddrConstant)))
        return OptimizationRemarkMissed(DEBUG_TYPE, "CoroElide", CoroId)
               << "'" << ore::NV("callee", CoroId->getCoroutine()->getName())
               << "' not elided in '"
               << ore::NV("caller", CoroId->getFunction()->getName())
               << "' (frame_size="
               << ore::NV("frame_size", FrameSizeAndAlign->first) << ", align="
               << ore::NV("align", FrameSizeAndAlign->second.value()) << ")";
      else
        return OptimizationRemarkMissed(DEBUG_TYPE, "CoroElide", CoroId)
               << "'" << ore::NV("callee", CoroId->getCoroutine()->getName())
               << "' not elided in '"
               << ore::NV("caller", CoroId->getFunction()->getName())
               << "' (frame_size=unknown, align=unknown)";
    });

  auto *DestroyAddrConstant = Resumers->getAggregateElement(
      ShouldElide ? CoroSubFnInst::CleanupIndex : CoroSubFnInst::DestroyIndex);

  for (auto &It : DestroyAddr)
    replaceWithConstant(DestroyAddrConstant, It.second);

  if (ShouldElide) {
    if (auto FrameSizeAndAlign =
            getFrameLayout(cast<Function>(ResumeAddrConstant))) {
      elideHeapAllocations(CoroId->getFunction(), FrameSizeAndAlign->first,
                           FrameSizeAndAlign->second, AA);
      coro::replaceCoroFree(CoroId, /*Elide=*/true);
      NumOfCoroElided++;
#ifndef NDEBUG
      if (!CoroElideInfoOutputFilename.empty())
        *getOrCreateLogFile()
            << "Elide " << CoroId->getCoroutine()->getName() << " in "
            << CoroId->getFunction()->getName() << "\n";
#endif
      ORE.emit([&]() {
        return OptimizationRemark(DEBUG_TYPE, "CoroElide", CoroId)
               << "'" << ore::NV("callee", CoroId->getCoroutine()->getName())
               << "' elided in '"
               << ore::NV("caller", CoroId->getFunction()->getName())
               << "' (frame_size="
               << ore::NV("frame_size", FrameSizeAndAlign->first) << ", align="
               << ore::NV("align", FrameSizeAndAlign->second.value()) << ")";
      });
    } else {
      ORE.emit([&]() {
        return OptimizationRemarkMissed(DEBUG_TYPE, "CoroElide", CoroId)
               << "'" << ore::NV("callee", CoroId->getCoroutine()->getName())
               << "' not elided in '"
               << ore::NV("caller", CoroId->getFunction()->getName())
               << "' (frame_size=unknown, align=unknown)";
      });
    }
  }

  return true;
}

static bool declaresCoroElideIntrinsics(Module &M) {
  return coro::declaresIntrinsics(M, {"llvm.coro.id", "llvm.coro.id.async"});
}

PreservedAnalyses CoroElidePass::run(Function &F, FunctionAnalysisManager &AM) {
  auto &M = *F.getParent();
  if (!declaresCoroElideIntrinsics(M))
    return PreservedAnalyses::all();

  Lowerer L(M);
  L.CoroIds.clear();
  L.collectPostSplitCoroIds(&F);
  // If we did not find any coro.id, there is nothing to do.
  if (L.CoroIds.empty())
    return PreservedAnalyses::all();

  AAResults &AA = AM.getResult<AAManager>(F);
  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  bool Changed = false;
  for (auto *CII : L.CoroIds)
    Changed |= L.processCoroId(CII, AA, DT, ORE);

  return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
}