//===- Parsing, selection, and construction of pass pipelines --*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Interfaces for registering analysis passes, producing common pass manager
/// configurations, and parsing of pass pipelines.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_PASSES_PASSBUILDER_H
#define LLVM_PASSES_PASSBUILDER_H

#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/Error.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include <vector>

namespace llvm {
class StringRef;
class AAManager;
class TargetMachine;
class ModuleSummaryIndex;

/// A struct capturing PGO tunables.
struct PGOOptions {
  enum PGOAction { NoAction, IRInstr, IRUse, SampleUse };
  enum CSPGOAction { NoCSAction, CSIRInstr, CSIRUse };
  PGOOptions(std::string ProfileFile = "", std::string CSProfileGenFile = "",
             std::string ProfileRemappingFile = "", PGOAction Action = NoAction,
             CSPGOAction CSAction = NoCSAction, bool SamplePGOSupport = false)
      : ProfileFile(ProfileFile), CSProfileGenFile(CSProfileGenFile),
        ProfileRemappingFile(ProfileRemappingFile), Action(Action),
        CSAction(CSAction),
        SamplePGOSupport(SamplePGOSupport || Action == SampleUse) {
    // Note: we do allow ProfileFile.empty() for Action=IRUse; LTO may call
    // back with an IRUse action without a ProfileFile.

    // If there is a CSAction, PGOAction cannot be IRInstr or SampleUse.
    assert(this->CSAction == NoCSAction ||
           (this->Action != IRInstr && this->Action != SampleUse));

    // For CSIRInstr, CSProfileGenFile also needs to be nonempty.
    assert(this->CSAction != CSIRInstr || !this->CSProfileGenFile.empty());

    // If CSAction is CSIRUse, PGOAction needs to be IRUse as they share
    // a profile.
    assert(this->CSAction != CSIRUse || this->Action == IRUse);

    // If neither Action nor CSAction, SamplePGOSupport needs to be true.
    assert(this->Action != NoAction || this->CSAction != NoCSAction ||
           this->SamplePGOSupport);
  }
  std::string ProfileFile;
  std::string CSProfileGenFile;
  std::string ProfileRemappingFile;
  PGOAction Action;
  CSPGOAction CSAction;
  bool SamplePGOSupport;
};

/// Tunable parameters for passes in the default pipelines.
class PipelineTuningOptions {
public:
  /// Constructor sets pipeline tuning defaults based on cl::opts. Each option
  /// can be set in the PassBuilder when using a LLVM as a library.
  PipelineTuningOptions();

  /// Tuning option to set loop interleaving on/off. Its default value is that
  /// of the flag: `-interleave-loops`.
  bool LoopInterleaving;

  /// Tuning option to enable/disable loop vectorization. Its default value is
  /// that of the flag: `-vectorize-loops`.
  bool LoopVectorization;

  /// Tuning option to enable/disable slp loop vectorization. Its default value
  /// is that of the flag: `vectorize-slp`.
  bool SLPVectorization;

  /// Tuning option to enable/disable loop unrolling. Its default value is true.
  bool LoopUnrolling;

  /// Tuning option to forget all SCEV loops in LoopUnroll. Its default value
  /// is that of the flag: `-forget-scev-loop-unroll`.
  bool ForgetAllSCEVInLoopUnroll;

  /// Tuning option to cap the number of calls to retrieve clobbering accesses
  /// in MemorySSA, in LICM.
  unsigned LicmMssaOptCap;

  /// Tuning option to disable promotion to scalars in LICM with MemorySSA, if
  /// the number of accesses is too large.
  unsigned LicmMssaNoAccForPromotionCap;
};

/// This class provides access to building LLVM's passes.
///
/// Its members provide the baseline state available to passes during their
/// construction. The \c PassRegistry.def file specifies how to construct all
/// of the built-in passes, and those may reference these members during
/// construction.
class PassBuilder {
  TargetMachine *TM;
  PipelineTuningOptions PTO;
  Optional<PGOOptions> PGOOpt;
  PassInstrumentationCallbacks *PIC;

public:
  /// A struct to capture parsed pass pipeline names.
  ///
  /// A pipeline is defined as a series of names, each of which may in itself
  /// recursively contain a nested pipeline. A name is either the name of a pass
  /// (e.g. "instcombine") or the name of a pipeline type (e.g. "cgscc"). If the
  /// name is the name of a pass, the InnerPipeline is empty, since passes
  /// cannot contain inner pipelines. See parsePassPipeline() for a more
  /// detailed description of the textual pipeline format.
  struct PipelineElement {
    StringRef Name;
    std::vector<PipelineElement> InnerPipeline;
  };

  /// ThinLTO phase.
  ///
  /// This enumerates the LLVM ThinLTO optimization phases.
  enum class ThinLTOPhase {
    /// No ThinLTO behavior needed.
    None,
    /// ThinLTO prelink (summary) phase.
    PreLink,
    /// ThinLTO postlink (backend compile) phase.
    PostLink
  };

  /// LLVM-provided high-level optimization levels.
  ///
  /// This enumerates the LLVM-provided high-level optimization levels. Each
  /// level has a specific goal and rationale.
  enum OptimizationLevel {
    /// Disable as many optimizations as possible. This doesn't completely
    /// disable the optimizer in all cases, for example always_inline functions
    /// can be required to be inlined for correctness.
    O0,

    /// Optimize quickly without destroying debuggability.
    ///
    /// This level is tuned to produce a result from the optimizer as quickly
    /// as possible and to avoid destroying debuggability. This tends to result
    /// in a very good development mode where the compiled code will be
    /// immediately executed as part of testing. As a consequence, where
    /// possible, we would like to produce efficient-to-execute code, but not
    /// if it significantly slows down compilation or would prevent even basic
    /// debugging of the resulting binary.
    ///
    /// As an example, complex loop transformations such as versioning,
    /// vectorization, or fusion don't make sense here due to the degree to
    /// which the executed code differs from the source code, and the compile
    /// time cost.
    O1,

    /// Optimize for fast execution as much as possible without triggering
    /// significant incremental compile time or code size growth.
    ///
    /// The key idea is that optimizations at this level should "pay for
    /// themselves". So if an optimization increases compile time by 5% or
    /// increases code size by 5% for a particular benchmark, that benchmark
    /// should also be one which sees a 5% runtime improvement. If the compile
    /// time or code size penalties happen on average across a diverse range of
    /// LLVM users' benchmarks, then the improvements should as well.
    ///
    /// And no matter what, the compile time needs to not grow superlinearly
    /// with the size of input to LLVM so that users can control the runtime of
    /// the optimizer in this mode.
    ///
    /// This is expected to be a good default optimization level for the vast
    /// majority of users.
    O2,

    /// Optimize for fast execution as much as possible.
    ///
    /// This mode is significantly more aggressive in trading off compile time
    /// and code size to get execution time improvements. The core idea is that
    /// this mode should include any optimization that helps execution time on
    /// balance across a diverse collection of benchmarks, even if it increases
    /// code size or compile time for some benchmarks without corresponding
    /// improvements to execution time.
    ///
    /// Despite being willing to trade more compile time off to get improved
    /// execution time, this mode still tries to avoid superlinear growth in
    /// order to make even significantly slower compile times at least scale
    /// reasonably. This does not preclude very substantial constant factor
    /// costs though.
    O3,

    /// Similar to \c O2 but tries to optimize for small code size instead of
    /// fast execution without triggering significant incremental execution
    /// time slowdowns.
    ///
    /// The logic here is exactly the same as \c O2, but with code size and
    /// execution time metrics swapped.
    ///
    /// A consequence of the different core goal is that this should in general
    /// produce substantially smaller executables that still run in
    /// a reasonable amount of time.
    Os,

    /// A very specialized mode that will optimize for code size at any and all
    /// costs.
    ///
    /// This is useful primarily when there are absolute size limitations and
    /// any effort taken to reduce the size is worth it regardless of the
    /// execution time impact. You should expect this level to produce rather
    /// slow, but very small, code.
    Oz
  };

  explicit PassBuilder(TargetMachine *TM = nullptr,
                       PipelineTuningOptions PTO = PipelineTuningOptions(),
                       Optional<PGOOptions> PGOOpt = None,
                       PassInstrumentationCallbacks *PIC = nullptr)
      : TM(TM), PTO(PTO), PGOOpt(PGOOpt), PIC(PIC) {}

  /// Cross register the analysis managers through their proxies.
  ///
  /// This is an interface that can be used to cross register each
  /// AnalysisManager with all the other analysis managers.
  void crossRegisterProxies(LoopAnalysisManager &LAM,
                            FunctionAnalysisManager &FAM,
                            CGSCCAnalysisManager &CGAM,
                            ModuleAnalysisManager &MAM);

  /// Registers all available module analysis passes.
  ///
  /// This is an interface that can be used to populate a \c
  /// ModuleAnalysisManager with all registered module analyses. Callers can
  /// still manually register any additional analyses. Callers can also
  /// pre-register analyses and this will not override those.
  void registerModuleAnalyses(ModuleAnalysisManager &MAM);

  /// Registers all available CGSCC analysis passes.
  ///
  /// This is an interface that can be used to populate a \c
  /// CGSCCAnalysisManager with all registered CGSCC analyses. Callers can
  /// still manually register any additional analyses. Callers can also
  /// pre-register analyses and this will not override those.
  void registerCGSCCAnalyses(CGSCCAnalysisManager &CGAM);

  /// Registers all available function analysis passes.
  ///
  /// This is an interface that can be used to populate a \c
  /// FunctionAnalysisManager with all registered function analyses. Callers can
  /// still manually register any additional analyses. Callers can also
  /// pre-register analyses and this will not override those.
  void registerFunctionAnalyses(FunctionAnalysisManager &FAM);

  /// Registers all available loop analysis passes.
  ///
  /// This is an interface that can be used to populate a \c LoopAnalysisManager
  /// with all registered loop analyses. Callers can still manually register any
  /// additional analyses.
  void registerLoopAnalyses(LoopAnalysisManager &LAM);

  /// Construct the core LLVM function canonicalization and simplification
  /// pipeline.
  ///
  /// This is a long pipeline and uses most of the per-function optimization
  /// passes in LLVM to canonicalize and simplify the IR. It is suitable to run
  /// repeatedly over the IR and is not expected to destroy important
  /// information about the semantics of the IR.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ///
  /// \p Phase indicates the current ThinLTO phase.
  FunctionPassManager
  buildFunctionSimplificationPipeline(OptimizationLevel Level,
                                      ThinLTOPhase Phase,
                                      bool DebugLogging = false);

  /// Construct the core LLVM module canonicalization and simplification
  /// pipeline.
  ///
  /// This pipeline focuses on canonicalizing and simplifying the entire module
  /// of IR. Much like the function simplification pipeline above, it is
  /// suitable to run repeatedly over the IR and is not expected to destroy
  /// important information. It does, however, perform inlining and other
  /// heuristic based simplifications that are not strictly reversible.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ///
  /// \p Phase indicates the current ThinLTO phase.
  ModulePassManager
  buildModuleSimplificationPipeline(OptimizationLevel Level,
                                    ThinLTOPhase Phase,
                                    bool DebugLogging = false);

  /// Construct the core LLVM module optimization pipeline.
  ///
  /// This pipeline focuses on optimizing the execution speed of the IR. It
  /// uses cost modeling and thresholds to balance code growth against runtime
  /// improvements. It includes vectorization and other information destroying
  /// transformations. It also cannot generally be run repeatedly on a module
  /// without potentially seriously regressing either runtime performance of
  /// the code or serious code size growth.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager buildModuleOptimizationPipeline(OptimizationLevel Level,
                                                    bool DebugLogging = false,
                                                    bool LTOPreLink = false);

  /// Build a per-module default optimization pipeline.
  ///
  /// This provides a good default optimization pipeline for per-module
  /// optimization and code generation without any link-time optimization. It
  /// typically corresponds to frontend "-O[123]" options for optimization
  /// levels \c O1, \c O2 and \c O3 resp.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager buildPerModuleDefaultPipeline(OptimizationLevel Level,
                                                  bool DebugLogging = false,
                                                  bool LTOPreLink = false);

  /// Build a pre-link, ThinLTO-targeting default optimization pipeline to
  /// a pass manager.
  ///
  /// This adds the pre-link optimizations tuned to prepare a module for
  /// a ThinLTO run. It works to minimize the IR which needs to be analyzed
  /// without making irreversible decisions which could be made better during
  /// the LTO run.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager
  buildThinLTOPreLinkDefaultPipeline(OptimizationLevel Level,
                                     bool DebugLogging = false);

  /// Build a ThinLTO default optimization pipeline to a pass manager.
  ///
  /// This provides a good default optimization pipeline for link-time
  /// optimization and code generation. It is particularly tuned to fit well
  /// when IR coming into the LTO phase was first run through \c
  /// buildThinLTOPreLinkDefaultPipeline, and the two coordinate closely.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager
  buildThinLTODefaultPipeline(OptimizationLevel Level, bool DebugLogging,
                              const ModuleSummaryIndex *ImportSummary);

  /// Build a pre-link, LTO-targeting default optimization pipeline to a pass
  /// manager.
  ///
  /// This adds the pre-link optimizations tuned to work well with a later LTO
  /// run. It works to minimize the IR which needs to be analyzed without
  /// making irreversible decisions which could be made better during the LTO
  /// run.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager buildLTOPreLinkDefaultPipeline(OptimizationLevel Level,
                                                   bool DebugLogging = false);

  /// Build an LTO default optimization pipeline to a pass manager.
  ///
  /// This provides a good default optimization pipeline for link-time
  /// optimization and code generation. It is particularly tuned to fit well
  /// when IR coming into the LTO phase was first run through \c
  /// buildLTOPreLinkDefaultPipeline, and the two coordinate closely.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager buildLTODefaultPipeline(OptimizationLevel Level,
                                            bool DebugLogging,
                                            ModuleSummaryIndex *ExportSummary);

  /// Build the default `AAManager` with the default alias analysis pipeline
  /// registered.
  AAManager buildDefaultAAPipeline();

  /// Parse a textual pass pipeline description into a \c
  /// ModulePassManager.
  ///
  /// The format of the textual pass pipeline description looks something like:
  ///
  ///   module(function(instcombine,sroa),dce,cgscc(inliner,function(...)),...)
  ///
  /// Pass managers have ()s describing the nest structure of passes. All passes
  /// are comma separated. As a special shortcut, if the very first pass is not
  /// a module pass (as a module pass manager is), this will automatically form
  /// the shortest stack of pass managers that allow inserting that first pass.
  /// So, assuming function passes 'fpassN', CGSCC passes 'cgpassN', and loop
  /// passes 'lpassN', all of these are valid:
  ///
  ///   fpass1,fpass2,fpass3
  ///   cgpass1,cgpass2,cgpass3
  ///   lpass1,lpass2,lpass3
  ///
  /// And they are equivalent to the following (resp.):
  ///
  ///   module(function(fpass1,fpass2,fpass3))
  ///   module(cgscc(cgpass1,cgpass2,cgpass3))
  ///   module(function(loop(lpass1,lpass2,lpass3)))
  ///
  /// This shortcut is especially useful for debugging and testing small pass
  /// combinations. Note that these shortcuts don't introduce any other magic.
  /// If the sequence of passes aren't all the exact same kind of pass, it will
  /// be an error. You cannot mix different levels implicitly, you must
  /// explicitly form a pass manager in which to nest passes.
  Error parsePassPipeline(ModulePassManager &MPM, StringRef PipelineText,
                          bool VerifyEachPass = true,
                          bool DebugLogging = false);

  /// @{ Parse a textual pass pipeline description into a specific PassManager
  ///
  /// Automatic deduction of an appropriate pass manager stack is not supported.
  /// For example, to insert a loop pass 'lpass' into a FunctionPassManager,
  /// this is the valid pipeline text:
  ///
  ///   function(lpass)
  Error parsePassPipeline(CGSCCPassManager &CGPM, StringRef PipelineText,
                          bool VerifyEachPass = true,
                          bool DebugLogging = false);
  Error parsePassPipeline(FunctionPassManager &FPM, StringRef PipelineText,
                          bool VerifyEachPass = true,
                          bool DebugLogging = false);
  Error parsePassPipeline(LoopPassManager &LPM, StringRef PipelineText,
                          bool VerifyEachPass = true,
                          bool DebugLogging = false);
  /// @}

  /// Parse a textual alias analysis pipeline into the provided AA manager.
  ///
  /// The format of the textual AA pipeline is a comma separated list of AA
  /// pass names:
  ///
  ///   basic-aa,globals-aa,...
  ///
  /// The AA manager is set up such that the provided alias analyses are tried
  /// in the order specified. See the \c AAManager documentation for details
  /// about the logic used. This routine just provides the textual mapping
  /// between AA names and the analyses to register with the manager.
  ///
  /// Returns an error if the text cannot be parsed cleanly. The specific state
  /// of the \p AA manager is unspecified if such an error is encountered.
  Error parseAAPipeline(AAManager &AA, StringRef PipelineText);

  /// Register a callback for a default optimizer pipeline extension
  /// point
  ///
  /// This extension point allows adding passes that perform peephole
  /// optimizations similar to the instruction combiner. These passes will be
  /// inserted after each instance of the instruction combiner pass.
  void registerPeepholeEPCallback(
      const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
    PeepholeEPCallbacks.push_back(C);
  }

  /// Register a callback for a default optimizer pipeline extension
  /// point
  ///
  /// This extension point allows adding late loop canonicalization and
  /// simplification passes. This is the last point in the loop optimization
  /// pipeline before loop deletion. Each pass added
  /// here must be an instance of LoopPass.
  /// This is the place to add passes that can remove loops, such as target-
  /// specific loop idiom recognition.
  void registerLateLoopOptimizationsEPCallback(
      const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
    LateLoopOptimizationsEPCallbacks.push_back(C);
  }

  /// Register a callback for a default optimizer pipeline extension
  /// point
  ///
  /// This extension point allows adding loop passes to the end of the loop
  /// optimizer.
  void registerLoopOptimizerEndEPCallback(
      const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
    LoopOptimizerEndEPCallbacks.push_back(C);
  }

  /// Register a callback for a default optimizer pipeline extension
  /// point
  ///
  /// This extension point allows adding optimization passes after most of the
  /// main optimizations, but before the last cleanup-ish optimizations.
  void registerScalarOptimizerLateEPCallback(
      const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
    ScalarOptimizerLateEPCallbacks.push_back(C);
  }

  /// Register a callback for a default optimizer pipeline extension
  /// point
  ///
  /// This extension point allows adding CallGraphSCC passes at the end of the
  /// main CallGraphSCC passes and before any function simplification passes run
  /// by CGPassManager.
  void registerCGSCCOptimizerLateEPCallback(
      const std::function<void(CGSCCPassManager &, OptimizationLevel)> &C) {
    CGSCCOptimizerLateEPCallbacks.push_back(C);
  }

  /// Register a callback for a default optimizer pipeline extension
  /// point
  ///
  /// This extension point allows adding optimization passes before the
  /// vectorizer and other highly target specific optimization passes are
  /// executed.
  void registerVectorizerStartEPCallback(
      const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
    VectorizerStartEPCallbacks.push_back(C);
  }

  /// Register a callback for a default optimizer pipeline extension point.
  ///
  /// This extension point allows adding optimization once at the start of the
  /// pipeline. This does not apply to 'backend' compiles (LTO and ThinLTO
  /// link-time pipelines).
  void registerPipelineStartEPCallback(
      const std::function<void(ModulePassManager &)> &C) {
    PipelineStartEPCallbacks.push_back(C);
  }

  /// Register a callback for a default optimizer pipeline extension point
  ///
  /// This extension point allows adding optimizations at the very end of the
  /// function optimization pipeline. A key difference between this and the
  /// legacy PassManager's OptimizerLast callback is that this extension point
  /// is not triggered at O0. Extensions to the O0 pipeline should append their
  /// passes to the end of the overall pipeline.
  void registerOptimizerLastEPCallback(
      const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
    OptimizerLastEPCallbacks.push_back(C);
  }

  /// Register a callback for parsing an AliasAnalysis Name to populate
  /// the given AAManager \p AA
  void registerParseAACallback(
      const std::function<bool(StringRef Name, AAManager &AA)> &C) {
    AAParsingCallbacks.push_back(C);
  }

  /// @{ Register callbacks for analysis registration with this PassBuilder
  /// instance.
  /// Callees register their analyses with the given AnalysisManager objects.
  void registerAnalysisRegistrationCallback(
      const std::function<void(CGSCCAnalysisManager &)> &C) {
    CGSCCAnalysisRegistrationCallbacks.push_back(C);
  }
  void registerAnalysisRegistrationCallback(
      const std::function<void(FunctionAnalysisManager &)> &C) {
    FunctionAnalysisRegistrationCallbacks.push_back(C);
  }
  void registerAnalysisRegistrationCallback(
      const std::function<void(LoopAnalysisManager &)> &C) {
    LoopAnalysisRegistrationCallbacks.push_back(C);
  }
  void registerAnalysisRegistrationCallback(
      const std::function<void(ModuleAnalysisManager &)> &C) {
    ModuleAnalysisRegistrationCallbacks.push_back(C);
  }
  /// @}

  /// @{ Register pipeline parsing callbacks with this pass builder instance.
  /// Using these callbacks, callers can parse both a single pass name, as well
  /// as entire sub-pipelines, and populate the PassManager instance
  /// accordingly.
  void registerPipelineParsingCallback(
      const std::function<bool(StringRef Name, CGSCCPassManager &,
                               ArrayRef<PipelineElement>)> &C) {
    CGSCCPipelineParsingCallbacks.push_back(C);
  }
  void registerPipelineParsingCallback(
      const std::function<bool(StringRef Name, FunctionPassManager &,
                               ArrayRef<PipelineElement>)> &C) {
    FunctionPipelineParsingCallbacks.push_back(C);
  }
  void registerPipelineParsingCallback(
      const std::function<bool(StringRef Name, LoopPassManager &,
                               ArrayRef<PipelineElement>)> &C) {
    LoopPipelineParsingCallbacks.push_back(C);
  }
  void registerPipelineParsingCallback(
      const std::function<bool(StringRef Name, ModulePassManager &,
                               ArrayRef<PipelineElement>)> &C) {
    ModulePipelineParsingCallbacks.push_back(C);
  }
  /// @}

  /// Register a callback for a top-level pipeline entry.
  ///
  /// If the PassManager type is not given at the top level of the pipeline
  /// text, this Callback should be used to determine the appropriate stack of
  /// PassManagers and populate the passed ModulePassManager.
  void registerParseTopLevelPipelineCallback(
      const std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>,
                               bool VerifyEachPass, bool DebugLogging)> &C) {
    TopLevelPipelineParsingCallbacks.push_back(C);
  }

  /// Add PGOInstrumentation passes for O0 only.
  void addPGOInstrPassesForO0(ModulePassManager &MPM, bool DebugLogging,
                              bool RunProfileGen, bool IsCS,
                              std::string ProfileFile,
                              std::string ProfileRemappingFile);

  /// Returns PIC. External libraries can use this to register pass
  /// instrumentation callbacks.
  PassInstrumentationCallbacks *getPassInstrumentationCallbacks() const {
    return PIC;
  }

private:
  static Optional<std::vector<PipelineElement>>
  parsePipelineText(StringRef Text);

  Error parseModulePass(ModulePassManager &MPM, const PipelineElement &E,
                        bool VerifyEachPass, bool DebugLogging);
  Error parseCGSCCPass(CGSCCPassManager &CGPM, const PipelineElement &E,
                       bool VerifyEachPass, bool DebugLogging);
  Error parseFunctionPass(FunctionPassManager &FPM, const PipelineElement &E,
                          bool VerifyEachPass, bool DebugLogging);
  Error parseLoopPass(LoopPassManager &LPM, const PipelineElement &E,
                      bool VerifyEachPass, bool DebugLogging);
  bool parseAAPassName(AAManager &AA, StringRef Name);

  Error parseLoopPassPipeline(LoopPassManager &LPM,
                              ArrayRef<PipelineElement> Pipeline,
                              bool VerifyEachPass, bool DebugLogging);
  Error parseFunctionPassPipeline(FunctionPassManager &FPM,
                                  ArrayRef<PipelineElement> Pipeline,
                                  bool VerifyEachPass, bool DebugLogging);
  Error parseCGSCCPassPipeline(CGSCCPassManager &CGPM,
                               ArrayRef<PipelineElement> Pipeline,
                               bool VerifyEachPass, bool DebugLogging);
  Error parseModulePassPipeline(ModulePassManager &MPM,
                                ArrayRef<PipelineElement> Pipeline,
                                bool VerifyEachPass, bool DebugLogging);

  void addPGOInstrPasses(ModulePassManager &MPM, bool DebugLogging,
                         OptimizationLevel Level, bool RunProfileGen, bool IsCS,
                         std::string ProfileFile,
                         std::string ProfileRemappingFile);
  void invokePeepholeEPCallbacks(FunctionPassManager &, OptimizationLevel);

  // Extension Point callbacks
  SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
      PeepholeEPCallbacks;
  SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
      LateLoopOptimizationsEPCallbacks;
  SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
      LoopOptimizerEndEPCallbacks;
  SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
      ScalarOptimizerLateEPCallbacks;
  SmallVector<std::function<void(CGSCCPassManager &, OptimizationLevel)>, 2>
      CGSCCOptimizerLateEPCallbacks;
  SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
      VectorizerStartEPCallbacks;
  SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
      OptimizerLastEPCallbacks;
  // Module callbacks
  SmallVector<std::function<void(ModulePassManager &)>, 2>
      PipelineStartEPCallbacks;
  SmallVector<std::function<void(ModuleAnalysisManager &)>, 2>
      ModuleAnalysisRegistrationCallbacks;
  SmallVector<std::function<bool(StringRef, ModulePassManager &,
                                 ArrayRef<PipelineElement>)>,
              2>
      ModulePipelineParsingCallbacks;
  SmallVector<std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>,
                                 bool VerifyEachPass, bool DebugLogging)>,
              2>
      TopLevelPipelineParsingCallbacks;
  // CGSCC callbacks
  SmallVector<std::function<void(CGSCCAnalysisManager &)>, 2>
      CGSCCAnalysisRegistrationCallbacks;
  SmallVector<std::function<bool(StringRef, CGSCCPassManager &,
                                 ArrayRef<PipelineElement>)>,
              2>
      CGSCCPipelineParsingCallbacks;
  // Function callbacks
  SmallVector<std::function<void(FunctionAnalysisManager &)>, 2>
      FunctionAnalysisRegistrationCallbacks;
  SmallVector<std::function<bool(StringRef, FunctionPassManager &,
                                 ArrayRef<PipelineElement>)>,
              2>
      FunctionPipelineParsingCallbacks;
  // Loop callbacks
  SmallVector<std::function<void(LoopAnalysisManager &)>, 2>
      LoopAnalysisRegistrationCallbacks;
  SmallVector<std::function<bool(StringRef, LoopPassManager &,
                                 ArrayRef<PipelineElement>)>,
              2>
      LoopPipelineParsingCallbacks;
  // AA callbacks
  SmallVector<std::function<bool(StringRef Name, AAManager &AA)>, 2>
      AAParsingCallbacks;
};

/// This utility template takes care of adding require<> and invalidate<>
/// passes for an analysis to a given \c PassManager. It is intended to be used
/// during parsing of a pass pipeline when parsing a single PipelineName.
/// When registering a new function analysis FancyAnalysis with the pass
/// pipeline name "fancy-analysis", a matching ParsePipelineCallback could look
/// like this:
///
/// static bool parseFunctionPipeline(StringRef Name, FunctionPassManager &FPM,
///                                   ArrayRef<PipelineElement> P) {
///   if (parseAnalysisUtilityPasses<FancyAnalysis>("fancy-analysis", Name,
///                                                 FPM))
///     return true;
///   return false;
/// }
template <typename AnalysisT, typename IRUnitT, typename AnalysisManagerT,
          typename... ExtraArgTs>
bool parseAnalysisUtilityPasses(
    StringRef AnalysisName, StringRef PipelineName,
    PassManager<IRUnitT, AnalysisManagerT, ExtraArgTs...> &PM) {
  if (!PipelineName.endswith(">"))
    return false;
  // See if this is an invalidate<> pass name
  if (PipelineName.startswith("invalidate<")) {
    // Strip "invalidate<" (11 chars) and the trailing ">".
    PipelineName = PipelineName.substr(11, PipelineName.size() - 12);
    if (PipelineName != AnalysisName)
      return false;
    PM.addPass(InvalidateAnalysisPass<AnalysisT>());
    return true;
  }

  // See if this is a require<> pass name
  if (PipelineName.startswith("require<")) {
    // Strip "require<" (8 chars) and the trailing ">".
    PipelineName = PipelineName.substr(8, PipelineName.size() - 9);
    if (PipelineName != AnalysisName)
      return false;
    PM.addPass(RequireAnalysisPass<AnalysisT, IRUnitT, AnalysisManagerT,
                                   ExtraArgTs...>());
    return true;
  }

  return false;
}
} // namespace llvm

#endif // LLVM_PASSES_PASSBUILDER_H