//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for SI+ GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetMachine.h"
#include "AMDGPU.h"
#include "AMDGPUAliasAnalysis.h"
#include "AMDGPUExportClustering.h"
#include "AMDGPUMacroFusion.h"
#include "AMDGPUTargetObjectFile.h"
#include "AMDGPUTargetTransformInfo.h"
#include "GCNIterativeScheduler.h"
#include "GCNSchedStrategy.h"
#include "R600.h"
#include "R600TargetMachine.h"
#include "SIMachineFunctionInfo.h"
#include "SIMachineScheduler.h"
#include "TargetInfo/AMDGPUTargetInfo.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/Localizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/MIRParser/MIParser.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/PassManager.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/GlobalDCE.h"
#include "llvm/Transforms/IPO/Internalize.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Scalar/InferAddressSpaces.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
#include "llvm/Transforms/Vectorize.h"

using namespace llvm;

namespace {
class SGPRRegisterRegAlloc : public RegisterRegAllocBase<SGPRRegisterRegAlloc> {
public:
  SGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
      : RegisterRegAllocBase(N, D, C) {}
};

class VGPRRegisterRegAlloc : public RegisterRegAllocBase<VGPRRegisterRegAlloc> {
public:
  VGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
      : RegisterRegAllocBase(N, D, C) {}
};

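// These predicates restrict each register-allocation run to a single register
// bank: SGPRs and VGPRs are allocated in two separate passes (see
// GCNPassConfig::addRegAssignAndRewriteFast/Optimized below).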
static bool onlyAllocateSGPRs(const TargetRegisterInfo &TRI,
                              const TargetRegisterClass &RC) {
  return static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(&RC);
}

static bool onlyAllocateVGPRs(const TargetRegisterInfo &TRI,
                              const TargetRegisterClass &RC) {
  return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(&RC);
}

/// -{sgpr|vgpr}-regalloc=... command line option.
static FunctionPass *useDefaultRegisterAllocator() { return nullptr; }

/// A dummy default pass factory indicates whether the register allocator is
/// overridden on the command line.
static llvm::once_flag InitializeDefaultSGPRRegisterAllocatorFlag;
static llvm::once_flag InitializeDefaultVGPRRegisterAllocatorFlag;

static SGPRRegisterRegAlloc
defaultSGPRRegAlloc("default",
                    "pick SGPR register allocator based on -O option",
                    useDefaultRegisterAllocator);

static cl::opt<SGPRRegisterRegAlloc::FunctionPassCtor, false,
               RegisterPassParser<SGPRRegisterRegAlloc>>
SGPRRegAlloc("sgpr-regalloc", cl::Hidden,
             cl::init(&useDefaultRegisterAllocator),
             cl::desc("Register allocator to use for SGPRs"));

static cl::opt<VGPRRegisterRegAlloc::FunctionPassCtor, false,
               RegisterPassParser<VGPRRegisterRegAlloc>>
VGPRRegAlloc("vgpr-regalloc", cl::Hidden,
             cl::init(&useDefaultRegisterAllocator),
             cl::desc("Register allocator to use for VGPRs"));

static void initializeDefaultSGPRRegisterAllocatorOnce() {
  RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();

  if (!Ctor) {
    Ctor = SGPRRegAlloc;
    SGPRRegisterRegAlloc::setDefault(SGPRRegAlloc);
  }
}

static void initializeDefaultVGPRRegisterAllocatorOnce() {
  RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();

  if (!Ctor) {
    Ctor = VGPRRegAlloc;
    VGPRRegisterRegAlloc::setDefault(VGPRRegAlloc);
  }
}

static FunctionPass *createBasicSGPRRegisterAllocator() {
  return createBasicRegisterAllocator(onlyAllocateSGPRs);
}

static FunctionPass *createGreedySGPRRegisterAllocator() {
  return createGreedyRegisterAllocator(onlyAllocateSGPRs);
}

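// The boolean passed to createFastRegisterAllocator is ClearVirtRegs: the
// SGPR run passes false because virtual VGPRs still need to be allocated
// afterwards, while the VGPR run is the final allocation and passes true.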
static FunctionPass *createFastSGPRRegisterAllocator() {
  return createFastRegisterAllocator(onlyAllocateSGPRs, false);
}

static FunctionPass *createBasicVGPRRegisterAllocator() {
  return createBasicRegisterAllocator(onlyAllocateVGPRs);
}

static FunctionPass *createGreedyVGPRRegisterAllocator() {
  return createGreedyRegisterAllocator(onlyAllocateVGPRs);
}

static FunctionPass *createFastVGPRRegisterAllocator() {
  return createFastRegisterAllocator(onlyAllocateVGPRs, true);
}

static SGPRRegisterRegAlloc basicRegAllocSGPR(
    "basic", "basic register allocator", createBasicSGPRRegisterAllocator);
static SGPRRegisterRegAlloc greedyRegAllocSGPR(
    "greedy", "greedy register allocator", createGreedySGPRRegisterAllocator);

static SGPRRegisterRegAlloc fastRegAllocSGPR(
    "fast", "fast register allocator", createFastSGPRRegisterAllocator);

static VGPRRegisterRegAlloc basicRegAllocVGPR(
    "basic", "basic register allocator", createBasicVGPRRegisterAllocator);
static VGPRRegisterRegAlloc greedyRegAllocVGPR(
    "greedy", "greedy register allocator", createGreedyVGPRRegisterAllocator);

static VGPRRegisterRegAlloc fastRegAllocVGPR(
    "fast", "fast register allocator", createFastVGPRRegisterAllocator);
} // end anonymous namespace

static cl::opt<bool> EnableSROA(
  "amdgpu-sroa",
  cl::desc("Run SROA after promote alloca pass"),
  cl::ReallyHidden,
  cl::init(true));

static cl::opt<bool>
EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(false));

static cl::opt<bool>
OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
                 cl::desc("Run pre-RA exec mask optimizations"),
                 cl::init(true));

// Option to disable the vectorizer for tests.
static cl::opt<bool> EnableLoadStoreVectorizer(
  "amdgpu-load-store-vectorizer",
  cl::desc("Enable load store vectorizer"),
  cl::init(true),
  cl::Hidden);

// Option to control scalarization of global loads.
static cl::opt<bool> ScalarizeGlobal(
  "amdgpu-scalarize-global-loads",
  cl::desc("Enable global load scalarization"),
  cl::init(true),
  cl::Hidden);

// Option to run the internalize pass.
static cl::opt<bool> InternalizeSymbols(
  "amdgpu-internalize-symbols",
  cl::desc("Enable elimination of non-kernel functions and unused globals"),
  cl::init(false),
  cl::Hidden);

// Option to inline all functions early.
static cl::opt<bool> EarlyInlineAll(
  "amdgpu-early-inline-all",
  cl::desc("Inline all functions early"),
  cl::init(false),
  cl::Hidden);

static cl::opt<bool> EnableSDWAPeephole(
  "amdgpu-sdwa-peephole",
  cl::desc("Enable SDWA peepholer"),
  cl::init(true));

static cl::opt<bool> EnableDPPCombine(
  "amdgpu-dpp-combine",
  cl::desc("Enable DPP combiner"),
  cl::init(true));

// Enable address-space-based alias analysis.
static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
  cl::desc("Enable AMDGPU Alias Analysis"),
  cl::init(true));

// Option to run the late CFG structurizer.
static cl::opt<bool, true> LateCFGStructurize(
  "amdgpu-late-structurize",
  cl::desc("Enable late CFG structurization"),
  cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG),
  cl::Hidden);

static cl::opt<bool, true> EnableAMDGPUFixedFunctionABIOpt(
  "amdgpu-fixed-function-abi",
  cl::desc("Enable all implicit function arguments"),
  cl::location(AMDGPUTargetMachine::EnableFixedFunctionABI),
  cl::init(false),
  cl::Hidden);

// Enable library call simplifications.
static cl::opt<bool> EnableLibCallSimplify(
  "amdgpu-simplify-libcall",
  cl::desc("Enable amdgpu library simplifications"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableLowerKernelArguments(
  "amdgpu-ir-lower-kernel-arguments",
  cl::desc("Lower kernel argument loads in IR pass"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableRegReassign(
  "amdgpu-reassign-regs",
  cl::desc("Enable register reassign optimizations on gfx10+"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> OptVGPRLiveRange(
  "amdgpu-opt-vgpr-liverange",
  cl::desc("Enable VGPR liverange optimizations for if-else structure"),
  cl::init(true), cl::Hidden);

// Enable atomic optimizations.
static cl::opt<bool> EnableAtomicOptimizations(
  "amdgpu-atomic-optimizations",
  cl::desc("Enable atomic optimizations"),
  cl::init(false),
  cl::Hidden);

// Enable the mode register optimization.
static cl::opt<bool> EnableSIModeRegisterPass(
  "amdgpu-mode-register",
  cl::desc("Enable mode register pass"),
  cl::init(true),
  cl::Hidden);

// This option is used in lit tests to prevent dead-coding of the patterns
// being inspected.
static cl::opt<bool>
EnableDCEInRA("amdgpu-dce-in-ra",
              cl::init(true), cl::Hidden,
              cl::desc("Enable machine DCE inside regalloc"));

static cl::opt<bool> EnableScalarIRPasses(
  "amdgpu-scalar-ir-passes",
  cl::desc("Enable scalar IR passes"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableStructurizerWorkarounds(
  "amdgpu-enable-structurizer-workarounds",
  cl::desc("Enable workarounds for the StructurizeCFG pass"), cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableLDSReplaceWithPointer(
  "amdgpu-enable-lds-replace-with-pointer",
  cl::desc("Enable LDS replace with pointer pass"), cl::init(false),
  cl::Hidden);

static cl::opt<bool, true> EnableLowerModuleLDS(
  "amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"),
  cl::location(AMDGPUTargetMachine::EnableLowerModuleLDS), cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnablePreRAOptimizations(
  "amdgpu-enable-pre-ra-optimizations",
  cl::desc("Enable Pre-RA optimizations pass"), cl::init(true),
  cl::Hidden);

extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
  // Register the target.
  RegisterTargetMachine<R600TargetMachine> X(getTheAMDGPUTarget());
  RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());

  PassRegistry *PR = PassRegistry::getPassRegistry();
  initializeR600ClauseMergePassPass(*PR);
  initializeR600ControlFlowFinalizerPass(*PR);
  initializeR600PacketizerPass(*PR);
  initializeR600ExpandSpecialInstrsPassPass(*PR);
  initializeR600VectorRegMergerPass(*PR);
  initializeGlobalISel(*PR);
  initializeAMDGPUDAGToDAGISelPass(*PR);
  initializeGCNDPPCombinePass(*PR);
  initializeSILowerI1CopiesPass(*PR);
  initializeSILowerSGPRSpillsPass(*PR);
  initializeSIFixSGPRCopiesPass(*PR);
  initializeSIFixVGPRCopiesPass(*PR);
  initializeSIFoldOperandsPass(*PR);
  initializeSIPeepholeSDWAPass(*PR);
  initializeSIShrinkInstructionsPass(*PR);
  initializeSIOptimizeExecMaskingPreRAPass(*PR);
  initializeSIOptimizeVGPRLiveRangePass(*PR);
  initializeSILoadStoreOptimizerPass(*PR);
  initializeAMDGPUFixFunctionBitcastsPass(*PR);
  initializeAMDGPUCtorDtorLoweringPass(*PR);
  initializeAMDGPUAlwaysInlinePass(*PR);
  initializeAMDGPUAttributorPass(*PR);
  initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
  initializeAMDGPUAnnotateUniformValuesPass(*PR);
  initializeAMDGPUArgumentUsageInfoPass(*PR);
  initializeAMDGPUAtomicOptimizerPass(*PR);
  initializeAMDGPULowerKernelArgumentsPass(*PR);
  initializeAMDGPULowerKernelAttributesPass(*PR);
  initializeAMDGPULowerIntrinsicsPass(*PR);
  initializeAMDGPUOpenCLEnqueuedBlockLoweringPass(*PR);
  initializeAMDGPUPostLegalizerCombinerPass(*PR);
  initializeAMDGPUPreLegalizerCombinerPass(*PR);
  initializeAMDGPURegBankCombinerPass(*PR);
  initializeAMDGPUPromoteAllocaPass(*PR);
  initializeAMDGPUPromoteAllocaToVectorPass(*PR);
  initializeAMDGPUCodeGenPreparePass(*PR);
  initializeAMDGPULateCodeGenPreparePass(*PR);
  initializeAMDGPUPropagateAttributesEarlyPass(*PR);
  initializeAMDGPUPropagateAttributesLatePass(*PR);
  initializeAMDGPUReplaceLDSUseWithPointerPass(*PR);
  initializeAMDGPULowerModuleLDSPass(*PR);
  initializeAMDGPURewriteOutArgumentsPass(*PR);
  initializeAMDGPUUnifyMetadataPass(*PR);
  initializeSIAnnotateControlFlowPass(*PR);
  initializeSIInsertHardClausesPass(*PR);
  initializeSIInsertWaitcntsPass(*PR);
  initializeSIModeRegisterPass(*PR);
  initializeSIWholeQuadModePass(*PR);
  initializeSILowerControlFlowPass(*PR);
  initializeSIPreEmitPeepholePass(*PR);
  initializeSILateBranchLoweringPass(*PR);
  initializeSIMemoryLegalizerPass(*PR);
  initializeSIOptimizeExecMaskingPass(*PR);
  initializeSIPreAllocateWWMRegsPass(*PR);
  initializeSIFormMemoryClausesPass(*PR);
  initializeSIPostRABundlerPass(*PR);
  initializeAMDGPUUnifyDivergentExitNodesPass(*PR);
  initializeAMDGPUAAWrapperPassPass(*PR);
  initializeAMDGPUExternalAAWrapperPass(*PR);
  initializeAMDGPUUseNativeCallsPass(*PR);
  initializeAMDGPUSimplifyLibCallsPass(*PR);
  initializeAMDGPUPrintfRuntimeBindingPass(*PR);
  initializeAMDGPUResourceUsageAnalysisPass(*PR);
  initializeGCNNSAReassignPass(*PR);
  initializeGCNPreRAOptimizationsPass(*PR);
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return std::make_unique<AMDGPUTargetObjectFile>();
}

static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
  return new SIScheduleDAGMI(C);
}

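// The default GCN scheduler maximizes occupancy; the DAG mutations added
// below cluster loads, apply AMDGPU macro fusion, and cluster exports.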
static ScheduleDAGInstrs *
createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG = new GCNScheduleDAGMILive(
      C, std::make_unique<GCNMaxOccupancySchedStrategy>(C));
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
  return DAG;
}

static ScheduleDAGInstrs *
createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(
      C, GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
  return new GCNIterativeScheduler(
      C, GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
}

static ScheduleDAGInstrs *
createIterativeILPMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C, GCNIterativeScheduler::SCHEDULE_ILP);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

static MachineSchedRegistry
GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
                             "Run GCN scheduler to maximize occupancy",
                             createGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
IterativeGCNMaxOccupancySchedRegistry(
    "gcn-max-occupancy-experimental",
    "Run GCN scheduler to maximize occupancy (experimental)",
    createIterativeGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
GCNMinRegSchedRegistry(
    "gcn-minreg",
    "Run GCN iterative scheduler for minimal register usage (experimental)",
    createMinRegScheduler);

static MachineSchedRegistry
GCNILPSchedRegistry(
    "gcn-ilp",
    "Run GCN iterative scheduler for ILP scheduling (experimental)",
    createIterativeILPMachineScheduler);

static StringRef computeDataLayout(const Triple &TT) {
  if (TT.getArch() == Triple::r600) {
    // 32-bit pointers.
    return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
           "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1";
  }

  // 32-bit private, local, and region pointers. 64-bit global, constant and
  // flat pointers. Buffer fat pointers (address space 7) are non-integral
  // ("-ni:7").
  return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
         "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
         "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1"
         "-ni:7";
}

LLVM_READNONE
static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  // Need to default to a target with flat support for HSA.
  if (TT.getArch() == Triple::amdgcn)
    return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";

  return "r600";
}

static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
  // The AMDGPU toolchain only supports generating shared objects, so we
  // must always use PIC.
  return Reloc::PIC_;
}

AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         TargetOptions Options,
                                         Optional<Reloc::Model> RM,
                                         Optional<CodeModel::Model> CM,
                                         CodeGenOpt::Level OptLevel)
    : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
                        FS, Options, getEffectiveRelocModel(RM),
                        getEffectiveCodeModel(CM, CodeModel::Small), OptLevel),
      TLOF(createTLOF(getTargetTriple())) {
  initAsmInfo();
  if (TT.getArch() == Triple::amdgcn) {
    if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize64"))
      MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave64));
    else if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize32"))
      MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave32));
  }
}

bool AMDGPUTargetMachine::EnableLateStructurizeCFG = false;
bool AMDGPUTargetMachine::EnableFunctionCalls = false;
bool AMDGPUTargetMachine::EnableFixedFunctionABI = false;
bool AMDGPUTargetMachine::EnableLowerModuleLDS = true;

AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;

StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
  Attribute GPUAttr = F.getFnAttribute("target-cpu");
  return GPUAttr.isValid() ? GPUAttr.getValueAsString() : getTargetCPU();
}

StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
  Attribute FSAttr = F.getFnAttribute("target-features");

  return FSAttr.isValid() ? FSAttr.getValueAsString()
                          : getTargetFeatureString();
}

/// Predicate for Internalize pass.
static bool mustPreserveGV(const GlobalValue &GV) {
  if (const Function *F = dyn_cast<Function>(&GV))
    return F->isDeclaration() || F->getName().startswith("__asan_") ||
           F->getName().startswith("__sanitizer_") ||
           AMDGPU::isEntryFunctionCC(F->getCallingConv());

  GV.removeDeadConstantUsers();
  return !GV.use_empty();
}

void AMDGPUTargetMachine::adjustPassManager(PassManagerBuilder &Builder) {
  Builder.DivergentTarget = true;

  bool EnableOpt = getOptLevel() > CodeGenOpt::None;
  bool Internalize = InternalizeSymbols;
  bool EarlyInline = EarlyInlineAll && EnableOpt && !EnableFunctionCalls;
  bool AMDGPUAA = EnableAMDGPUAliasAnalysis && EnableOpt;
  bool LibCallSimplify = EnableLibCallSimplify && EnableOpt;

  if (EnableFunctionCalls) {
    delete Builder.Inliner;
    Builder.Inliner = createFunctionInliningPass();
  }

  Builder.addExtension(
      PassManagerBuilder::EP_ModuleOptimizerEarly,
      [Internalize, EarlyInline, AMDGPUAA, this](const PassManagerBuilder &,
                                                 legacy::PassManagerBase &PM) {
        if (AMDGPUAA) {
          PM.add(createAMDGPUAAWrapperPass());
          PM.add(createAMDGPUExternalAAWrapperPass());
        }
        PM.add(createAMDGPUUnifyMetadataPass());
        PM.add(createAMDGPUPrintfRuntimeBinding());
        if (Internalize)
          PM.add(createInternalizePass(mustPreserveGV));
        PM.add(createAMDGPUPropagateAttributesLatePass(this));
        if (Internalize)
          PM.add(createGlobalDCEPass());
        if (EarlyInline)
          PM.add(createAMDGPUAlwaysInlinePass(false));
      });

  Builder.addExtension(
      PassManagerBuilder::EP_EarlyAsPossible,
      [AMDGPUAA, LibCallSimplify, this](const PassManagerBuilder &,
                                        legacy::PassManagerBase &PM) {
        if (AMDGPUAA) {
          PM.add(createAMDGPUAAWrapperPass());
          PM.add(createAMDGPUExternalAAWrapperPass());
        }
        PM.add(llvm::createAMDGPUPropagateAttributesEarlyPass(this));
        PM.add(llvm::createAMDGPUUseNativeCallsPass());
        if (LibCallSimplify)
          PM.add(llvm::createAMDGPUSimplifyLibCallsPass(this));
      });

  Builder.addExtension(
      PassManagerBuilder::EP_CGSCCOptimizerLate,
      [EnableOpt](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
        // Add infer address spaces pass to the opt pipeline after inlining
        // but before SROA to increase SROA opportunities.
        PM.add(createInferAddressSpacesPass());

        // This should run after inlining to have any chance of doing anything,
        // and before other cleanup optimizations.
        PM.add(createAMDGPULowerKernelAttributesPass());

        // Promote alloca to vector before SROA and loop unroll. If we manage
        // to eliminate allocas before unroll we may choose to unroll less.
        if (EnableOpt)
          PM.add(createAMDGPUPromoteAllocaToVector());
      });
}

void AMDGPUTargetMachine::registerDefaultAliasAnalyses(AAManager &AAM) {
  AAM.registerFunctionAnalysis<AMDGPUAA>();
}

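// Hook the AMDGPU IR passes and the AMDGPU alias analysis into the new pass
// manager: by-name parsing for -passes=..., analysis registration, and
// extension-point callbacks mirroring adjustPassManager() above.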
void AMDGPUTargetMachine::registerPassBuilderCallbacks(PassBuilder &PB) {
  PB.registerPipelineParsingCallback(
      [this](StringRef PassName, ModulePassManager &PM,
             ArrayRef<PassBuilder::PipelineElement>) {
        if (PassName == "amdgpu-propagate-attributes-late") {
          PM.addPass(AMDGPUPropagateAttributesLatePass(*this));
          return true;
        }
        if (PassName == "amdgpu-unify-metadata") {
          PM.addPass(AMDGPUUnifyMetadataPass());
          return true;
        }
        if (PassName == "amdgpu-printf-runtime-binding") {
          PM.addPass(AMDGPUPrintfRuntimeBindingPass());
          return true;
        }
        if (PassName == "amdgpu-always-inline") {
          PM.addPass(AMDGPUAlwaysInlinePass());
          return true;
        }
        if (PassName == "amdgpu-replace-lds-use-with-pointer") {
          PM.addPass(AMDGPUReplaceLDSUseWithPointerPass());
          return true;
        }
        if (PassName == "amdgpu-lower-module-lds") {
          PM.addPass(AMDGPULowerModuleLDSPass());
          return true;
        }
        return false;
      });
  PB.registerPipelineParsingCallback(
      [this](StringRef PassName, FunctionPassManager &PM,
             ArrayRef<PassBuilder::PipelineElement>) {
        if (PassName == "amdgpu-simplifylib") {
          PM.addPass(AMDGPUSimplifyLibCallsPass(*this));
          return true;
        }
        if (PassName == "amdgpu-usenative") {
          PM.addPass(AMDGPUUseNativeCallsPass());
          return true;
        }
        if (PassName == "amdgpu-promote-alloca") {
          PM.addPass(AMDGPUPromoteAllocaPass(*this));
          return true;
        }
        if (PassName == "amdgpu-promote-alloca-to-vector") {
          PM.addPass(AMDGPUPromoteAllocaToVectorPass(*this));
          return true;
        }
        if (PassName == "amdgpu-lower-kernel-attributes") {
          PM.addPass(AMDGPULowerKernelAttributesPass());
          return true;
        }
        if (PassName == "amdgpu-propagate-attributes-early") {
          PM.addPass(AMDGPUPropagateAttributesEarlyPass(*this));
          return true;
        }
        return false;
      });

  PB.registerAnalysisRegistrationCallback([](FunctionAnalysisManager &FAM) {
    FAM.registerPass([&] { return AMDGPUAA(); });
  });

  PB.registerParseAACallback([](StringRef AAName, AAManager &AAM) {
    if (AAName == "amdgpu-aa") {
      AAM.registerFunctionAnalysis<AMDGPUAA>();
      return true;
    }
    return false;
  });

  PB.registerPipelineStartEPCallback(
      [this](ModulePassManager &PM, OptimizationLevel Level) {
        FunctionPassManager FPM;
        FPM.addPass(AMDGPUPropagateAttributesEarlyPass(*this));
        FPM.addPass(AMDGPUUseNativeCallsPass());
        if (EnableLibCallSimplify && Level != OptimizationLevel::O0)
          FPM.addPass(AMDGPUSimplifyLibCallsPass(*this));
        PM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
      });

  PB.registerPipelineEarlySimplificationEPCallback(
      [this](ModulePassManager &PM, OptimizationLevel Level) {
        if (Level == OptimizationLevel::O0)
          return;

        PM.addPass(AMDGPUUnifyMetadataPass());
        PM.addPass(AMDGPUPrintfRuntimeBindingPass());

        if (InternalizeSymbols) {
          PM.addPass(InternalizePass(mustPreserveGV));
        }
        PM.addPass(AMDGPUPropagateAttributesLatePass(*this));
        if (InternalizeSymbols) {
          PM.addPass(GlobalDCEPass());
        }
        if (EarlyInlineAll && !EnableFunctionCalls)
          PM.addPass(AMDGPUAlwaysInlinePass());
      });

  PB.registerCGSCCOptimizerLateEPCallback(
      [this](CGSCCPassManager &PM, OptimizationLevel Level) {
        if (Level == OptimizationLevel::O0)
          return;

        FunctionPassManager FPM;

        // Add infer address spaces pass to the opt pipeline after inlining
        // but before SROA to increase SROA opportunities.
        FPM.addPass(InferAddressSpacesPass());

        // This should run after inlining to have any chance of doing
        // anything, and before other cleanup optimizations.
        FPM.addPass(AMDGPULowerKernelAttributesPass());

        if (Level != OptimizationLevel::O0) {
          // Promote alloca to vector before SROA and loop unroll. If we
          // manage to eliminate allocas before unroll we may choose to unroll
          // less.
          FPM.addPass(AMDGPUPromoteAllocaToVectorPass(*this));
        }

        PM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM)));
      });
}

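// Address 0 is valid in the local, private, and region address spaces, so
// those address spaces use all-ones (-1) as the null pointer value instead.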
int64_t AMDGPUTargetMachine::getNullPointerValue(unsigned AddrSpace) {
  return (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
          AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
          AddrSpace == AMDGPUAS::REGION_ADDRESS)
             ? -1
             : 0;
}

bool AMDGPUTargetMachine::isNoopAddrSpaceCast(unsigned SrcAS,
                                              unsigned DestAS) const {
  return AMDGPU::isFlatGlobalAddrSpace(SrcAS) &&
         AMDGPU::isFlatGlobalAddrSpace(DestAS);
}

unsigned AMDGPUTargetMachine::getAssumedAddrSpace(const Value *V) const {
  const auto *LD = dyn_cast<LoadInst>(V);
  if (!LD)
    return AMDGPUAS::UNKNOWN_ADDRESS_SPACE;

  // It must be a load of a generic (flat) pointer.
  assert(V->getType()->isPointerTy() &&
         V->getType()->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS);

  const auto *Ptr = LD->getPointerOperand();
  if (Ptr->getType()->getPointerAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
    return AMDGPUAS::UNKNOWN_ADDRESS_SPACE;
  // A generic pointer loaded from constant memory can be assumed to be a
  // global pointer, since constant memory is only populated on the host side.
  // As implied by the offload programming model, only global pointers can be
  // referenced on the host side.
  return AMDGPUAS::GLOBAL_ADDRESS;
}

//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   TargetOptions Options,
                                   Optional<Reloc::Model> RM,
                                   Optional<CodeModel::Model> CM,
                                   CodeGenOpt::Level OL, bool JIT)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

const TargetSubtargetInfo *
GCNTargetMachine::getSubtargetImpl(const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
  }

  I->setScalarizeGlobalBehavior(ScalarizeGlobal);

  return I.get();
}

TargetTransformInfo
GCNTargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(GCNTTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// AMDGPU Pass Setup
//===----------------------------------------------------------------------===//

std::unique_ptr<CSEConfigBase> llvm::AMDGPUPassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}

namespace {

class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
      : AMDGPUPassConfig(TM, PM) {
    // It is necessary to know the register usage of the entire call graph.
    // We allow calls without EnableAMDGPUFunctionCalls if they are marked
    // noinline, so this is always required.
    setRequiresCodeGenSCCOrder(true);
    substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
  }

  GCNTargetMachine &getGCNTargetMachine() const {
    return getTM<GCNTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override;

  ScheduleDAGInstrs *
  createPostMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMI *DAG = createGenericSchedPostRA(C);
    const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
    DAG->addMutation(ST.createFillMFMAShadowMutation(DAG->TII));
    return DAG;
  }

  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  void addPreLegalizeMachineIR() override;
  bool addLegalizeMachineIR() override;
  void addPreRegBankSelect() override;
  bool addRegBankSelect() override;
  void addPreGlobalInstructionSelect() override;
  bool addGlobalInstructionSelect() override;
  void addFastRegAlloc() override;
  void addOptimizedRegAlloc() override;

  FunctionPass *createSGPRAllocPass(bool Optimized);
  FunctionPass *createVGPRAllocPass(bool Optimized);
  FunctionPass *createRegAllocPass(bool Optimized) override;

  bool addRegAssignAndRewriteFast() override;
  bool addRegAssignAndRewriteOptimized() override;

  void addPreRegAlloc() override;
  bool addPreRewrite() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // end anonymous namespace

AMDGPUPassConfig::AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {
  // Exceptions and StackMaps are not supported, so these passes will never do
  // anything.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  // Garbage collection is not supported.
  disablePass(&GCLoweringID);
  disablePass(&ShadowStackGCLoweringID);
}

void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
  if (getOptLevel() == CodeGenOpt::Aggressive)
    addPass(createGVNPass());
  else
    addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
  addPass(createLICMPass());
  addPass(createSeparateConstOffsetFromGEPPass());
  addPass(createSpeculativeExecutionPass());
  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN
  // or EarlyCSE can reuse.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addIRPasses() {
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();

  // There is no reason to run these.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  addPass(createAMDGPUPrintfRuntimeBinding());
  addPass(createAMDGPUCtorDtorLoweringPass());

  // This must occur before inlining, as the inliner will not look through
  // bitcast calls.
  addPass(createAMDGPUFixFunctionBitcastsPass());

  // Run the propagate-attributes pass in the backend in case opt was not run.
  addPass(createAMDGPUPropagateAttributesEarlyPass(&TM));

  addPass(createAMDGPULowerIntrinsicsPass());

  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerLegacyPass());
  // We need to add the barrier noop pass, otherwise adding the function
  // inlining pass will cause all of the PassConfig's passes to be run
  // one function at a time, which means if we have a module with two
  // functions, then we will generate code for the first function
  // without ever running any passes on the second.
  addPass(createBarrierNoopPass());

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  if (TM.getTargetTriple().getArch() == Triple::r600)
    addPass(createR600OpenCLImageTypeLoweringPass());

  // Replace OpenCL enqueued block function pointers with global variables.
  addPass(createAMDGPUOpenCLEnqueuedBlockLoweringPass());

  // This can increase the LDS used by a kernel, so it runs before
  // PromoteAlloca.
  if (EnableLowerModuleLDS) {
    // The "amdgpu-replace-lds-use-with-pointer" pass needs to run before
    // "amdgpu-lower-module-lds", and only makes sense while that pass is
    // enabled.
    if (EnableLDSReplaceWithPointer)
      addPass(createAMDGPUReplaceLDSUseWithPointerPass());

    addPass(createAMDGPULowerModuleLDSPass());
  }

  if (TM.getOptLevel() > CodeGenOpt::None)
    addPass(createInferAddressSpacesPass());

  addPass(createAtomicExpandPass());

  if (TM.getOptLevel() > CodeGenOpt::None) {
    addPass(createAMDGPUPromoteAlloca());

    if (EnableSROA)
      addPass(createSROAPass());
    if (isPassEnabled(EnableScalarIRPasses))
      addStraightLineScalarOptimizationPasses();

    if (EnableAMDGPUAliasAnalysis) {
      addPass(createAMDGPUAAWrapperPass());
      addPass(createExternalAAWrapperPass([](Pass &P, Function &,
                                             AAResults &AAR) {
        if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
          AAR.addAAResult(WrapperPass->getResult());
      }));
    }

    if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
      // TODO: May want to move later or split into an early and late one.
      addPass(createAMDGPUCodeGenPreparePass());
    }
  }

  TargetPassConfig::addIRPasses();

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  //   %0 = add %a, %b
  //   %1 = add %b, %a
  //
  // and
  //
  //   %0 = shl nsw %a, 2
  //   %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (isPassEnabled(EnableScalarIRPasses))
    addEarlyCSEOrGVNPass();
}

void AMDGPUPassConfig::addCodeGenPrepare() {
  if (TM->getTargetTriple().getArch() == Triple::amdgcn) {
    addPass(createAMDGPUAttributorPass());

    // FIXME: This pass adds 2 hacky attributes that can be replaced with an
    // analysis, and should be removed.
    addPass(createAMDGPUAnnotateKernelFeaturesPass());
  }

  if (TM->getTargetTriple().getArch() == Triple::amdgcn &&
      EnableLowerKernelArguments)
    addPass(createAMDGPULowerKernelArgumentsPass());

  TargetPassConfig::addCodeGenPrepare();

  if (isPassEnabled(EnableLoadStoreVectorizer))
    addPass(createLoadStoreVectorizerPass());

  // The LowerSwitch pass may introduce unreachable blocks that can cause
  // unexpected behavior for subsequent passes. Placing it here lets those
  // blocks be cleaned up by the UnreachableBlockElim pass inserted next in
  // the pass flow.
  addPass(createLowerSwitchPass());
}

bool AMDGPUPassConfig::addPreISel() {
  if (TM->getOptLevel() > CodeGenOpt::None)
    addPass(createFlattenCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  addPass(createAMDGPUISelDag(&getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}

llvm::ScheduleDAGInstrs *
AMDGPUPassConfig::createMachineScheduler(MachineSchedContext *C) const {
  ScheduleDAGMILive *DAG = createGenericSchedLive(C);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

//===----------------------------------------------------------------------===//
// GCN Pass Setup
//===----------------------------------------------------------------------===//

ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
    MachineSchedContext *C) const {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  if (ST.enableSIScheduler())
    return createSIMachineScheduler(C);
  return createGCNMaxOccupancyMachineScheduler(C);
}

bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (TM->getOptLevel() > CodeGenOpt::None)
    addPass(createAMDGPULateCodeGenPreparePass());

  if (isPassEnabled(EnableAtomicOptimizations, CodeGenOpt::Less)) {
    addPass(createAMDGPUAtomicOptimizerPass());
  }

  if (TM->getOptLevel() > CodeGenOpt::None)
    addPass(createSinkingPass());

  // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
  // regions formed by them.
  addPass(&AMDGPUUnifyDivergentExitNodesID);
  if (!LateCFGStructurize) {
    if (EnableStructurizerWorkarounds) {
      addPass(createFixIrreduciblePass());
      addPass(createUnifyLoopExitsPass());
    }
    addPass(createStructurizeCFGPass(false)); // true -> SkipUniformRegions
  }
  addPass(createAMDGPUAnnotateUniformValues());
  if (!LateCFGStructurize) {
    addPass(createSIAnnotateControlFlowPass());
  }
  addPass(createLCSSAPass());

  if (TM->getOptLevel() > CodeGenOpt::Less)
    addPass(&AMDGPUPerfHintAnalysisID);

  return false;
}

void GCNPassConfig::addMachineSSAOptimization() {
  TargetPassConfig::addMachineSSAOptimization();

  // We want to fold operands after PeepholeOptimizer has run (or as part of
  // it), because it will eliminate extra copies making it easier to fold the
  // real source operand. We want to eliminate dead instructions after, so that
  // we see fewer uses of the copies. We then need to clean up the dead
  // instructions leftover after the operands are folded as well.
  //
  // XXX - Can we get away without running DeadMachineInstructionElim again?
  addPass(&SIFoldOperandsID);
  if (EnableDPPCombine)
    addPass(&GCNDPPCombineID);
  addPass(&SILoadStoreOptimizerID);
  if (isPassEnabled(EnableSDWAPeephole)) {
    addPass(&SIPeepholeSDWAID);
    addPass(&EarlyMachineLICMID);
    addPass(&MachineCSEID);
    addPass(&SIFoldOperandsID);
  }
  addPass(&DeadMachineInstructionElimID);
  addPass(createSIShrinkInstructionsPass());
}

bool GCNPassConfig::addILPOpts() {
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);

  TargetPassConfig::addILPOpts();
  return false;
}

bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(&SIFixSGPRCopiesID);
  addPass(createSILowerI1CopiesPass());
  return false;
}

bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator(getOptLevel()));
  return false;
}

void GCNPassConfig::addPreLegalizeMachineIR() {
  bool IsOptNone = getOptLevel() == CodeGenOpt::None;
  addPass(createAMDGPUPreLegalizeCombiner(IsOptNone));
  addPass(new Localizer());
}

bool GCNPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

void GCNPassConfig::addPreRegBankSelect() {
  bool IsOptNone = getOptLevel() == CodeGenOpt::None;
  addPass(createAMDGPUPostLegalizeCombiner(IsOptNone));
}

bool GCNPassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

void GCNPassConfig::addPreGlobalInstructionSelect() {
  bool IsOptNone = getOptLevel() == CodeGenOpt::None;
  addPass(createAMDGPURegBankCombiner(IsOptNone));
}

bool GCNPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect(getOptLevel()));
  return false;
}

void GCNPassConfig::addPreRegAlloc() {
  if (LateCFGStructurize) {
    addPass(createAMDGPUMachineCFGStructurizerPass());
  }
}

void GCNPassConfig::addFastRegAlloc() {
  // FIXME: We have to disable the verifier here because of PHIElimination +
  // TwoAddressInstructions disabling it.

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  insertPass(&TwoAddressInstructionPassID, &SIWholeQuadModeID);
  insertPass(&TwoAddressInstructionPassID, &SIPreAllocateWWMRegsID);

  TargetPassConfig::addFastRegAlloc();
}

void GCNPassConfig::addOptimizedRegAlloc() {
  // Allow the scheduler to run before SIWholeQuadMode inserts exec
  // manipulation instructions that cause scheduling barriers.
  insertPass(&MachineSchedulerID, &SIWholeQuadModeID);
  insertPass(&MachineSchedulerID, &SIPreAllocateWWMRegsID);

  if (OptExecMaskPreRA)
    insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);

  if (isPassEnabled(EnablePreRAOptimizations))
    insertPass(&RenameIndependentSubregsID, &GCNPreRAOptimizationsID);

  // This is not an essential optimization and it has a noticeable impact on
  // compilation time, so we only enable it from O2.
  if (TM->getOptLevel() > CodeGenOpt::Less)
    insertPass(&MachineSchedulerID, &SIFormMemoryClausesID);

  // FIXME: When an instruction has a killed operand and is inside a bundle,
  // it seems that only the BUNDLE instruction appears as the kill of the
  // register in LiveVariables. This triggers a verifier failure; we should
  // fix it and enable the verifier.
  if (OptVGPRLiveRange)
    insertPass(&LiveVariablesID, &SIOptimizeVGPRLiveRangeID, false);
  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  if (EnableDCEInRA)
    insertPass(&DetectDeadLanesID, &DeadMachineInstructionElimID);

  TargetPassConfig::addOptimizedRegAlloc();
}

bool GCNPassConfig::addPreRewrite() {
  if (EnableRegReassign)
    addPass(&GCNNSAReassignID);
  return true;
}

FunctionPass *GCNPassConfig::createSGPRAllocPass(bool Optimized) {
  // Initialize the global default.
  llvm::call_once(InitializeDefaultSGPRRegisterAllocatorFlag,
                  initializeDefaultSGPRRegisterAllocatorOnce);

  RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
  if (Ctor != useDefaultRegisterAllocator)
    return Ctor();

  if (Optimized)
    return createGreedyRegisterAllocator(onlyAllocateSGPRs);

  return createFastRegisterAllocator(onlyAllocateSGPRs, false);
}

FunctionPass *GCNPassConfig::createVGPRAllocPass(bool Optimized) {
  // Initialize the global default.
  llvm::call_once(InitializeDefaultVGPRRegisterAllocatorFlag,
                  initializeDefaultVGPRRegisterAllocatorOnce);

  RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
  if (Ctor != useDefaultRegisterAllocator)
    return Ctor();

  if (Optimized)
    return createGreedyVGPRRegisterAllocator();

  return createFastVGPRRegisterAllocator();
}

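// The generic createRegAllocPass hook is never used on amdgcn: register
// allocation is split into the dedicated SGPR and VGPR passes created above.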
FunctionPass *GCNPassConfig::createRegAllocPass(bool Optimized) {
  llvm_unreachable("should not be used");
}

static const char RegAllocOptNotSupportedMessage[] =
    "-regalloc not supported with amdgcn. Use -sgpr-regalloc and "
    "-vgpr-regalloc";

bool GCNPassConfig::addRegAssignAndRewriteFast() {
  if (!usingDefaultRegAlloc())
    report_fatal_error(RegAllocOptNotSupportedMessage);

  addPass(createSGPRAllocPass(false));

  // Equivalent of PEI for SGPRs.
  addPass(&SILowerSGPRSpillsID);

  addPass(createVGPRAllocPass(false));
  return true;
}

bool GCNPassConfig::addRegAssignAndRewriteOptimized() {
  if (!usingDefaultRegAlloc())
    report_fatal_error(RegAllocOptNotSupportedMessage);

  addPass(createSGPRAllocPass(true));

  // Commit allocated register changes. This is mostly necessary because too
  // many things rely on the use lists of the physical registers, such as the
  // verifier. This is only necessary with allocators which use LiveIntervals,
  // since FastRegAlloc does the replacements itself.
  addPass(createVirtRegRewriter(false));

  // Equivalent of PEI for SGPRs.
  addPass(&SILowerSGPRSpillsID);

  addPass(createVGPRAllocPass(true));

  addPreRewrite();
  addPass(&VirtRegRewriterID);

  return true;
}

void GCNPassConfig::addPostRegAlloc() {
  addPass(&SIFixVGPRCopiesID);
  if (getOptLevel() > CodeGenOpt::None)
    addPass(&SIOptimizeExecMaskingID);
  TargetPassConfig::addPostRegAlloc();
}

void GCNPassConfig::addPreSched2() {
  addPass(&SIPostRABundlerID);
}

void GCNPassConfig::addPreEmitPass() {
  addPass(createSIMemoryLegalizerPass());
  addPass(createSIInsertWaitcntsPass());

  if (TM->getOptLevel() > CodeGenOpt::None)
    addPass(createSIShrinkInstructionsPass());

  addPass(createSIModeRegisterPass());

  if (getOptLevel() > CodeGenOpt::None)
    addPass(&SIInsertHardClausesID);

  addPass(&SILateBranchLoweringPassID);
  if (getOptLevel() > CodeGenOpt::None)
    addPass(&SIPreEmitPeepholeID);
  // The hazard recognizer that runs as part of the post-RA scheduler does not
  // guarantee that it can handle all hazards correctly. This is because, if
  // there are multiple scheduling regions in a basic block, the regions are
  // scheduled bottom-up, so when we begin to schedule a region we don't know
  // what instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // cases.
  addPass(&PostRAHazardRecognizerID);
  addPass(&BranchRelaxationPassID);
}

TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(*this, PM);
}

yaml::MachineFunctionInfo *GCNTargetMachine::createDefaultFuncInfoYAML() const {
  return new yaml::SIMachineFunctionInfo();
}

yaml::MachineFunctionInfo *
GCNTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  return new yaml::SIMachineFunctionInfo(
      *MFI, *MF.getSubtarget().getRegisterInfo(), MF);
}

bool GCNTargetMachine::parseMachineFunctionInfo(
    const yaml::MachineFunctionInfo &MFI_, PerFunctionMIParsingState &PFS,
    SMDiagnostic &Error, SMRange &SourceRange) const {
  const yaml::SIMachineFunctionInfo &YamlMFI =
      reinterpret_cast<const yaml::SIMachineFunctionInfo &>(MFI_);
  MachineFunction &MF = PFS.MF;
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  if (MFI->initializeBaseYamlFields(YamlMFI, MF, PFS, Error, SourceRange))
    return true;

  if (MFI->Occupancy == 0) {
    // Fix up the subtarget-dependent default value.
    const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
    MFI->Occupancy = ST.computeOccupancy(MF.getFunction(), MFI->getLDSSize());
  }

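  // Helper to resolve a named register reference from the YAML, pointing any
  // diagnostic at the register string's source range.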
  auto parseRegister = [&](const yaml::StringValue &RegName, Register &RegVal) {
    Register TempReg;
    if (parseNamedRegisterReference(PFS, TempReg, RegName.Value, Error)) {
      SourceRange = RegName.SourceRange;
      return true;
    }
    RegVal = TempReg;

    return false;
  };

  auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) {
    // Create a diagnostic for the register string literal.
    const MemoryBuffer &Buffer =
        *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
    Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1,
                         RegName.Value.size(), SourceMgr::DK_Error,
                         "incorrect register class for field", RegName.Value,
                         None, None);
    SourceRange = RegName.SourceRange;
    return true;
  };

  if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) ||
      parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) ||
      parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg))
    return true;

  if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG &&
      !AMDGPU::SGPR_128RegClass.contains(MFI->ScratchRSrcReg)) {
    return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg);
  }

  if (MFI->FrameOffsetReg != AMDGPU::FP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.FrameOffsetReg);
  }

  if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg);
  }

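  // Helper to parse one optional SIArgument entry: either a named register
  // (validated against RC) or a stack offset, accumulating the user/system
  // SGPR counts as arguments are parsed.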
  auto parseAndCheckArgument = [&](const Optional<yaml::SIArgument> &A,
                                   const TargetRegisterClass &RC,
                                   ArgDescriptor &Arg, unsigned UserSGPRs,
                                   unsigned SystemSGPRs) {
    // Skip parsing if it's not present.
    if (!A)
      return false;

    if (A->IsRegister) {
      Register Reg;
      if (parseNamedRegisterReference(PFS, Reg, A->RegisterName.Value, Error)) {
        SourceRange = A->RegisterName.SourceRange;
        return true;
      }
      if (!RC.contains(Reg))
        return diagnoseRegisterClass(A->RegisterName);
      Arg = ArgDescriptor::createRegister(Reg);
    } else
      Arg = ArgDescriptor::createStack(A->StackOffset);
    // Check and apply the optional mask.
    if (A->Mask)
      Arg = ArgDescriptor::createArg(Arg, A->Mask.getValue());

    MFI->NumUserSGPRs += UserSGPRs;
    MFI->NumSystemSGPRs += SystemSGPRs;
    return false;
  };

  if (YamlMFI.ArgInfo &&
      (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer,
                             AMDGPU::SGPR_128RegClass,
                             MFI->ArgInfo.PrivateSegmentBuffer, 4, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchPtr,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.QueuePtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.KernargSegmentPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchID,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.FlatScratchInit, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentSize, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDX,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDY,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDZ,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.WorkGroupInfo, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentWaveByteOffset, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitArgPtr, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitBufferPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDX, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDY, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDZ, 0, 0)))
    return true;

  MFI->Mode.IEEE = YamlMFI.Mode.IEEE;
  MFI->Mode.DX10Clamp = YamlMFI.Mode.DX10Clamp;
  MFI->Mode.FP32InputDenormals = YamlMFI.Mode.FP32InputDenormals;
  MFI->Mode.FP32OutputDenormals = YamlMFI.Mode.FP32OutputDenormals;
  MFI->Mode.FP64FP16InputDenormals = YamlMFI.Mode.FP64FP16InputDenormals;
  MFI->Mode.FP64FP16OutputDenormals = YamlMFI.Mode.FP64FP16OutputDenormals;

  return false;
}