//===-- AArch64TargetMachine.cpp - Define TargetMachine for AArch64 -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetMachine.h"
#include "AArch64.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64MacroFusion.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetObjectFile.h"
#include "AArch64TargetTransformInfo.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "TargetInfo/AArch64TargetInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/CSEConfigBase.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/Localizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/MIRParser/MIParser.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/CFGuard.h"
#include "llvm/Transforms/Scalar.h"
#include <memory>
#include <string>

using namespace llvm;

static cl::opt<bool> EnableCCMP("aarch64-enable-ccmp",
                                cl::desc("Enable the CCMP formation pass"),
                                cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableCondBrTuning("aarch64-enable-cond-br-tune",
                       cl::desc("Enable the conditional branch tuning pass"),
                       cl::init(true), cl::Hidden);

static cl::opt<bool> EnableMCR("aarch64-enable-mcr",
                               cl::desc("Enable the machine combiner pass"),
                               cl::init(true), cl::Hidden);

static cl::opt<bool> EnableStPairSuppress("aarch64-enable-stp-suppress",
                                          cl::desc("Suppress STP for AArch64"),
                                          cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAdvSIMDScalar(
    "aarch64-enable-simd-scalar",
    cl::desc("Enable use of AdvSIMD scalar integer instructions"),
    cl::init(false), cl::Hidden);

static cl::opt<bool>
    EnablePromoteConstant("aarch64-enable-promote-const",
                          cl::desc("Enable the promote constant pass"),
                          cl::init(true), cl::Hidden);

static cl::opt<bool> EnableCollectLOH(
    "aarch64-enable-collect-loh",
    cl::desc("Enable the pass that emits the linker optimization hints (LOH)"),
    cl::init(true), cl::Hidden);
static cl::opt<bool>
    EnableDeadRegisterElimination("aarch64-enable-dead-defs", cl::Hidden,
                                  cl::desc("Enable the pass that removes dead"
                                           " definitions and replaces stores"
                                           " to them with stores to the zero"
                                           " register"),
                                  cl::init(true));

static cl::opt<bool> EnableRedundantCopyElimination(
    "aarch64-enable-copyelim",
    cl::desc("Enable the redundant copy elimination pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableLoadStoreOpt("aarch64-enable-ldst-opt",
                                        cl::desc("Enable the load/store pair"
                                                 " optimization pass"),
                                        cl::init(true), cl::Hidden);

static cl::opt<bool> EnableAtomicTidy(
    "aarch64-enable-atomic-cfg-tidy", cl::Hidden,
    cl::desc("Run SimplifyCFG after expanding atomic operations"
             " to make use of cmpxchg flow-based information"),
    cl::init(true));

static cl::opt<bool>
    EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden,
                            cl::desc("Run early if-conversion"),
                            cl::init(true));

static cl::opt<bool>
    EnableCondOpt("aarch64-enable-condopt",
                  cl::desc("Enable the condition optimizer pass"),
                  cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableA53Fix835769("aarch64-fix-cortex-a53-835769", cl::Hidden,
                       cl::desc("Work around Cortex-A53 erratum 835769"),
                       cl::init(false));

static cl::opt<bool>
    EnableGEPOpt("aarch64-enable-gep-opt", cl::Hidden,
                 cl::desc("Enable optimizations on complex GEPs"),
                 cl::init(false));

static cl::opt<bool>
    BranchRelaxation("aarch64-enable-branch-relax", cl::Hidden, cl::init(true),
                     cl::desc("Relax out of range conditional branches"));

static cl::opt<bool> EnableCompressJumpTables(
    "aarch64-enable-compress-jump-tables", cl::Hidden, cl::init(true),
    cl::desc("Use smallest entry possible for jump tables"));

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
    EnableGlobalMerge("aarch64-enable-global-merge", cl::Hidden,
                      cl::desc("Enable the global merge pass"));

static cl::opt<bool>
    EnableLoopDataPrefetch("aarch64-enable-loop-data-prefetch", cl::Hidden,
                           cl::desc("Enable the loop data prefetch pass"),
                           cl::init(true));

static cl::opt<int> EnableGlobalISelAtO(
    "aarch64-enable-global-isel-at-O", cl::Hidden,
    cl::desc("Enable GlobalISel at or below an opt level (-1 to disable)"),
    cl::init(0));
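
// For example, -aarch64-enable-global-isel-at-O=3 enables GlobalISel at every
// standard optimization level, while the default of 0 enables it only at -O0
// (subject to the triple and code-model checks in the TargetMachine
// constructor below).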

static cl::opt<bool>
    EnableSVEIntrinsicOpts("aarch64-enable-sve-intrinsic-opts", cl::Hidden,
                           cl::desc("Enable SVE intrinsic opts"),
                           cl::init(true));

static cl::opt<bool> EnableFalkorHWPFFix("aarch64-enable-falkor-hwpf-fix",
                                         cl::init(true), cl::Hidden);

static cl::opt<bool>
    EnableBranchTargets("aarch64-enable-branch-targets", cl::Hidden,
                        cl::desc("Enable the AArch64 branch target pass"),
                        cl::init(true));

static cl::opt<unsigned> SVEVectorBitsMaxOpt(
    "aarch64-sve-vector-bits-max",
    cl::desc("Assume SVE vector registers are at most this big, "
             "with zero meaning no maximum size is assumed."),
    cl::init(0), cl::Hidden);

static cl::opt<unsigned> SVEVectorBitsMinOpt(
    "aarch64-sve-vector-bits-min",
    cl::desc("Assume SVE vector registers are at least this big, "
             "with zero meaning no minimum size is assumed."),
    cl::init(0), cl::Hidden);
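
// Note: the two flags above are only a fallback; when a function carries a
// vscale_range attribute, getSubtargetImpl() below derives the SVE vector
// size range from that attribute instead.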

extern cl::opt<bool> EnableHomogeneousPrologEpilog;

extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64Target() {
  // Register the target.
  RegisterTargetMachine<AArch64leTargetMachine> X(getTheAArch64leTarget());
  RegisterTargetMachine<AArch64beTargetMachine> Y(getTheAArch64beTarget());
  RegisterTargetMachine<AArch64leTargetMachine> Z(getTheARM64Target());
  RegisterTargetMachine<AArch64leTargetMachine> W(getTheARM64_32Target());
  RegisterTargetMachine<AArch64leTargetMachine> V(getTheAArch64_32Target());
  auto PR = PassRegistry::getPassRegistry();
  initializeGlobalISel(*PR);
  initializeAArch64A53Fix835769Pass(*PR);
  initializeAArch64A57FPLoadBalancingPass(*PR);
  initializeAArch64AdvSIMDScalarPass(*PR);
  initializeAArch64BranchTargetsPass(*PR);
  initializeAArch64CollectLOHPass(*PR);
  initializeAArch64CompressJumpTablesPass(*PR);
  initializeAArch64ConditionalComparesPass(*PR);
  initializeAArch64ConditionOptimizerPass(*PR);
  initializeAArch64DeadRegisterDefinitionsPass(*PR);
  initializeAArch64ExpandPseudoPass(*PR);
  initializeAArch64LoadStoreOptPass(*PR);
  initializeAArch64MIPeepholeOptPass(*PR);
  initializeAArch64SIMDInstrOptPass(*PR);
  initializeAArch64O0PreLegalizerCombinerPass(*PR);
  initializeAArch64PreLegalizerCombinerPass(*PR);
  initializeAArch64PostLegalizerCombinerPass(*PR);
  initializeAArch64PostLegalizerLoweringPass(*PR);
  initializeAArch64PostSelectOptimizePass(*PR);
  initializeAArch64PromoteConstantPass(*PR);
  initializeAArch64RedundantCopyEliminationPass(*PR);
  initializeAArch64StorePairSuppressPass(*PR);
  initializeFalkorHWPFFixPass(*PR);
  initializeFalkorMarkStridedAccessesLegacyPass(*PR);
  initializeLDTLSCleanupPass(*PR);
  initializeSVEIntrinsicOptsPass(*PR);
  initializeAArch64SpeculationHardeningPass(*PR);
  initializeAArch64SLSHardeningPass(*PR);
  initializeAArch64StackTaggingPass(*PR);
  initializeAArch64StackTaggingPreRAPass(*PR);
  initializeAArch64LowerHomogeneousPrologEpilogPass(*PR);
}

//===----------------------------------------------------------------------===//
// AArch64 Lowering public interface.
//===----------------------------------------------------------------------===//
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return std::make_unique<AArch64_MachoTargetObjectFile>();
  if (TT.isOSBinFormatCOFF())
    return std::make_unique<AArch64_COFFTargetObjectFile>();

  return std::make_unique<AArch64_ELFTargetObjectFile>();
}

// Helper function to build a DataLayout string
static std::string computeDataLayout(const Triple &TT,
                                     const MCTargetOptions &Options,
                                     bool LittleEndian) {
  if (TT.isOSBinFormatMachO()) {
    if (TT.getArch() == Triple::aarch64_32)
      return "e-m:o-p:32:32-i64:64-i128:128-n32:64-S128";
    return "e-m:o-i64:64-i128:128-n32:64-S128";
  }
  if (TT.isOSBinFormatCOFF())
    return "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128";
  std::string Endian = LittleEndian ? "e" : "E";
  std::string Ptr32 = TT.getEnvironment() == Triple::GNUILP32 ? "-p:32:32" : "";
  return Endian + "-m:e" + Ptr32 +
         "-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128";
}
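
// For example, a little-endian aarch64-unknown-linux-gnu triple yields
//   "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
// while a GNUILP32 environment adds "-p:32:32" and big endian swaps the
// leading "e" for "E".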

static StringRef computeDefaultCPU(const Triple &TT, StringRef CPU) {
  if (CPU.empty() && TT.isArm64e())
    return "apple-a12";
  return CPU;
}

static Reloc::Model getEffectiveRelocModel(const Triple &TT,
                                           Optional<Reloc::Model> RM) {
  // AArch64 Darwin and Windows are always PIC.
  if (TT.isOSDarwin() || TT.isOSWindows())
    return Reloc::PIC_;
  // On ELF platforms the default static relocation model has a smart enough
  // linker to cope with referencing external symbols defined in a shared
  // library. Hence DynamicNoPIC doesn't need to be promoted to PIC.
  if (!RM.hasValue() || *RM == Reloc::DynamicNoPIC)
    return Reloc::Static;
  return *RM;
}

static CodeModel::Model
getEffectiveAArch64CodeModel(const Triple &TT, Optional<CodeModel::Model> CM,
                             bool JIT) {
  if (CM) {
    if (*CM != CodeModel::Small && *CM != CodeModel::Tiny &&
        *CM != CodeModel::Large) {
      report_fatal_error(
          "Only small, tiny and large code models are allowed on AArch64");
    } else if (*CM == CodeModel::Tiny && !TT.isOSBinFormatELF())
      report_fatal_error("tiny code model is only supported on ELF");
    return *CM;
  }
  // The default MCJIT memory managers make no guarantees about where they can
  // find an executable page; JITed code needs to be able to refer to globals
  // no matter how far away they are.
  // We should set CodeModel::Small for Windows ARM64 in JIT mode, since the
  // large code model makes LLVM generate 4-MOV sequences to materialize
  // addresses, and Windows doesn't support relocating such long-branch
  // sequences.
  if (JIT && !TT.isOSWindows())
    return CodeModel::Large;
  return CodeModel::Small;
}

/// Create an AArch64 architecture model.
///
AArch64TargetMachine::AArch64TargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Optional<Reloc::Model> RM,
                                           Optional<CodeModel::Model> CM,
                                           CodeGenOpt::Level OL, bool JIT,
                                           bool LittleEndian)
    : LLVMTargetMachine(T,
                        computeDataLayout(TT, Options.MCOptions, LittleEndian),
                        TT, computeDefaultCPU(TT, CPU), FS, Options,
                        getEffectiveRelocModel(TT, RM),
                        getEffectiveAArch64CodeModel(TT, CM, JIT), OL),
      TLOF(createTLOF(getTargetTriple())), isLittle(LittleEndian) {
  initAsmInfo();

  if (TT.isOSBinFormatMachO()) {
    this->Options.TrapUnreachable = true;
    this->Options.NoTrapAfterNoreturn = true;
  }

  if (getMCAsmInfo()->usesWindowsCFI()) {
    // Unwinding can get confused if the last instruction in an
    // exception-handling region (function, funclet, try block, etc.)
    // is a call.
    //
    // FIXME: We could elide the trap if the next instruction would be in
    // the same region anyway.
    this->Options.TrapUnreachable = true;
  }

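  // TLSSize is a bit-width for TLS offsets (as with -mtls-size): 24 bits
  // addresses 16MiB and 32 bits addresses 4GiB. The tiny code model's
  // architectural limit is 1MiB, so 24 bits is the nearest encodable width.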
  if (this->Options.TLSSize == 0) // default
    this->Options.TLSSize = 24;
  if ((getCodeModel() == CodeModel::Small ||
       getCodeModel() == CodeModel::Kernel) &&
      this->Options.TLSSize > 32)
    // for the small (and kernel) code model, the maximum TLS size is 4GiB
    this->Options.TLSSize = 32;
  else if (getCodeModel() == CodeModel::Tiny && this->Options.TLSSize > 24)
    // for the tiny code model, the maximum TLS size is 1MiB (< 16MiB)
    this->Options.TLSSize = 24;

  // Enable GlobalISel at or below EnableGlobalISelAtO, unless this is
  // MachO/CodeModel::Large, which GlobalISel does not support.
  if (getOptLevel() <= EnableGlobalISelAtO &&
      TT.getArch() != Triple::aarch64_32 &&
      TT.getEnvironment() != Triple::GNUILP32 &&
      !(getCodeModel() == CodeModel::Large && TT.isOSBinFormatMachO())) {
    setGlobalISel(true);
    setGlobalISelAbort(GlobalISelAbortMode::Disable);
  }

  // AArch64 supports the MachineOutliner.
  setMachineOutliner(true);

  // AArch64 supports default outlining behaviour.
  setSupportsDefaultOutlining(true);

  // AArch64 supports the debug entry values.
  setSupportsDebugEntryValues(true);
}

AArch64TargetMachine::~AArch64TargetMachine() = default;

const AArch64Subtarget *
AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  std::string CPU =
      CPUAttr.isValid() ? CPUAttr.getValueAsString().str() : TargetCPU;
  std::string FS =
      FSAttr.isValid() ? FSAttr.getValueAsString().str() : TargetFS;

  SmallString<512> Key;

  unsigned MinSVEVectorSize = 0;
  unsigned MaxSVEVectorSize = 0;
  Attribute VScaleRangeAttr = F.getFnAttribute(Attribute::VScaleRange);
  if (VScaleRangeAttr.isValid()) {
    std::tie(MinSVEVectorSize, MaxSVEVectorSize) =
        VScaleRangeAttr.getVScaleRangeArgs();
    MinSVEVectorSize *= 128;
    MaxSVEVectorSize *= 128;
  } else {
    MinSVEVectorSize = SVEVectorBitsMinOpt;
    MaxSVEVectorSize = SVEVectorBitsMaxOpt;
  }

  assert(MinSVEVectorSize % 128 == 0 &&
         "SVE requires vector length in multiples of 128!");
  assert(MaxSVEVectorSize % 128 == 0 &&
         "SVE requires vector length in multiples of 128!");
  assert((MaxSVEVectorSize >= MinSVEVectorSize || MaxSVEVectorSize == 0) &&
         "Minimum SVE vector size should not be larger than its maximum!");

  // Sanitize user input in case asserts are disabled: round the sizes down
  // to multiples of 128 and keep the minimum no larger than the maximum.
  if (MaxSVEVectorSize == 0)
    MinSVEVectorSize = (MinSVEVectorSize / 128) * 128;
  else {
    MinSVEVectorSize =
        (std::min(MinSVEVectorSize, MaxSVEVectorSize) / 128) * 128;
    MaxSVEVectorSize =
        (std::max(MinSVEVectorSize, MaxSVEVectorSize) / 128) * 128;
  }
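
  // The subtarget cache key below includes the effective SVE range as well as
  // the CPU and feature strings, so two functions that differ only in their
  // vscale_range attribute get distinct subtargets.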

  Key += "SVEMin";
  Key += std::to_string(MinSVEVectorSize);
  Key += "SVEMax";
  Key += std::to_string(MaxSVEVectorSize);
  Key += CPU;
  Key += FS;

  auto &I = SubtargetMap[Key];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<AArch64Subtarget>(TargetTriple, CPU, FS, *this,
                                           isLittle, MinSVEVectorSize,
                                           MaxSVEVectorSize);
  }
  return I.get();
}

void AArch64leTargetMachine::anchor() { }

AArch64leTargetMachine::AArch64leTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    Optional<CodeModel::Model> CM, CodeGenOpt::Level OL, bool JIT)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, JIT, true) {}

void AArch64beTargetMachine::anchor() { }

AArch64beTargetMachine::AArch64beTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Optional<Reloc::Model> RM,
    Optional<CodeModel::Model> CM, CodeGenOpt::Level OL, bool JIT)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, JIT, false) {}

namespace {

/// AArch64 Code Generator Pass Configuration Options.
class AArch64PassConfig : public TargetPassConfig {
public:
  AArch64PassConfig(AArch64TargetMachine &TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {
    if (TM.getOptLevel() != CodeGenOpt::None)
      substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
  }

  AArch64TargetMachine &getAArch64TargetMachine() const {
    return getTM<AArch64TargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>();
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
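    // Keep candidate loads/stores adjacent during scheduling; this helps
    // later formation of paired memory operations.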
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    if (ST.hasFusion())
      DAG->addMutation(createAArch64MacroFusionDAGMutation());
    return DAG;
  }

  ScheduleDAGInstrs *
  createPostMachineScheduler(MachineSchedContext *C) const override {
    const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>();
    if (ST.hasFusion()) {
      // Run the Macro Fusion after RA again since literals are expanded from
      // pseudos then (see addPreSched2()).
      ScheduleDAGMI *DAG = createGenericSchedPostRA(C);
      DAG->addMutation(createAArch64MacroFusionDAGMutation());
      return DAG;
    }

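    // Returning nullptr lets the generic post-RA scheduler run without the
    // fusion mutation.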
    return nullptr;
  }

  void addIRPasses() override;
  bool addPreISel() override;
  void addCodeGenPrepare() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  void addPreLegalizeMachineIR() override;
  bool addLegalizeMachineIR() override;
  void addPreRegBankSelect() override;
  bool addRegBankSelect() override;
  void addPreGlobalInstructionSelect() override;
  bool addGlobalInstructionSelect() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
  void addPreEmitPass2() override;

  std::unique_ptr<CSEConfigBase> getCSEConfig() const override;
};

} // end anonymous namespace

TargetTransformInfo
AArch64TargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(AArch64TTIImpl(this, F));
}

TargetPassConfig *AArch64TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new AArch64PassConfig(*this, PM);
}

std::unique_ptr<CSEConfigBase> AArch64PassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}

void AArch64PassConfig::addIRPasses() {
  // Always expand atomic operations, we don't deal with atomicrmw or cmpxchg
  // ourselves.
  addPass(createAtomicExpandPass());

  // Expand any SVE vector library calls that we can't code generate directly.
  if (EnableSVEIntrinsicOpts && TM->getOptLevel() == CodeGenOpt::Aggressive)
    addPass(createSVEIntrinsicOptsPass());

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether it succeeded. We can exploit existing control-flow in
  // ldrex/strex loops to simplify this, but it needs tidying up.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass(SimplifyCFGOptions()
                                            .forwardSwitchCondToPhi(true)
                                            .convertSwitchToLookupTable(true)
                                            .needCanonicalLoops(false)
                                            .hoistCommonInsts(true)
                                            .sinkCommonInsts(true)));

  // Run LoopDataPrefetch
  //
  // Run this before LSR to remove the multiplies involved in computing the
  // pointer values N iterations ahead.
  if (TM->getOptLevel() != CodeGenOpt::None) {
    if (EnableLoopDataPrefetch)
      addPass(createLoopDataPrefetchPass());
    if (EnableFalkorHWPFFix)
      addPass(createFalkorMarkStridedAccessesPass());
  }

  TargetPassConfig::addIRPasses();

  addPass(createAArch64StackTaggingPass(
      /*IsOptNone=*/TM->getOptLevel() == CodeGenOpt::None));

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOpt::None) {
    addPass(createInterleavedLoadCombinePass());
    addPass(createInterleavedAccessPass());
  }

  if (TM->getOptLevel() == CodeGenOpt::Aggressive && EnableGEPOpt) {
    // Call SeparateConstOffsetFromGEP pass to extract constants within indices
    // and lower a GEP with multiple indices to either arithmetic operations or
    // multiple GEPs with single index.
    addPass(createSeparateConstOffsetFromGEPPass(true));
    // Call EarlyCSE pass to find and remove subexpressions in the lowered
    // result.
    addPass(createEarlyCSEPass());
    // Do loop invariant code motion in case part of the lowered result is
    // invariant.
    addPass(createLICMPass());
  }

  // Add Control Flow Guard checks.
  if (TM->getTargetTriple().isOSWindows())
    addPass(createCFGuardCheckPass());
}

// Pass Pipeline Configuration
bool AArch64PassConfig::addPreISel() {
  // Run promote constant before global merge, so that the promoted constants
  // get a chance to be merged.
  if (TM->getOptLevel() != CodeGenOpt::None && EnablePromoteConstant)
    addPass(createAArch64PromoteConstantPass());
  // FIXME: On AArch64, this depends on the type.
  // Basically, the addressable offsets are up to 4095 * Ty.getSizeInBytes(),
  // and the offset has to be a multiple of the related size in bytes.
  if ((TM->getOptLevel() != CodeGenOpt::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    bool OnlyOptimizeForSize = (TM->getOptLevel() < CodeGenOpt::Aggressive) &&
                               (EnableGlobalMerge == cl::BOU_UNSET);

    // Merging of extern globals is enabled by default on non-Mach-O as we
    // expect it to be generally either beneficial or harmless. On Mach-O it
    // is disabled as we emit the .subsections_via_symbols directive which
    // means that merging extern globals is not safe.
    bool MergeExternalByDefault = !TM->getTargetTriple().isOSBinFormatMachO();

    // FIXME: extern global merging is only enabled when we optimise for size
    // because there are some regressions with it also enabled for performance.
    if (!OnlyOptimizeForSize)
      MergeExternalByDefault = false;

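    // 4095 is the largest scaled unsigned 12-bit immediate offset that
    // AArch64 load/store addressing modes accept (see the FIXME above).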
    addPass(createGlobalMergePass(TM, 4095, OnlyOptimizeForSize,
                                  MergeExternalByDefault));
  }

  return false;
}

void AArch64PassConfig::addCodeGenPrepare() {
  if (getOptLevel() != CodeGenOpt::None)
    addPass(createTypePromotionPass());
  TargetPassConfig::addCodeGenPrepare();
}

bool AArch64PassConfig::addInstSelector() {
  addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));

  // For ELF, cleanup any local-dynamic TLS accesses (i.e. combine as many
  // references to _TLS_MODULE_BASE_ as possible).
  if (TM->getTargetTriple().isOSBinFormatELF() &&
      getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64CleanupLocalDynamicTLSPass());

  return false;
}

bool AArch64PassConfig::addIRTranslator() {
  addPass(new IRTranslator(getOptLevel()));
  return false;
}

void AArch64PassConfig::addPreLegalizeMachineIR() {
  if (getOptLevel() == CodeGenOpt::None)
    addPass(createAArch64O0PreLegalizerCombiner());
  else
    addPass(createAArch64PreLegalizerCombiner());
}

bool AArch64PassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

void AArch64PassConfig::addPreRegBankSelect() {
  bool IsOptNone = getOptLevel() == CodeGenOpt::None;
  if (!IsOptNone)
    addPass(createAArch64PostLegalizerCombiner(IsOptNone));
  addPass(createAArch64PostLegalizerLowering());
}

bool AArch64PassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

void AArch64PassConfig::addPreGlobalInstructionSelect() {
  addPass(new Localizer());
}

bool AArch64PassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect(getOptLevel()));
  if (getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64PostSelectOptimize());
  return false;
}

void AArch64PassConfig::addMachineSSAOptimization() {
  // Run default MachineSSAOptimization first.
  TargetPassConfig::addMachineSSAOptimization();

  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64MIPeepholeOptPass());
}

bool AArch64PassConfig::addILPOpts() {
  if (EnableCondOpt)
    addPass(createAArch64ConditionOptimizerPass());
  if (EnableCCMP)
    addPass(createAArch64ConditionalCompares());
  if (EnableMCR)
    addPass(&MachineCombinerID);
  if (EnableCondBrTuning)
    addPass(createAArch64CondBrTuning());
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);
  if (EnableStPairSuppress)
    addPass(createAArch64StorePairSuppressPass());
  addPass(createAArch64SIMDInstrOptPass());
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64StackTaggingPreRAPass());
  return true;
}

void AArch64PassConfig::addPreRegAlloc() {
  // Change dead register definitions to refer to the zero register.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableDeadRegisterElimination)
    addPass(createAArch64DeadRegisterDefinitions());

  // Use AdvSIMD scalar instructions whenever profitable.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAdvSIMDScalar) {
    addPass(createAArch64AdvSIMDScalar());
    // The AdvSIMD pass may produce copies that can be rewritten to
    // be register coalescer friendly.
    addPass(&PeepholeOptimizerID);
  }
}

void AArch64PassConfig::addPostRegAlloc() {
  // Remove redundant copy instructions.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableRedundantCopyElimination)
    addPass(createAArch64RedundantCopyEliminationPass());

  if (TM->getOptLevel() != CodeGenOpt::None && usingDefaultRegAlloc())
    // Improve performance for some FP/SIMD code for A57.
    addPass(createAArch64A57FPLoadBalancing());
}

void AArch64PassConfig::addPreSched2() {
  // Lower homogeneous frame instructions
  if (EnableHomogeneousPrologEpilog)
    addPass(createAArch64LowerHomogeneousPrologEpilogPass());
  // Expand some pseudo instructions to allow proper scheduling.
  addPass(createAArch64ExpandPseudoPass());
  // Use load/store pair instructions when possible.
  if (TM->getOptLevel() != CodeGenOpt::None) {
    if (EnableLoadStoreOpt)
      addPass(createAArch64LoadStoreOptimizationPass());
  }

  // The AArch64SpeculationHardeningPass destroys dominator tree and natural
  // loop info, which is needed for the FalkorHWPFFixPass and also later on.
  // Therefore, run the AArch64SpeculationHardeningPass before the
  // FalkorHWPFFixPass to avoid recomputing dominator tree and natural loop
  // info.
  addPass(createAArch64SpeculationHardeningPass());

  addPass(createAArch64IndirectThunks());
  addPass(createAArch64SLSHardeningPass());

  if (TM->getOptLevel() != CodeGenOpt::None) {
    if (EnableFalkorHWPFFix)
      addPass(createFalkorHWPFFixPass());
  }
}

void AArch64PassConfig::addPreEmitPass() {
  // Machine Block Placement might have created new opportunities when run
  // at O3, where the Tail Duplication Threshold is set to 4 instructions.
  // Run the load/store optimizer once more.
  if (TM->getOptLevel() >= CodeGenOpt::Aggressive && EnableLoadStoreOpt)
    addPass(createAArch64LoadStoreOptimizationPass());

  if (EnableA53Fix835769)
    addPass(createAArch64A53Fix835769());

  if (EnableBranchTargets)
    addPass(createAArch64BranchTargetsPass());

  // Relax conditional branch instructions if they're otherwise out of
  // range of their destination.
  if (BranchRelaxation)
    addPass(&BranchRelaxationPassID);

  if (TM->getTargetTriple().isOSWindows()) {
    // Identify valid longjmp targets for Windows Control Flow Guard.
    addPass(createCFGuardLongjmpPass());
    // Identify valid eh continuation targets for Windows EHCont Guard.
    addPass(createEHContGuardCatchretPass());
  }

  if (TM->getOptLevel() != CodeGenOpt::None && EnableCompressJumpTables)
    addPass(createAArch64CompressJumpTablesPass());

  if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH &&
      TM->getTargetTriple().isOSBinFormatMachO())
    addPass(createAArch64CollectLOHPass());
}

void AArch64PassConfig::addPreEmitPass2() {
  // SVE bundles move prefixes with destructive operations. BLR_RVMARKER pseudo
  // instructions are lowered to bundles as well.
  addPass(createUnpackMachineBundles(nullptr));
}

yaml::MachineFunctionInfo *
AArch64TargetMachine::createDefaultFuncInfoYAML() const {
  return new yaml::AArch64FunctionInfo();
}

yaml::MachineFunctionInfo *
AArch64TargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
  const auto *MFI = MF.getInfo<AArch64FunctionInfo>();
  return new yaml::AArch64FunctionInfo(*MFI);
}

bool AArch64TargetMachine::parseMachineFunctionInfo(
    const yaml::MachineFunctionInfo &MFI, PerFunctionMIParsingState &PFS,
    SMDiagnostic &Error, SMRange &SourceRange) const {
  const auto &YamlMFI =
      reinterpret_cast<const yaml::AArch64FunctionInfo &>(MFI);
  MachineFunction &MF = PFS.MF;
  MF.getInfo<AArch64FunctionInfo>()->initializeBaseYamlFields(YamlMFI);
  return false;
}