1 //===-- AArch64Subtarget.cpp - AArch64 Subtarget Information ----*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the AArch64 specific subclass of TargetSubtarget.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "AArch64Subtarget.h"
15
16 #include "AArch64.h"
17 #include "AArch64CallLowering.h"
18 #include "AArch64InstrInfo.h"
19 #include "AArch64LegalizerInfo.h"
20 #include "AArch64PBQPRegAlloc.h"
21 #include "AArch64RegisterBankInfo.h"
22 #include "AArch64TargetMachine.h"
23 #include "MCTargetDesc/AArch64AddressingModes.h"
24 #include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
25 #include "llvm/CodeGen/MachineScheduler.h"
26 #include "llvm/IR/GlobalValue.h"
27 #include "llvm/Support/TargetParser.h"
28
29 using namespace llvm;
30
31 #define DEBUG_TYPE "aarch64-subtarget"
32
33 #define GET_SUBTARGETINFO_CTOR
34 #define GET_SUBTARGETINFO_TARGET_DESC
35 #include "AArch64GenSubtargetInfo.inc"
36
// Enable/disable the early if-conversion pass. On by default; the flag exists
// so the pass can be switched off for triage.
static cl::opt<bool>
EnableEarlyIfConvert("aarch64-early-ifcvt", cl::desc("Enable the early if "
                     "converter pass"), cl::init(true), cl::Hidden);

// If OS supports TBI, use this flag to enable it.
static cl::opt<bool>
UseAddressTopByteIgnored("aarch64-use-tbi", cl::desc("Assume that top byte of "
                         "an address is ignored"), cl::init(false), cl::Hidden);

// Route calls to functions marked 'nonlazybind' through a GOT load instead of
// a direct call. Off by default; consulted in
// classifyGlobalFunctionReference().
static cl::opt<bool>
UseNonLazyBind("aarch64-enable-nonlazybind",
               cl::desc("Call nonlazybind functions via direct GOT load"),
               cl::init(false), cl::Hidden);
50
51 AArch64Subtarget &
initializeSubtargetDependencies(StringRef FS,StringRef CPUString)52 AArch64Subtarget::initializeSubtargetDependencies(StringRef FS,
53 StringRef CPUString) {
54 // Determine default and user-specified characteristics
55
56 if (CPUString.empty())
57 CPUString = "generic";
58
59 ParseSubtargetFeatures(CPUString, FS);
60 initializeProperties();
61
62 return *this;
63 }
64
/// Set per-CPU tuning knobs (interleave factors, prefetch distances, preferred
/// alignments, ...) based on the processor family selected by feature parsing.
/// NOTE(review): the alignment values appear to be log2 encodings
/// (e.g. 4 => 16 bytes) — confirm against how PrefFunctionAlignment /
/// PrefLoopAlignment are consumed before relying on that.
void AArch64Subtarget::initializeProperties() {
  // Initialize CPU specific properties. We should add a tablegen feature for
  // this in the future so we can specify it together with the subtarget
  // features.
  switch (ARMProcFamily) {
  case Others:
    break;
  case CortexA35:
    break;
  case CortexA53:
    PrefFunctionAlignment = 3;
    break;
  case CortexA55:
    break;
  case CortexA57:
    MaxInterleaveFactor = 4;
    PrefFunctionAlignment = 4;
    break;
  case CortexA72:
  case CortexA73:
  case CortexA75:
    PrefFunctionAlignment = 4;
    break;
  case Cyclone:
    // Software-prefetch tuning for Cyclone.
    CacheLineSize = 64;
    PrefetchDistance = 280;
    MinPrefetchStride = 2048;
    MaxPrefetchIterationsAhead = 3;
    break;
  case ExynosM1:
    MaxInterleaveFactor = 4;
    MaxJumpTableSize = 8;
    PrefFunctionAlignment = 4;
    PrefLoopAlignment = 3;
    break;
  case ExynosM3:
    MaxInterleaveFactor = 4;
    MaxJumpTableSize = 20;
    PrefFunctionAlignment = 5;
    PrefLoopAlignment = 4;
    break;
  case Falkor:
    MaxInterleaveFactor = 4;
    // FIXME: remove this to enable 64-bit SLP if performance looks good.
    MinVectorRegisterBitWidth = 128;
    CacheLineSize = 128;
    PrefetchDistance = 820;
    MinPrefetchStride = 2048;
    MaxPrefetchIterationsAhead = 8;
    break;
  case Kryo:
    MaxInterleaveFactor = 4;
    VectorInsertExtractBaseCost = 2;
    CacheLineSize = 128;
    PrefetchDistance = 740;
    MinPrefetchStride = 1024;
    MaxPrefetchIterationsAhead = 11;
    // FIXME: remove this to enable 64-bit SLP if performance looks good.
    MinVectorRegisterBitWidth = 128;
    break;
  case Saphira:
    MaxInterleaveFactor = 4;
    // FIXME: remove this to enable 64-bit SLP if performance looks good.
    MinVectorRegisterBitWidth = 128;
    break;
  case ThunderX2T99:
    CacheLineSize = 64;
    PrefFunctionAlignment = 3;
    PrefLoopAlignment = 2;
    MaxInterleaveFactor = 4;
    PrefetchDistance = 128;
    MinPrefetchStride = 1024;
    MaxPrefetchIterationsAhead = 4;
    // FIXME: remove this to enable 64-bit SLP if performance looks good.
    MinVectorRegisterBitWidth = 128;
    break;
  case ThunderX:
  case ThunderXT88:
  case ThunderXT81:
  case ThunderXT83:
    // All first-generation ThunderX variants share the same tuning.
    CacheLineSize = 128;
    PrefFunctionAlignment = 3;
    PrefLoopAlignment = 2;
    // FIXME: remove this to enable 64-bit SLP if performance looks good.
    MinVectorRegisterBitWidth = 128;
    break;
  case TSV110:
    CacheLineSize = 64;
    PrefFunctionAlignment = 4;
    PrefLoopAlignment = 2;
    break;
  }
}
158
/// Construct the subtarget. InstrInfo's initializer calls
/// initializeSubtargetDependencies(), which parses the CPU/feature strings;
/// members initialize in class-declaration order, so this presumably runs
/// before TLInfo(TM, *this) — confirm the declaration order in the header.
AArch64Subtarget::AArch64Subtarget(const Triple &TT, const std::string &CPU,
                                   const std::string &FS,
                                   const TargetMachine &TM, bool LittleEndian)
    : AArch64GenSubtargetInfo(TT, CPU, FS),
      ReserveXRegister(AArch64::GPR64commonRegClass.getNumRegs()),
      CustomCallSavedXRegs(AArch64::GPR64commonRegClass.getNumRegs()),
      IsLittle(LittleEndian),
      TargetTriple(TT), FrameLowering(),
      InstrInfo(initializeSubtargetDependencies(FS, CPU)), TSInfo(),
      TLInfo(TM, *this) {
  // Some platforms reserve x18 as a platform register by default.
  if (AArch64::isX18ReservedByDefault(TT))
    ReserveXRegister.set(18);

  // Build the GlobalISel support objects owned by this subtarget.
  CallLoweringInfo.reset(new AArch64CallLowering(*getTargetLowering()));
  Legalizer.reset(new AArch64LegalizerInfo(*this));

  auto *RBI = new AArch64RegisterBankInfo(*getRegisterInfo());

  // FIXME: At this point, we can't rely on Subtarget having RBI.
  // It's awkward to mix passing RBI and the Subtarget; should we pass
  // TII/TRI as well?
  InstSelector.reset(createAArch64InstructionSelector(
      *static_cast<const AArch64TargetMachine *>(&TM), *this, *RBI));

  // RegBankInfo takes ownership of RBI after the selector has been created.
  RegBankInfo.reset(RBI);
}
185
// Accessor for the GlobalISel call-lowering object (owned by this subtarget).
const CallLowering *AArch64Subtarget::getCallLowering() const {
  return CallLoweringInfo.get();
}
189
// Accessor for the GlobalISel instruction selector (owned by this subtarget).
const InstructionSelector *AArch64Subtarget::getInstructionSelector() const {
  return InstSelector.get();
}
193
// Accessor for the GlobalISel legalizer info (owned by this subtarget).
const LegalizerInfo *AArch64Subtarget::getLegalizerInfo() const {
  return Legalizer.get();
}
197
// Accessor for the GlobalISel register-bank info (owned by this subtarget).
const RegisterBankInfo *AArch64Subtarget::getRegBankInfo() const {
  return RegBankInfo.get();
}
201
/// Find the target operand flags that describe how a global value should be
/// referenced for the current subtarget.
unsigned char
AArch64Subtarget::ClassifyGlobalReference(const GlobalValue *GV,
                                          const TargetMachine &TM) const {
  // MachO large model always goes via a GOT, simply to get a single 8-byte
  // absolute relocation on all global addresses.
  if (TM.getCodeModel() == CodeModel::Large && isTargetMachO())
    return AArch64II::MO_GOT;

  // Anything not known to resolve within this DSO must be reached indirectly.
  if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV)) {
    if (GV->hasDLLImportStorageClass())
      return AArch64II::MO_GOT | AArch64II::MO_DLLIMPORT;
    // On non-dllimport Windows globals, go via a COFF stub.
    if (getTargetTriple().isOSWindows())
      return AArch64II::MO_GOT | AArch64II::MO_COFFSTUB;
    return AArch64II::MO_GOT;
  }

  // The small code model's direct accesses use ADRP, which cannot
  // necessarily produce the value 0 (if the code is above 4GB).
  // Same for the tiny code model, where we have a pc relative LDR.
  // External-weak symbols may legitimately be null, so use the GOT there.
  if ((useSmallAddressing() || TM.getCodeModel() == CodeModel::Tiny) &&
      GV->hasExternalWeakLinkage())
    return AArch64II::MO_GOT;

  // Otherwise the symbol may be referenced directly.
  return AArch64II::MO_NO_FLAG;
}
229
/// Find the target operand flags for referencing a global *function*; callees
/// can be classified more permissively than data globals.
unsigned char AArch64Subtarget::classifyGlobalFunctionReference(
    const GlobalValue *GV, const TargetMachine &TM) const {
  // MachO large model always goes via a GOT, because we don't have the
  // relocations available to do anything else..
  if (TM.getCodeModel() == CodeModel::Large && isTargetMachO() &&
      !GV->hasInternalLinkage())
    return AArch64II::MO_GOT;

  // NonLazyBind goes via GOT unless we know it's available locally.
  // Only honored when -aarch64-enable-nonlazybind is set.
  auto *F = dyn_cast<Function>(GV);
  if (UseNonLazyBind && F && F->hasFnAttribute(Attribute::NonLazyBind) &&
      !TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
    return AArch64II::MO_GOT;

  // Otherwise the function can be called directly.
  return AArch64II::MO_NO_FLAG;
}
246
overrideSchedPolicy(MachineSchedPolicy & Policy,unsigned NumRegionInstrs) const247 void AArch64Subtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
248 unsigned NumRegionInstrs) const {
249 // LNT run (at least on Cyclone) showed reasonably significant gains for
250 // bi-directional scheduling. 253.perlbmk.
251 Policy.OnlyTopDown = false;
252 Policy.OnlyBottomUp = false;
253 // Enabling or Disabling the latency heuristic is a close call: It seems to
254 // help nearly no benchmark on out-of-order architectures, on the other hand
255 // it regresses register pressure on a few benchmarking.
256 Policy.DisableLatencyHeuristic = DisableLatencySchedHeuristic;
257 }
258
// Early if-conversion is gated purely on the -aarch64-early-ifcvt flag
// (on by default).
bool AArch64Subtarget::enableEarlyIfConversion() const {
  return EnableEarlyIfConvert;
}
262
supportsAddressTopByteIgnored() const263 bool AArch64Subtarget::supportsAddressTopByteIgnored() const {
264 if (!UseAddressTopByteIgnored)
265 return false;
266
267 if (TargetTriple.isiOS()) {
268 unsigned Major, Minor, Micro;
269 TargetTriple.getiOSVersion(Major, Minor, Micro);
270 return Major >= 8;
271 }
272
273 return false;
274 }
275
276 std::unique_ptr<PBQPRAConstraint>
getCustomPBQPConstraints() const277 AArch64Subtarget::getCustomPBQPConstraints() const {
278 return balanceFPOps() ? llvm::make_unique<A57ChainingConstraint>() : nullptr;
279 }
280
mirFileLoaded(MachineFunction & MF) const281 void AArch64Subtarget::mirFileLoaded(MachineFunction &MF) const {
282 // We usually compute max call frame size after ISel. Do the computation now
283 // if the .mir file didn't specify it. Note that this will probably give you
284 // bogus values after PEI has eliminated the callframe setup/destroy pseudo
285 // instructions, specify explicitly if you need it to be correct.
286 MachineFrameInfo &MFI = MF.getFrameInfo();
287 if (!MFI.isMaxCallFrameSizeComputed())
288 MFI.computeMaxCallFrameSize(MF);
289 }
290