//===-- X86Subtarget.cpp - X86 Subtarget Information ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the X86 specific subclass of TargetSubtargetInfo.
//
//===----------------------------------------------------------------------===//

#include "X86Subtarget.h"
#include "MCTargetDesc/X86BaseInfo.h"
#include "X86.h"
#include "X86CallLowering.h"
#include "X86LegalizerInfo.h"
#include "X86MacroFusion.h"
#include "X86RegisterBankInfo.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/ScheduleDAGMutation.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#if defined(_MSC_VER)
#include <intrin.h>
#endif

using namespace llvm;

#define DEBUG_TYPE "subtarget"

#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
#include "X86GenSubtargetInfo.inc"

// Temporary option to control early if-conversion for x86 while adding machine
// models.
static cl::opt<bool>
    X86EarlyIfConv("x86-early-ifcvt", cl::Hidden,
                   cl::desc("Enable early if-conversion on X86"));

/// Classify a blockaddress reference for the current subtarget according to how
/// we should reference it in a non-pcrel context.
unsigned char X86Subtarget::classifyBlockAddressReference() const {
  return classifyLocalReference(nullptr);
}

/// Classify a global variable reference for the current subtarget according to
/// how we should reference it in a non-pcrel context.
unsigned char
X86Subtarget::classifyGlobalReference(const GlobalValue *GV) const {
  return classifyGlobalReference(GV, *GV->getParent());
}

unsigned char
X86Subtarget::classifyLocalReference(const GlobalValue *GV) const {
  // Tagged globals have non-zero upper bits, which makes direct references
  // require a 64-bit immediate. On the small code model this causes relocation
  // errors, so we go through the GOT instead.
  if (AllowTaggedGlobals && TM.getCodeModel() == CodeModel::Small && GV &&
      !isa<Function>(GV))
    return X86II::MO_GOTPCREL_NORELAX;

  // If we're not PIC, it's not very interesting.
  if (!isPositionIndependent())
    return X86II::MO_NO_FLAG;

  if (is64Bit()) {
    // 64-bit ELF PIC local references may use GOTOFF relocations.
    if (isTargetELF()) {
      switch (TM.getCodeModel()) {
      // 64-bit small code model is simple: All rip-relative.
      case CodeModel::Tiny:
        llvm_unreachable("Tiny codesize model not supported on X86");
      case CodeModel::Small:
      case CodeModel::Kernel:
        return X86II::MO_NO_FLAG;

      // The large PIC code model uses GOTOFF.
      case CodeModel::Large:
        return X86II::MO_GOTOFF;

      // Medium is a hybrid: RIP-rel for code, GOTOFF for DSO local data.
      case CodeModel::Medium:
        // Constant pool and jump table handling pass a nullptr to this
        // function so we need to use isa_and_nonnull.
        if (isa_and_nonnull<Function>(GV))
          return X86II::MO_NO_FLAG; // All code is RIP-relative
        return X86II::MO_GOTOFF;    // Local symbols use GOTOFF.
      }
      llvm_unreachable("invalid code model");
    }

    // Otherwise, this is either a RIP-relative reference or a 64-bit movabsq,
    // both of which use MO_NO_FLAG.
    return X86II::MO_NO_FLAG;
  }

  // The COFF dynamic linker just patches the executable sections.
  if (isTargetCOFF())
    return X86II::MO_NO_FLAG;

  if (isTargetDarwin()) {
    // 32 bit macho has no relocation for a-b if a is undefined, even if
    // b is in the section that is being relocated.
    // This means we have to use a load even for GVs that are known to be
    // local to the dso.
    if (GV && (GV->isDeclarationForLinker() || GV->hasCommonLinkage()))
      return X86II::MO_DARWIN_NONLAZY_PIC_BASE;

    return X86II::MO_PIC_BASE_OFFSET;
  }

  return X86II::MO_GOTOFF;
}

unsigned char X86Subtarget::classifyGlobalReference(const GlobalValue *GV,
                                                    const Module &M) const {
  // The static large model never uses stubs.
  if (TM.getCodeModel() == CodeModel::Large && !isPositionIndependent())
    return X86II::MO_NO_FLAG;

  // Absolute symbols can be referenced directly.
  if (GV) {
    if (std::optional<ConstantRange> CR = GV->getAbsoluteSymbolRange()) {
      // See if we can use the 8-bit immediate form. Note that some instructions
      // will sign extend the immediate operand, so to be conservative we only
      // accept the range [0, 128).
      if (CR->getUnsignedMax().ult(128))
        return X86II::MO_ABS8;
      else
        return X86II::MO_NO_FLAG;
    }
  }

  if (TM.shouldAssumeDSOLocal(M, GV))
    return classifyLocalReference(GV);

  if (isTargetCOFF()) {
    // ExternalSymbolSDNode like _tls_index.
    if (!GV)
      return X86II::MO_NO_FLAG;
    if (GV->hasDLLImportStorageClass())
      return X86II::MO_DLLIMPORT;
    return X86II::MO_COFFSTUB;
  }
  // Some JIT users use *-win32-elf triples; these shouldn't use GOT tables.
  if (isOSWindows())
    return X86II::MO_NO_FLAG;

  if (is64Bit()) {
    // ELF supports a large, truly PIC code model with non-PC relative GOT
    // references. Other object file formats do not. Use the no-flag, 64-bit
    // reference for them.
    if (TM.getCodeModel() == CodeModel::Large)
      return isTargetELF() ? X86II::MO_GOT : X86II::MO_NO_FLAG;
    // Tagged globals have non-zero upper bits, which makes direct references
    // require a 64-bit immediate. So we can't let the linker relax the
    // relocation to a 32-bit RIP-relative direct reference.
    if (AllowTaggedGlobals && GV && !isa<Function>(GV))
      return X86II::MO_GOTPCREL_NORELAX;
    return X86II::MO_GOTPCREL;
  }

  if (isTargetDarwin()) {
    if (!isPositionIndependent())
      return X86II::MO_DARWIN_NONLAZY;
    return X86II::MO_DARWIN_NONLAZY_PIC_BASE;
  }

  // 32-bit ELF references GlobalAddress directly in static relocation model.
  // We cannot use MO_GOT because EBX may not be set up.
  if (TM.getRelocationModel() == Reloc::Static)
    return X86II::MO_NO_FLAG;
  return X86II::MO_GOT;
}

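/// Classify a global function reference for the current subtarget.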
unsigned char
X86Subtarget::classifyGlobalFunctionReference(const GlobalValue *GV) const {
  return classifyGlobalFunctionReference(GV, *GV->getParent());
}

unsigned char
X86Subtarget::classifyGlobalFunctionReference(const GlobalValue *GV,
                                              const Module &M) const {
  if (TM.shouldAssumeDSOLocal(M, GV))
    return X86II::MO_NO_FLAG;

  // Functions on COFF can be non-DSO local for three reasons:
  // - They are intrinsic functions (!GV)
  // - They are marked dllimport
  // - They are extern_weak, and a stub is needed
  if (isTargetCOFF()) {
    if (!GV)
      return X86II::MO_NO_FLAG;
    if (GV->hasDLLImportStorageClass())
      return X86II::MO_DLLIMPORT;
    return X86II::MO_COFFSTUB;
  }

  const Function *F = dyn_cast_or_null<Function>(GV);

  if (isTargetELF()) {
    if (is64Bit() && F && (CallingConv::X86_RegCall == F->getCallingConv()))
      // According to psABI, PLT stub clobbers XMM8-XMM15.
      // In Regcall calling convention those registers are used for passing
      // parameters. Thus we need to prevent lazy binding in Regcall.
      return X86II::MO_GOTPCREL;
    // If PLT must be avoided then the call should be via GOTPCREL.
    if (((F && F->hasFnAttribute(Attribute::NonLazyBind)) ||
         (!F && M.getRtLibUseGOT())) &&
        is64Bit())
      return X86II::MO_GOTPCREL;
    // Reference ExternalSymbol directly in static relocation model.
    if (!is64Bit() && !GV && TM.getRelocationModel() == Reloc::Static)
      return X86II::MO_NO_FLAG;
    return X86II::MO_PLT;
  }

  if (is64Bit()) {
    if (F && F->hasFnAttribute(Attribute::NonLazyBind))
      // If the function is marked as non-lazy, generate an indirect call
      // which loads from the GOT directly. This avoids runtime overhead
      // at the cost of eager binding (and one extra byte of encoding).
      return X86II::MO_GOTPCREL;
    return X86II::MO_NO_FLAG;
  }

  return X86II::MO_NO_FLAG;
}

/// Return true if the subtarget allows calls to immediate address.
bool X86Subtarget::isLegalToCallImmediateAddr() const {
  // FIXME: I386 PE/COFF supports PC relative calls using IMAGE_REL_I386_REL32
  // but WinCOFFObjectWriter::RecordRelocation cannot emit them. Once it does,
  // the following check for Win32 should be removed.
  if (Is64Bit || isTargetWin32())
    return false;
  return isTargetELF() || TM.getRelocationModel() == Reloc::Static;
}

void X86Subtarget::initSubtargetFeatures(StringRef CPU, StringRef TuneCPU,
                                         StringRef FS) {
  if (CPU.empty())
    CPU = "generic";

  if (TuneCPU.empty())
    TuneCPU = "i586"; // FIXME: "generic" is more modern than llc tests expect.

  std::string FullFS = X86_MC::ParseX86Triple(TargetTriple);
  assert(!FullFS.empty() && "Failed to parse X86 triple");

  if (!FS.empty())
    FullFS = (Twine(FullFS) + "," + FS).str();

  // Parse features string and set the CPU.
  ParseSubtargetFeatures(CPU, TuneCPU, FullFS);

  // All CPUs that implement SSE4.2 or SSE4A support unaligned accesses of
  // 16-bytes and under that are reasonably fast. These features were
  // introduced with Intel's Nehalem/Silvermont and AMD's Family10h
  // micro-architectures respectively.
  if (hasSSE42() || hasSSE4A())
    IsUnalignedMem16Slow = false;

  LLVM_DEBUG(dbgs() << "Subtarget features: SSELevel " << X86SSELevel
                    << ", 3DNowLevel " << X863DNowLevel << ", 64bit "
                    << HasX86_64 << "\n");
  if (Is64Bit && !HasX86_64)
    report_fatal_error("64-bit code requested on a subtarget that doesn't "
                       "support it!");

  // Stack alignment is 16 bytes on Darwin, Linux, kFreeBSD, NaCl, and for all
  // 64-bit targets. On Solaris (32-bit), stack alignment is 4 bytes
  // following the i386 psABI, while on Illumos it is always 16 bytes.
  if (StackAlignOverride)
    stackAlignment = *StackAlignOverride;
  else if (isTargetDarwin() || isTargetLinux() || isTargetKFreeBSD() ||
           isTargetNaCl() || Is64Bit)
    stackAlignment = Align(16);

  // Consume the vector width attribute or apply any target specific limit.
  if (PreferVectorWidthOverride)
    PreferVectorWidth = PreferVectorWidthOverride;
  else if (Prefer128Bit)
    PreferVectorWidth = 128;
  else if (Prefer256Bit)
    PreferVectorWidth = 256;
}

X86Subtarget &X86Subtarget::initializeSubtargetDependencies(StringRef CPU,
                                                            StringRef TuneCPU,
                                                            StringRef FS) {
  initSubtargetFeatures(CPU, TuneCPU, FS);
  return *this;
}

X86Subtarget::X86Subtarget(const Triple &TT, StringRef CPU, StringRef TuneCPU,
                           StringRef FS, const X86TargetMachine &TM,
                           MaybeAlign StackAlignOverride,
                           unsigned PreferVectorWidthOverride,
                           unsigned RequiredVectorWidth)
    : X86GenSubtargetInfo(TT, CPU, TuneCPU, FS),
      PICStyle(PICStyles::Style::None), TM(TM), TargetTriple(TT),
      StackAlignOverride(StackAlignOverride),
      PreferVectorWidthOverride(PreferVectorWidthOverride),
      RequiredVectorWidth(RequiredVectorWidth),
      InstrInfo(initializeSubtargetDependencies(CPU, TuneCPU, FS)),
      TLInfo(TM, *this), FrameLowering(*this, getStackAlignment()) {
  // Determine the PICStyle based on the target selected.
  if (!isPositionIndependent())
    setPICStyle(PICStyles::Style::None);
  else if (is64Bit())
    setPICStyle(PICStyles::Style::RIPRel);
  else if (isTargetCOFF())
    setPICStyle(PICStyles::Style::None);
  else if (isTargetDarwin())
    setPICStyle(PICStyles::Style::StubPIC);
  else if (isTargetELF())
    setPICStyle(PICStyles::Style::GOT);

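  // Set up GlobalISel support for this subtarget. The components created here
  // are handed out by the corresponding accessors below.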
  CallLoweringInfo.reset(new X86CallLowering(*getTargetLowering()));
  Legalizer.reset(new X86LegalizerInfo(*this, TM));

  auto *RBI = new X86RegisterBankInfo(*getRegisterInfo());
  RegBankInfo.reset(RBI);
  InstSelector.reset(createX86InstructionSelector(TM, *this, *RBI));
}

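// GlobalISel interface: return the components constructed in the X86Subtarget
// constructor above.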
const CallLowering *X86Subtarget::getCallLowering() const {
  return CallLoweringInfo.get();
}

InstructionSelector *X86Subtarget::getInstructionSelector() const {
  return InstSelector.get();
}

const LegalizerInfo *X86Subtarget::getLegalizerInfo() const {
  return Legalizer.get();
}

const RegisterBankInfo *X86Subtarget::getRegBankInfo() const {
  return RegBankInfo.get();
}

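// Early if-conversion requires CMOV support and is gated behind the
// -x86-early-ifcvt flag while machine models are still being added.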
bool X86Subtarget::enableEarlyIfConversion() const {
  return canUseCMOV() && X86EarlyIfConv;
}

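// Register the X86 macro fusion DAG mutation with the post-RA scheduler.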
void X86Subtarget::getPostRAMutations(
    std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
  Mutations.push_back(createX86MacroFusionDAGMutation());
}

bool X86Subtarget::isPositionIndependent() const {
  return TM.isPositionIndependent();
}