//===- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLoweringBase class.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

static cl::opt<bool> JumpIsExpensiveOverride(
    "jump-is-expensive", cl::init(false),
    cl::desc("Do not create extra branches to split comparison logic."),
    cl::Hidden);

static cl::opt<unsigned> MinimumJumpTableEntries
  ("min-jump-table-entries", cl::init(4), cl::Hidden,
   cl::desc("Set minimum number of entries to use a jump table."));

static cl::opt<unsigned> MaximumJumpTableSize
  ("max-jump-table-size", cl::init(UINT_MAX), cl::Hidden,
   cl::desc("Set maximum size of jump tables."));

/// Minimum jump table density for normal functions.
static cl::opt<unsigned>
    JumpTableDensity("jump-table-density", cl::init(10), cl::Hidden,
                     cl::desc("Minimum density for building a jump table in "
                              "a normal function"));

/// Minimum jump table density for -Os or -Oz functions.
static cl::opt<unsigned> OptsizeJumpTableDensity(
    "optsize-jump-table-density", cl::init(40), cl::Hidden,
    cl::desc("Minimum density for building a jump table in "
             "an optsize function"));

// FIXME: This option exists only to test whether strict FP operations are
// processed correctly, by preventing strict FP operations from being mutated
// into normal FP operations during development. Once the backend supports
// strict FP operations, this option will be meaningless.
static cl::opt<bool> DisableStrictNodeMutation("disable-strictnode-mutation",
       cl::desc("Don't mutate strict-float node to a legalize node"),
       cl::init(false), cl::Hidden);

static bool darwinHasSinCos(const Triple &TT) {
  assert(TT.isOSDarwin() && "should be called with darwin triple");
  // Don't bother with 32 bit x86.
  if (TT.getArch() == Triple::x86)
    return false;
  // Macos < 10.9 has no sincos_stret.
  if (TT.isMacOSX())
    return !TT.isMacOSXVersionLT(10, 9) && TT.isArch64Bit();
  // iOS < 7.0 has no sincos_stret.
  if (TT.isiOS())
    return !TT.isOSVersionLT(7, 0);
  // Any other darwin such as WatchOS/TvOS is new enough.
  return true;
}

void TargetLoweringBase::InitLibcalls(const Triple &TT) {
#define HANDLE_LIBCALL(code, name) \
  setLibcallName(RTLIB::code, name);
#include "llvm/IR/RuntimeLibcalls.def"
#undef HANDLE_LIBCALL
  // Initialize calling conventions to their default.
  for (int LC = 0; LC < RTLIB::UNKNOWN_LIBCALL; ++LC)
    setLibcallCallingConv((RTLIB::Libcall)LC, CallingConv::C);

  // For IEEE quad-precision libcall names, PPC uses "kf" instead of "tf".
  if (TT.isPPC()) {
    setLibcallName(RTLIB::ADD_F128, "__addkf3");
    setLibcallName(RTLIB::SUB_F128, "__subkf3");
    setLibcallName(RTLIB::MUL_F128, "__mulkf3");
    setLibcallName(RTLIB::DIV_F128, "__divkf3");
    setLibcallName(RTLIB::POWI_F128, "__powikf2");
    setLibcallName(RTLIB::FPEXT_F32_F128, "__extendsfkf2");
    setLibcallName(RTLIB::FPEXT_F64_F128, "__extenddfkf2");
    setLibcallName(RTLIB::FPROUND_F128_F32, "__trunckfsf2");
    setLibcallName(RTLIB::FPROUND_F128_F64, "__trunckfdf2");
    setLibcallName(RTLIB::FPTOSINT_F128_I32, "__fixkfsi");
    setLibcallName(RTLIB::FPTOSINT_F128_I64, "__fixkfdi");
    setLibcallName(RTLIB::FPTOSINT_F128_I128, "__fixkfti");
    setLibcallName(RTLIB::FPTOUINT_F128_I32, "__fixunskfsi");
    setLibcallName(RTLIB::FPTOUINT_F128_I64, "__fixunskfdi");
    setLibcallName(RTLIB::FPTOUINT_F128_I128, "__fixunskfti");
    setLibcallName(RTLIB::SINTTOFP_I32_F128, "__floatsikf");
    setLibcallName(RTLIB::SINTTOFP_I64_F128, "__floatdikf");
    setLibcallName(RTLIB::SINTTOFP_I128_F128, "__floattikf");
    setLibcallName(RTLIB::UINTTOFP_I32_F128, "__floatunsikf");
    setLibcallName(RTLIB::UINTTOFP_I64_F128, "__floatundikf");
    setLibcallName(RTLIB::UINTTOFP_I128_F128, "__floatuntikf");
    setLibcallName(RTLIB::OEQ_F128, "__eqkf2");
    setLibcallName(RTLIB::UNE_F128, "__nekf2");
    setLibcallName(RTLIB::OGE_F128, "__gekf2");
    setLibcallName(RTLIB::OLT_F128, "__ltkf2");
    setLibcallName(RTLIB::OLE_F128, "__lekf2");
    setLibcallName(RTLIB::OGT_F128, "__gtkf2");
    setLibcallName(RTLIB::UO_F128, "__unordkf2");
  }

  // A few names are different on particular architectures or environments.
  if (TT.isOSDarwin()) {
    // For f16/f32 conversions, Darwin uses the standard naming scheme, instead
    // of the gnueabi-style __gnu_*_ieee.
    // FIXME: What about other targets?
    setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
    setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");

    // Some darwins have an optimized __bzero/bzero function.
    switch (TT.getArch()) {
    case Triple::x86:
    case Triple::x86_64:
      if (TT.isMacOSX() && !TT.isMacOSXVersionLT(10, 6))
        setLibcallName(RTLIB::BZERO, "__bzero");
      break;
    case Triple::aarch64:
    case Triple::aarch64_32:
      setLibcallName(RTLIB::BZERO, "bzero");
      break;
    default:
      break;
    }

    if (darwinHasSinCos(TT)) {
      setLibcallName(RTLIB::SINCOS_STRET_F32, "__sincosf_stret");
      setLibcallName(RTLIB::SINCOS_STRET_F64, "__sincos_stret");
      if (TT.isWatchABI()) {
        setLibcallCallingConv(RTLIB::SINCOS_STRET_F32,
                              CallingConv::ARM_AAPCS_VFP);
        setLibcallCallingConv(RTLIB::SINCOS_STRET_F64,
                              CallingConv::ARM_AAPCS_VFP);
      }
    }
  } else {
    setLibcallName(RTLIB::FPEXT_F16_F32, "__gnu_h2f_ieee");
    setLibcallName(RTLIB::FPROUND_F32_F16, "__gnu_f2h_ieee");
  }

  if (TT.isGNUEnvironment() || TT.isOSFuchsia() ||
      (TT.isAndroid() && !TT.isAndroidVersionLT(9))) {
    setLibcallName(RTLIB::SINCOS_F32, "sincosf");
    setLibcallName(RTLIB::SINCOS_F64, "sincos");
    setLibcallName(RTLIB::SINCOS_F80, "sincosl");
    setLibcallName(RTLIB::SINCOS_F128, "sincosl");
    setLibcallName(RTLIB::SINCOS_PPCF128, "sincosl");
  }

  if (TT.isPS4CPU()) {
    setLibcallName(RTLIB::SINCOS_F32, "sincosf");
    setLibcallName(RTLIB::SINCOS_F64, "sincos");
  }

  if (TT.isOSOpenBSD()) {
    setLibcallName(RTLIB::STACKPROTECTOR_CHECK_FAIL, nullptr);
  }
}

/// GetFPLibCall - Helper to return the right libcall for the given floating
/// point type, or UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPLibCall(EVT VT,
                                   RTLIB::Libcall Call_F32,
                                   RTLIB::Libcall Call_F64,
                                   RTLIB::Libcall Call_F80,
                                   RTLIB::Libcall Call_F128,
                                   RTLIB::Libcall Call_PPCF128) {
  return
    VT == MVT::f32 ? Call_F32 :
    VT == MVT::f64 ? Call_F64 :
    VT == MVT::f80 ? Call_F80 :
    VT == MVT::f128 ? Call_F128 :
    VT == MVT::ppcf128 ? Call_PPCF128 :
    RTLIB::UNKNOWN_LIBCALL;
}
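
// For example, getFPLibCall(MVT::f80, SIN_F32, SIN_F64, SIN_F80, SIN_F128,
// SIN_PPCF128) yields SIN_F80, and an unhandled type such as MVT::bf16 falls
// through to UNKNOWN_LIBCALL.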

/// getFPEXT - Return the FPEXT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::f32)
      return FPEXT_F16_F32;
    if (RetVT == MVT::f64)
      return FPEXT_F16_F64;
    if (RetVT == MVT::f128)
      return FPEXT_F16_F128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::f64)
      return FPEXT_F32_F64;
    if (RetVT == MVT::f128)
      return FPEXT_F32_F128;
    if (RetVT == MVT::ppcf128)
      return FPEXT_F32_PPCF128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::f128)
      return FPEXT_F64_F128;
    else if (RetVT == MVT::ppcf128)
      return FPEXT_F64_PPCF128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::f128)
      return FPEXT_F80_F128;
  }

  return UNKNOWN_LIBCALL;
}
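
// e.g. getFPEXT(MVT::f32, MVT::f64) returns FPEXT_F32_F64, whose default
// libcall name is __extendsfdf2 (possibly renamed in InitLibcalls above).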

/// getFPROUND - Return the FPROUND_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
  if (RetVT == MVT::f16) {
    if (OpVT == MVT::f32)
      return FPROUND_F32_F16;
    if (OpVT == MVT::f64)
      return FPROUND_F64_F16;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F16;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F16;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F16;
  } else if (RetVT == MVT::f32) {
    if (OpVT == MVT::f64)
      return FPROUND_F64_F32;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F32;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F32;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F32;
  } else if (RetVT == MVT::f64) {
    if (OpVT == MVT::f80)
      return FPROUND_F80_F64;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F64;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F64;
  } else if (RetVT == MVT::f80) {
    if (OpVT == MVT::f128)
      return FPROUND_F128_F80;
  }

  return UNKNOWN_LIBCALL;
}
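
// e.g. getFPROUND(MVT::f64, MVT::f32) returns FPROUND_F64_F32, whose default
// libcall name is __truncdfsf2.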

/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F16_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F16_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F16_I128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F16_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F16_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F16_I128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I32_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I64_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I128_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I32_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I64_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I128_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}
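
// The four conversion helpers above follow the same pattern; e.g.
// getFPTOSINT(MVT::f64, MVT::i32) returns FPTOSINT_F64_I32 (__fixdfsi by
// default) and getUINTTOFP(MVT::i64, MVT::f32) returns UINTTOFP_I64_F32
// (__floatundisf by default).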

RTLIB::Libcall RTLIB::getPOWI(EVT RetVT) {
  return getFPLibCall(RetVT, POWI_F32, POWI_F64, POWI_F80, POWI_F128,
                      POWI_PPCF128);
}

RTLIB::Libcall RTLIB::getOUTLINE_ATOMIC(unsigned Opc, AtomicOrdering Order,
                                        MVT VT) {
  unsigned ModeN, ModelN;
  switch (VT.SimpleTy) {
  case MVT::i8:
    ModeN = 0;
    break;
  case MVT::i16:
    ModeN = 1;
    break;
  case MVT::i32:
    ModeN = 2;
    break;
  case MVT::i64:
    ModeN = 3;
    break;
  case MVT::i128:
    ModeN = 4;
    break;
  default:
    return UNKNOWN_LIBCALL;
  }

  switch (Order) {
  case AtomicOrdering::Monotonic:
    ModelN = 0;
    break;
  case AtomicOrdering::Acquire:
    ModelN = 1;
    break;
  case AtomicOrdering::Release:
    ModelN = 2;
    break;
  case AtomicOrdering::AcquireRelease:
  case AtomicOrdering::SequentiallyConsistent:
    ModelN = 3;
    break;
  default:
    return UNKNOWN_LIBCALL;
  }

#define LCALLS(A, B) \
  { A##B##_RELAX, A##B##_ACQ, A##B##_REL, A##B##_ACQ_REL }
#define LCALL5(A) \
  LCALLS(A, 1), LCALLS(A, 2), LCALLS(A, 4), LCALLS(A, 8), LCALLS(A, 16)
  switch (Opc) {
  case ISD::ATOMIC_CMP_SWAP: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_CAS)};
    return LC[ModeN][ModelN];
  }
  case ISD::ATOMIC_SWAP: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_SWP)};
    return LC[ModeN][ModelN];
  }
  case ISD::ATOMIC_LOAD_ADD: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDADD)};
    return LC[ModeN][ModelN];
  }
  case ISD::ATOMIC_LOAD_OR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDSET)};
    return LC[ModeN][ModelN];
  }
  case ISD::ATOMIC_LOAD_CLR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDCLR)};
    return LC[ModeN][ModelN];
  }
  case ISD::ATOMIC_LOAD_XOR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDEOR)};
    return LC[ModeN][ModelN];
  }
  default:
    return UNKNOWN_LIBCALL;
  }
#undef LCALLS
#undef LCALL5
}
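
// The [5][4] tables are indexed by access size (1, 2, 4, 8, 16 bytes) and
// memory order; e.g. getOUTLINE_ATOMIC(ISD::ATOMIC_LOAD_ADD,
// AtomicOrdering::Acquire, MVT::i32) returns OUTLINE_ATOMIC_LDADD4_ACQ, the
// __aarch64_ldadd4_acq helper.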

RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) {
#define OP_TO_LIBCALL(Name, Enum) \
  case Name: \
    switch (VT.SimpleTy) { \
    default: \
      return UNKNOWN_LIBCALL; \
    case MVT::i8: \
      return Enum##_1; \
    case MVT::i16: \
      return Enum##_2; \
    case MVT::i32: \
      return Enum##_4; \
    case MVT::i64: \
      return Enum##_8; \
    case MVT::i128: \
      return Enum##_16; \
    }

  switch (Opc) {
  OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
  OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
  }

#undef OP_TO_LIBCALL

  return UNKNOWN_LIBCALL;
}
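
// e.g. getSYNC(ISD::ATOMIC_LOAD_ADD, MVT::i32) returns SYNC_FETCH_AND_ADD_4,
// the libcall for __sync_fetch_and_add_4.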

RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}
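
// Each getter above simply selects the libcall variant matching the element
// size; e.g. an element size of 4 maps to
// __llvm_memcpy_element_unordered_atomic_4 and friends.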

/// InitCmpLibcallCCs - Set default comparison libcall CC.
static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
  memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F128] = ISD::SETEQ;
  CCs[RTLIB::OEQ_PPCF128] = ISD::SETEQ;
  CCs[RTLIB::UNE_F32] = ISD::SETNE;
  CCs[RTLIB::UNE_F64] = ISD::SETNE;
  CCs[RTLIB::UNE_F128] = ISD::SETNE;
  CCs[RTLIB::UNE_PPCF128] = ISD::SETNE;
  CCs[RTLIB::OGE_F32] = ISD::SETGE;
  CCs[RTLIB::OGE_F64] = ISD::SETGE;
  CCs[RTLIB::OGE_F128] = ISD::SETGE;
  CCs[RTLIB::OGE_PPCF128] = ISD::SETGE;
  CCs[RTLIB::OLT_F32] = ISD::SETLT;
  CCs[RTLIB::OLT_F64] = ISD::SETLT;
  CCs[RTLIB::OLT_F128] = ISD::SETLT;
  CCs[RTLIB::OLT_PPCF128] = ISD::SETLT;
  CCs[RTLIB::OLE_F32] = ISD::SETLE;
  CCs[RTLIB::OLE_F64] = ISD::SETLE;
  CCs[RTLIB::OLE_F128] = ISD::SETLE;
  CCs[RTLIB::OLE_PPCF128] = ISD::SETLE;
  CCs[RTLIB::OGT_F32] = ISD::SETGT;
  CCs[RTLIB::OGT_F64] = ISD::SETGT;
  CCs[RTLIB::OGT_F128] = ISD::SETGT;
  CCs[RTLIB::OGT_PPCF128] = ISD::SETGT;
  CCs[RTLIB::UO_F32] = ISD::SETNE;
  CCs[RTLIB::UO_F64] = ISD::SETNE;
  CCs[RTLIB::UO_F128] = ISD::SETNE;
  CCs[RTLIB::UO_PPCF128] = ISD::SETNE;
}
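
// The recorded condition code says how a comparison libcall's integer result
// must be tested against zero; e.g. __ltdf2(x, y) returns a negative value
// when neither operand is NaN and x < y, so an OLT_F64 result is consumed
// with SETLT.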

/// NOTE: The TargetMachine owns TLOF.
TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
  initActions();

  // Perform these initializations only once.
  MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove =
      MaxLoadsPerMemcmp = 8;
  MaxGluedStoresPerMemcpy = 0;
  MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize =
      MaxStoresPerMemmoveOptSize = MaxLoadsPerMemcmpOptSize = 4;
  HasMultipleConditionRegisters = false;
  HasExtractBitsInsn = false;
  JumpIsExpensive = JumpIsExpensiveOverride;
  PredictableSelectIsExpensive = false;
  EnableExtLdPromotion = false;
  StackPointerRegisterToSaveRestore = 0;
  BooleanContents = UndefinedBooleanContent;
  BooleanFloatContents = UndefinedBooleanContent;
  BooleanVectorContents = UndefinedBooleanContent;
  SchedPreferenceInfo = Sched::ILP;
  GatherAllAliasesMaxDepth = 18;
  IsStrictFPEnabled = DisableStrictNodeMutation;
  // TODO: the default will be switched to 0 in the next commit, along
  // with the Target-specific changes necessary.
  MaxAtomicSizeInBitsSupported = 1024;

  MinCmpXchgSizeInBits = 0;
  SupportsUnalignedAtomics = false;

  std::fill(std::begin(LibcallRoutineNames), std::end(LibcallRoutineNames), nullptr);

  InitLibcalls(TM.getTargetTriple());
  InitCmpLibcallCCs(CmpLibcallCCs);
}

void TargetLoweringBase::initActions() {
  // All operations default to being supported.
  memset(OpActions, 0, sizeof(OpActions));
  memset(LoadExtActions, 0, sizeof(LoadExtActions));
  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
  memset(CondCodeActions, 0, sizeof(CondCodeActions));
  std::fill(std::begin(RegClassForVT), std::end(RegClassForVT), nullptr);
  std::fill(std::begin(TargetDAGCombineArray),
            std::end(TargetDAGCombineArray), 0);

  for (MVT VT : MVT::fp_valuetypes()) {
    MVT IntVT = MVT::getIntegerVT(VT.getFixedSizeInBits());
    if (IntVT.isValid()) {
      setOperationAction(ISD::ATOMIC_SWAP, VT, Promote);
      AddPromotedToType(ISD::ATOMIC_SWAP, VT, IntVT);
    }
  }

  // Set default actions for various operations.
  for (MVT VT : MVT::all_valuetypes()) {
    // Default all indexed load / store to expand.
    for (unsigned IM = (unsigned)ISD::PRE_INC;
         IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
      setIndexedLoadAction(IM, VT, Expand);
      setIndexedStoreAction(IM, VT, Expand);
      setIndexedMaskedLoadAction(IM, VT, Expand);
      setIndexedMaskedStoreAction(IM, VT, Expand);
    }

    // Most backends expect to see the node which just returns the value loaded.
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand);

    // These operations default to expand.
    setOperationAction(ISD::FGETSIGN, VT, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, VT, Expand);
    setOperationAction(ISD::FMINNUM, VT, Expand);
    setOperationAction(ISD::FMAXNUM, VT, Expand);
    setOperationAction(ISD::FMINNUM_IEEE, VT, Expand);
    setOperationAction(ISD::FMAXNUM_IEEE, VT, Expand);
    setOperationAction(ISD::FMINIMUM, VT, Expand);
    setOperationAction(ISD::FMAXIMUM, VT, Expand);
    setOperationAction(ISD::FMAD, VT, Expand);
    setOperationAction(ISD::SMIN, VT, Expand);
    setOperationAction(ISD::SMAX, VT, Expand);
    setOperationAction(ISD::UMIN, VT, Expand);
    setOperationAction(ISD::UMAX, VT, Expand);
    setOperationAction(ISD::ABS, VT, Expand);
    setOperationAction(ISD::FSHL, VT, Expand);
    setOperationAction(ISD::FSHR, VT, Expand);
    setOperationAction(ISD::SADDSAT, VT, Expand);
    setOperationAction(ISD::UADDSAT, VT, Expand);
    setOperationAction(ISD::SSUBSAT, VT, Expand);
    setOperationAction(ISD::USUBSAT, VT, Expand);
    setOperationAction(ISD::SSHLSAT, VT, Expand);
    setOperationAction(ISD::USHLSAT, VT, Expand);
    setOperationAction(ISD::SMULFIX, VT, Expand);
    setOperationAction(ISD::SMULFIXSAT, VT, Expand);
    setOperationAction(ISD::UMULFIX, VT, Expand);
    setOperationAction(ISD::UMULFIXSAT, VT, Expand);
    setOperationAction(ISD::SDIVFIX, VT, Expand);
    setOperationAction(ISD::SDIVFIXSAT, VT, Expand);
    setOperationAction(ISD::UDIVFIX, VT, Expand);
    setOperationAction(ISD::UDIVFIXSAT, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT_SAT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT_SAT, VT, Expand);

    // Overflow operations default to expand
    setOperationAction(ISD::SADDO, VT, Expand);
    setOperationAction(ISD::SSUBO, VT, Expand);
    setOperationAction(ISD::UADDO, VT, Expand);
    setOperationAction(ISD::USUBO, VT, Expand);
    setOperationAction(ISD::SMULO, VT, Expand);
    setOperationAction(ISD::UMULO, VT, Expand);

    // ADDCARRY operations default to expand
    setOperationAction(ISD::ADDCARRY, VT, Expand);
    setOperationAction(ISD::SUBCARRY, VT, Expand);
    setOperationAction(ISD::SETCCCARRY, VT, Expand);
    setOperationAction(ISD::SADDO_CARRY, VT, Expand);
    setOperationAction(ISD::SSUBO_CARRY, VT, Expand);

    // ADDC/ADDE/SUBC/SUBE default to expand.
    setOperationAction(ISD::ADDC, VT, Expand);
    setOperationAction(ISD::ADDE, VT, Expand);
    setOperationAction(ISD::SUBC, VT, Expand);
    setOperationAction(ISD::SUBE, VT, Expand);

    // Absolute difference
    setOperationAction(ISD::ABDS, VT, Expand);
    setOperationAction(ISD::ABDU, VT, Expand);

    // These default to Expand so they will be expanded to CTLZ/CTTZ by default.
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);

    setOperationAction(ISD::BITREVERSE, VT, Expand);
    setOperationAction(ISD::PARITY, VT, Expand);

    // These library functions default to expand.
    setOperationAction(ISD::FROUND, VT, Expand);
    setOperationAction(ISD::FROUNDEVEN, VT, Expand);
    setOperationAction(ISD::FPOWI, VT, Expand);

    // These operations default to expand for vector types.
    if (VT.isVector()) {
      setOperationAction(ISD::FCOPYSIGN, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ANY_EXTEND_VECTOR_INREG, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Expand);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Expand);
      setOperationAction(ISD::SPLAT_VECTOR, VT, Expand);
    }

    // Constrained floating-point operations default to expand.
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
    setOperationAction(ISD::STRICT_##DAGN, VT, Expand);
#include "llvm/IR/ConstrainedOps.def"

    // For most targets @llvm.get.dynamic.area.offset just returns 0.
    setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, VT, Expand);

    // Vector reductions default to expand.
    setOperationAction(ISD::VECREDUCE_FADD, VT, Expand);
    setOperationAction(ISD::VECREDUCE_FMUL, VT, Expand);
    setOperationAction(ISD::VECREDUCE_ADD, VT, Expand);
    setOperationAction(ISD::VECREDUCE_MUL, VT, Expand);
    setOperationAction(ISD::VECREDUCE_AND, VT, Expand);
    setOperationAction(ISD::VECREDUCE_OR, VT, Expand);
    setOperationAction(ISD::VECREDUCE_XOR, VT, Expand);
    setOperationAction(ISD::VECREDUCE_SMAX, VT, Expand);
    setOperationAction(ISD::VECREDUCE_SMIN, VT, Expand);
    setOperationAction(ISD::VECREDUCE_UMAX, VT, Expand);
    setOperationAction(ISD::VECREDUCE_UMIN, VT, Expand);
    setOperationAction(ISD::VECREDUCE_FMAX, VT, Expand);
    setOperationAction(ISD::VECREDUCE_FMIN, VT, Expand);
    setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Expand);
    setOperationAction(ISD::VECREDUCE_SEQ_FMUL, VT, Expand);

    // Named vector shuffles default to expand.
    setOperationAction(ISD::VECTOR_SPLICE, VT, Expand);
  }

  // Most targets ignore the @llvm.prefetch intrinsic.
  setOperationAction(ISD::PREFETCH, MVT::Other, Expand);

  // Most targets also ignore the @llvm.readcyclecounter intrinsic.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Expand);

  // ConstantFP nodes default to expand. Targets can either change this to
  // Legal, in which case all fp constants are legal, or use isFPImmLegal()
  // to optimize expansions for certain constants.
  setOperationAction(ISD::ConstantFP, MVT::f16, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f128, Expand);

  // These library functions default to expand.
  for (MVT VT : {MVT::f32, MVT::f64, MVT::f128}) {
    setOperationAction(ISD::FCBRT, VT, Expand);
    setOperationAction(ISD::FLOG , VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP , VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FROUND, VT, Expand);
    setOperationAction(ISD::FROUNDEVEN, VT, Expand);
    setOperationAction(ISD::LROUND, VT, Expand);
    setOperationAction(ISD::LLROUND, VT, Expand);
    setOperationAction(ISD::LRINT, VT, Expand);
    setOperationAction(ISD::LLRINT, VT, Expand);
  }

  // Default ISD::TRAP to expand (which turns it into abort).
  setOperationAction(ISD::TRAP, MVT::Other, Expand);

  // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
  // here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP.
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);

  setOperationAction(ISD::UBSANTRAP, MVT::Other, Expand);
}

MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
                                               EVT) const {
  return MVT::getIntegerVT(DL.getPointerSizeInBits(0));
}

EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
                                         bool LegalTypes) const {
  assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
  if (LHSTy.isVector())
    return LHSTy;
  return LegalTypes ? getScalarShiftAmountTy(DL, LHSTy)
                    : getPointerTy(DL);
}

bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
  assert(isTypeLegal(VT));
  switch (Op) {
  default:
    return false;
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
    return true;
  }
}

bool TargetLoweringBase::isFreeAddrSpaceCast(unsigned SrcAS,
                                             unsigned DestAS) const {
  return TM.isNoopAddrSpaceCast(SrcAS, DestAS);
}

void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
  // If the command-line option was specified, ignore this request.
  if (!JumpIsExpensiveOverride.getNumOccurrences())
    JumpIsExpensive = isExpensive;
}

TargetLoweringBase::LegalizeKind
TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
  // If this is a simple type, use the ComputeRegisterProp mechanism.
  if (VT.isSimple()) {
    MVT SVT = VT.getSimpleVT();
    assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
    MVT NVT = TransformToType[SVT.SimpleTy];
    LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);

    assert((LA == TypeLegal || LA == TypeSoftenFloat ||
            LA == TypeSoftPromoteHalf ||
            (NVT.isVector() ||
             ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger)) &&
           "Promote may not follow Expand or Promote");

    if (LA == TypeSplitVector)
      return LegalizeKind(LA, EVT(SVT).getHalfNumVectorElementsVT(Context));
    if (LA == TypeScalarizeVector)
      return LegalizeKind(LA, SVT.getVectorElementType());
    return LegalizeKind(LA, NVT);
  }

  // Handle Extended Scalar Types.
  if (!VT.isVector()) {
    assert(VT.isInteger() && "Float types must be simple");
    unsigned BitSize = VT.getSizeInBits();
    // First promote to a power-of-two size, then expand if necessary.
    if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
      EVT NVT = VT.getRoundIntegerType(Context);
      assert(NVT != VT && "Unable to round integer VT");
      LegalizeKind NextStep = getTypeConversion(Context, NVT);
      // Avoid multi-step promotion.
      if (NextStep.first == TypePromoteInteger)
        return NextStep;
      // Return rounded integer type.
      return LegalizeKind(TypePromoteInteger, NVT);
    }

    return LegalizeKind(TypeExpandInteger,
                        EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
  }
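
  // For example, the scalar rules above promote i33 to i64 (the next
  // power-of-two integer type), while an already-power-of-two i256 is
  // expanded into two i128 halves.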

  // Handle vector types.
  ElementCount NumElts = VT.getVectorElementCount();
  EVT EltVT = VT.getVectorElementType();

  // Vectors with only one element are always scalarized.
  if (NumElts.isScalar())
    return LegalizeKind(TypeScalarizeVector, EltVT);

  // Try to widen vector elements until the element type is a power of two and
  // promote it to a legal type later on, for example:
  // <3 x i8> -> <4 x i8> -> <4 x i32>
  if (EltVT.isInteger()) {
    // Vectors with a number of elements that is not a power of two are always
    // widened, for example <3 x i8> -> <4 x i8>.
    if (!VT.isPow2VectorType()) {
      NumElts = NumElts.coefficientNextPowerOf2();
      EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
      return LegalizeKind(TypeWidenVector, NVT);
    }

    // Examine the element type.
    LegalizeKind LK = getTypeConversion(Context, EltVT);

    // If type is to be expanded, split the vector.
    // <4 x i140> -> <2 x i140>
    if (LK.first == TypeExpandInteger) {
      if (VT.getVectorElementCount().isScalable())
        return LegalizeKind(TypeScalarizeScalableVector, EltVT);
      return LegalizeKind(TypeSplitVector,
                          VT.getHalfNumVectorElementsVT(Context));
    }

    // Promote the integer element types until a legal vector type is found
    // or until the element integer type is too big. If a legal type was not
    // found, fallback to the usual mechanism of widening/splitting the
    // vector.
    EVT OldEltVT = EltVT;
    while (true) {
      // Increase the bitwidth of the element to the next pow-of-two
      // (which is greater than 8 bits).
      EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
                  .getRoundIntegerType(Context);

      // Stop trying when getting a non-simple element type.
      // Note that vector elements may be greater than legal vector element
      // types. Example: X86 XMM registers hold 64bit element on 32bit
      // systems.
      if (!EltVT.isSimple())
        break;

      // Build a new vector type and check if it is legal.
      MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
      // Found a legal promoted vector type.
      if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
        return LegalizeKind(TypePromoteInteger,
                            EVT::getVectorVT(Context, EltVT, NumElts));
    }

    // Reset the type to the unexpanded type if we did not find a legal vector
    // type with a promoted vector element type.
    EltVT = OldEltVT;
  }

  // Try to widen the vector until a legal type is found.
  // If there is no wider legal type, split the vector.
  while (true) {
    // Round up to the next power of 2.
    NumElts = NumElts.coefficientNextPowerOf2();

    // If there is no simple vector type with this many elements then there
    // cannot be a larger legal vector type. Note that this assumes that
    // there are no skipped intermediate vector types in the simple types.
    if (!EltVT.isSimple())
      break;
    MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
    if (LargerVector == MVT())
      break;

    // If this type is legal then widen the vector.
    if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
      return LegalizeKind(TypeWidenVector, LargerVector);
  }

  // Widen odd vectors to next power of two.
  if (!VT.isPow2VectorType()) {
    EVT NVT = VT.getPow2VectorType(Context);
    return LegalizeKind(TypeWidenVector, NVT);
  }

  if (VT.getVectorElementCount() == ElementCount::getScalable(1))
    return LegalizeKind(TypeScalarizeScalableVector, EltVT);

  // Vectors with illegal element types are expanded.
  EVT NVT = EVT::getVectorVT(Context, EltVT,
                             VT.getVectorElementCount().divideCoefficientBy(2));
  return LegalizeKind(TypeSplitVector, NVT);
}

static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
                                          unsigned &NumIntermediates,
                                          MVT &RegisterVT,
                                          TargetLoweringBase *TLI) {
  // Figure out the right, legal destination reg to copy into.
  ElementCount EC = VT.getVectorElementCount();
  MVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // Scalable vectors cannot be scalarized, so splitting or widening is
  // required.
  if (VT.isScalableVector() && !isPowerOf2_32(EC.getKnownMinValue()))
    llvm_unreachable(
        "Splitting or widening of non-power-of-2 MVTs is not implemented.");

  // FIXME: We don't support non-power-of-2-sized vectors for now.
  // Ideally we could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(EC.getKnownMinValue())) {
    // Split EC to unit size (scalable property is preserved).
    NumVectorRegs = EC.getKnownMinValue();
    EC = ElementCount::getFixed(1);
  }

  // Divide the input until we get to a supported size. This will
  // always end up with an EC that represents a scalar or a scalable
  // scalar.
  while (EC.getKnownMinValue() > 1 &&
         !TLI->isTypeLegal(MVT::getVectorVT(EltTy, EC))) {
    EC = EC.divideCoefficientBy(2);
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  MVT NewVT = MVT::getVectorVT(EltTy, EC);
  if (!TLI->isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  unsigned LaneSizeInBits = NewVT.getScalarSizeInBits();

  // Convert sizes such as i33 to i64.
  if (!isPowerOf2_32(LaneSizeInBits))
    LaneSizeInBits = NextPowerOf2(LaneSizeInBits);

  MVT DestVT = TLI->getRegisterType(NewVT);
  RegisterVT = DestVT;
  if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs * (LaneSizeInBits / DestVT.getScalarSizeInBits());

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}
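
// Illustrative walk-through on a hypothetical target whose widest legal
// vector type is v4i32: for MVT::v8i32 the loop above halves the element
// count once, so NumIntermediates == 2, IntermediateVT == RegisterVT ==
// v4i32, and the function returns 2.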

/// isLegalRC - Return true if the value types that can be represented by the
/// specified register class are all legal.
bool TargetLoweringBase::isLegalRC(const TargetRegisterInfo &TRI,
                                   const TargetRegisterClass &RC) const {
  for (auto I = TRI.legalclasstypes_begin(RC); *I != MVT::Other; ++I)
    if (isTypeLegal(*I))
      return true;
  return false;
}

/// Replace/modify any TargetFrameIndex operands with a target-dependent
/// sequence of memory operands that is recognized by PrologEpilogInserter.
MachineBasicBlock *
TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
                                   MachineBasicBlock *MBB) const {
  MachineInstr *MI = &InitialMI;
  MachineFunction &MF = *MI->getMF();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // We're handling multiple types of operands here:
  // PATCHPOINT MetaArgs - live-in, read only, direct
  // STATEPOINT Deopt Spill - live-through, read only, indirect
  // STATEPOINT Deopt Alloca - live-through, read only, direct
  // (We're currently conservative and mark the deopt slots read/write in
  // practice.)
  // STATEPOINT GC Spill - live-through, read/write, indirect
  // STATEPOINT GC Alloca - live-through, read/write, direct
  // The live-in vs live-through is handled already (the live through ones are
  // all stack slots), but we need to handle the different type of stackmap
  // operands and memory effects here.

  if (!llvm::any_of(MI->operands(),
                    [](MachineOperand &Operand) { return Operand.isFI(); }))
    return MBB;

  MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());

  // Inherit previous memory operands.
  MIB.cloneMemRefs(*MI);

  for (unsigned i = 0; i < MI->getNumOperands(); ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isFI()) {
      // Index of the Def operand this Use is tied to.
      // Since Defs come before Uses, if a Use is tied, then the
      // index of the Def must be smaller than the index of that Use.
      // Also, Defs preserve their position in the new MI.
1198 unsigned TiedTo = i;
1199 if (MO.isReg() && MO.isTied())
1200 TiedTo = MI->findTiedOperandIdx(i);
1201 MIB.add(MO);
1202 if (TiedTo < i)
1203 MIB->tieOperands(TiedTo, MIB->getNumOperands() - 1);
1204 continue;
1205 }
1206
1207 // foldMemoryOperand builds a new MI after replacing a single FI operand
1208 // with the canonical set of five x86 addressing-mode operands.
1209 int FI = MO.getIndex();
1210
1211 // Add frame index operands recognized by stackmaps.cpp
1212 if (MFI.isStatepointSpillSlotObjectIndex(FI)) {
1213 // indirect-mem-ref tag, size, #FI, offset.
1214 // Used for spills inserted by StatepointLowering. This codepath is not
1215 // used for patchpoints/stackmaps at all, for these spilling is done via
1216 // foldMemoryOperand callback only.
1217 assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity");
1218 MIB.addImm(StackMaps::IndirectMemRefOp);
1219 MIB.addImm(MFI.getObjectSize(FI));
1220 MIB.add(MO);
1221 MIB.addImm(0);
1222 } else {
1223 // direct-mem-ref tag, #FI, offset.
1224 // Used by patchpoint, and direct alloca arguments to statepoints
1225 MIB.addImm(StackMaps::DirectMemRefOp);
1226 MIB.add(MO);
1227 MIB.addImm(0);
1228 }
1229
1230 assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");
1231
1232 // Add a new memory operand for this FI.
1233 assert(MFI.getObjectOffset(FI) != -1);
1234
1235 // Note: STATEPOINT MMOs are added during SelectionDAG. STACKMAP, and
1236 // PATCHPOINT should be updated to do the same. (TODO)
1237 if (MI->getOpcode() != TargetOpcode::STATEPOINT) {
1238 auto Flags = MachineMemOperand::MOLoad;
1239 MachineMemOperand *MMO = MF.getMachineMemOperand(
1240 MachinePointerInfo::getFixedStack(MF, FI), Flags,
1241 MF.getDataLayout().getPointerSize(), MFI.getObjectAlign(FI));
1242 MIB->addMemOperand(MF, MMO);
1243 }
1244 }
1245 MBB->insert(MachineBasicBlock::iterator(MI), MIB);
1246 MI->eraseFromParent();
1247 return MBB;
1248 }
1249
1250 /// findRepresentativeClass - Return the largest legal super-reg register class
1251 /// of the register class for the specified type and its associated "cost".
1252 // This function is in TargetLowering because it uses RegClassForVT which would
1253 // need to be moved to TargetRegisterInfo and would necessitate moving
1254 // isTypeLegal over as well - a massive change that would just require
1255 // TargetLowering having a TargetRegisterInfo class member that it would use.
1256 std::pair<const TargetRegisterClass *, uint8_t>
findRepresentativeClass(const TargetRegisterInfo * TRI,MVT VT) const1257 TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
1258 MVT VT) const {
1259 const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
1260 if (!RC)
1261 return std::make_pair(RC, 0);
1262
1263 // Compute the set of all super-register classes.
1264 BitVector SuperRegRC(TRI->getNumRegClasses());
1265 for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
1266 SuperRegRC.setBitsInMask(RCI.getMask());
1267
1268 // Find the first legal register class with the largest spill size.
1269 const TargetRegisterClass *BestRC = RC;
1270 for (unsigned i : SuperRegRC.set_bits()) {
1271 const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
1272 // We want the largest possible spill size.
1273 if (TRI->getSpillSize(*SuperRC) <= TRI->getSpillSize(*BestRC))
1274 continue;
1275 if (!isLegalRC(*TRI, *SuperRC))
1276 continue;
1277 BestRC = SuperRC;
1278 }
1279 return std::make_pair(BestRC, 1);
1280 }
1281
1282 /// computeRegisterProperties - Once all of the register classes are added,
1283 /// this allows us to compute derived properties we expose.
computeRegisterProperties(const TargetRegisterInfo * TRI)1284 void TargetLoweringBase::computeRegisterProperties(
1285 const TargetRegisterInfo *TRI) {
1286 static_assert(MVT::VALUETYPE_SIZE <= MVT::MAX_ALLOWED_VALUETYPE,
1287 "Too many value types for ValueTypeActions to hold!");
1288
1289 // Everything defaults to needing one register.
1290 for (unsigned i = 0; i != MVT::VALUETYPE_SIZE; ++i) {
1291 NumRegistersForVT[i] = 1;
1292 RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
1293 }
1294 // ...except isVoid, which doesn't need any registers.
1295 NumRegistersForVT[MVT::isVoid] = 0;
1296
1297 // Find the largest integer register class.
1298 unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
1299 for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
1300 assert(LargestIntReg != MVT::i1 && "No integer registers defined!");
1301
1302 // Every integer value type larger than this largest register takes twice as
1303 // many registers to represent as the previous ValueType.
1304 for (unsigned ExpandedReg = LargestIntReg + 1;
1305 ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
1306 NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
1307 RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
1308 TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
1309 ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
1310 TypeExpandInteger);
1311 }
1312
1313 // Inspect all of the ValueType's smaller than the largest integer
1314 // register to see which ones need promotion.
1315 unsigned LegalIntReg = LargestIntReg;
1316 for (unsigned IntReg = LargestIntReg - 1;
1317 IntReg >= (unsigned)MVT::i1; --IntReg) {
1318 MVT IVT = (MVT::SimpleValueType)IntReg;
1319 if (isTypeLegal(IVT)) {
1320 LegalIntReg = IntReg;
1321 } else {
1322 RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
1323 (MVT::SimpleValueType)LegalIntReg;
1324 ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
1325 }
1326 }
1327
1328 // ppcf128 type is really two f64's.
1329 if (!isTypeLegal(MVT::ppcf128)) {
1330 if (isTypeLegal(MVT::f64)) {
1331 NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
1332 RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
1333 TransformToType[MVT::ppcf128] = MVT::f64;
1334 ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
1335 } else {
1336 NumRegistersForVT[MVT::ppcf128] = NumRegistersForVT[MVT::i128];
1337 RegisterTypeForVT[MVT::ppcf128] = RegisterTypeForVT[MVT::i128];
1338 TransformToType[MVT::ppcf128] = MVT::i128;
1339 ValueTypeActions.setTypeAction(MVT::ppcf128, TypeSoftenFloat);
1340 }
1341 }
1342
1343 // Decide how to handle f128. If the target does not have native f128 support,
1344 // expand it to i128 and we will be generating soft float library calls.
1345 if (!isTypeLegal(MVT::f128)) {
1346 NumRegistersForVT[MVT::f128] = NumRegistersForVT[MVT::i128];
1347 RegisterTypeForVT[MVT::f128] = RegisterTypeForVT[MVT::i128];
1348 TransformToType[MVT::f128] = MVT::i128;
1349 ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
1350 }
1351
1352 // Decide how to handle f64. If the target does not have native f64 support,
1353 // expand it to i64 and we will be generating soft float library calls.
1354 if (!isTypeLegal(MVT::f64)) {
1355 NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
1356 RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
1357 TransformToType[MVT::f64] = MVT::i64;
1358 ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
1359 }
1360
1361 // Decide how to handle f32. If the target does not have native f32 support,
1362 // expand it to i32 and we will be generating soft float library calls.
1363 if (!isTypeLegal(MVT::f32)) {
1364 NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
1365 RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
1366 TransformToType[MVT::f32] = MVT::i32;
1367 ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
1368 }
1369
1370 // Decide how to handle f16. If the target does not have native f16 support,
1371 // promote it to f32, because there are no f16 library calls (except for
1372 // conversions).
1373 if (!isTypeLegal(MVT::f16)) {
1374 // Allow targets to control how we legalize half.
1375 if (softPromoteHalfType()) {
1376 NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::i16];
1377 RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::i16];
1378 TransformToType[MVT::f16] = MVT::f32;
1379 ValueTypeActions.setTypeAction(MVT::f16, TypeSoftPromoteHalf);
1380 } else {
1381 NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
1382 RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
1383 TransformToType[MVT::f16] = MVT::f32;
1384 ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat);
1385 }
1386 }
1387
1388 // Loop over all of the vector value types to see which need transformations.
1389 for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
1390 i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
1391 MVT VT = (MVT::SimpleValueType) i;
1392 if (isTypeLegal(VT))
1393 continue;
1394
1395 MVT EltVT = VT.getVectorElementType();
1396 ElementCount EC = VT.getVectorElementCount();
1397 bool IsLegalWiderType = false;
1398 bool IsScalable = VT.isScalableVector();
1399 LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
1400 switch (PreferredAction) {
1401 case TypePromoteInteger: {
1402 MVT::SimpleValueType EndVT = IsScalable ?
1403 MVT::LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE :
1404 MVT::LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE;
1405 // Try to promote the elements of integer vectors. If no legal
1406 // promotion was found, fall through to the widen-vector method.
1407 for (unsigned nVT = i + 1;
1408 (MVT::SimpleValueType)nVT <= EndVT; ++nVT) {
1409 MVT SVT = (MVT::SimpleValueType) nVT;
1410 // Promote vectors of integers to vectors with the same number
1411 // of elements, with a wider element type.
1412 if (SVT.getScalarSizeInBits() > EltVT.getFixedSizeInBits() &&
1413 SVT.getVectorElementCount() == EC && isTypeLegal(SVT)) {
1414 TransformToType[i] = SVT;
1415 RegisterTypeForVT[i] = SVT;
1416 NumRegistersForVT[i] = 1;
1417 ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
1418 IsLegalWiderType = true;
1419 break;
1420 }
1421 }
1422 if (IsLegalWiderType)
1423 break;
1424 LLVM_FALLTHROUGH;
1425 }
1426
1427 case TypeWidenVector:
1428 if (isPowerOf2_32(EC.getKnownMinValue())) {
1429 // Try to widen the vector.
1430 for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
1431 MVT SVT = (MVT::SimpleValueType) nVT;
1432 if (SVT.getVectorElementType() == EltVT &&
1433 SVT.isScalableVector() == IsScalable &&
1434 SVT.getVectorElementCount().getKnownMinValue() >
1435 EC.getKnownMinValue() &&
1436 isTypeLegal(SVT)) {
1437 TransformToType[i] = SVT;
1438 RegisterTypeForVT[i] = SVT;
1439 NumRegistersForVT[i] = 1;
1440 ValueTypeActions.setTypeAction(VT, TypeWidenVector);
1441 IsLegalWiderType = true;
1442 break;
1443 }
1444 }
1445 if (IsLegalWiderType)
1446 break;
1447 } else {
1448 // Only widen to the next power of 2 to keep consistency with EVT.
        MVT NVT = VT.getPow2VectorType();
        if (isTypeLegal(NVT)) {
          TransformToType[i] = NVT;
          ValueTypeActions.setTypeAction(VT, TypeWidenVector);
          RegisterTypeForVT[i] = NVT;
          NumRegistersForVT[i] = 1;
          break;
        }
      }
      LLVM_FALLTHROUGH;

    case TypeSplitVector:
    case TypeScalarizeVector: {
      MVT IntermediateVT;
      MVT RegisterVT;
      unsigned NumIntermediates;
      unsigned NumRegisters = getVectorTypeBreakdownMVT(VT, IntermediateVT,
          NumIntermediates, RegisterVT, this);
      NumRegistersForVT[i] = NumRegisters;
      assert(NumRegistersForVT[i] == NumRegisters &&
             "NumRegistersForVT size cannot represent NumRegisters!");
      RegisterTypeForVT[i] = RegisterVT;

      MVT NVT = VT.getPow2VectorType();
      if (NVT == VT) {
        // Type is already a power of 2. The default action is to split.
        TransformToType[i] = MVT::Other;
        if (PreferredAction == TypeScalarizeVector)
          ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
        else if (PreferredAction == TypeSplitVector)
          ValueTypeActions.setTypeAction(VT, TypeSplitVector);
        else if (EC.getKnownMinValue() > 1)
          ValueTypeActions.setTypeAction(VT, TypeSplitVector);
        else
          ValueTypeActions.setTypeAction(VT, EC.isScalable()
                                                 ? TypeScalarizeScalableVector
                                                 : TypeScalarizeVector);
      } else {
        TransformToType[i] = NVT;
        ValueTypeActions.setTypeAction(VT, TypeWidenVector);
      }
      break;
    }
    default:
      llvm_unreachable("Unknown vector legalization action!");
    }
  }

  // Determine the 'representative' register class for each value type.
  // A representative register class is the largest legal register class
  // (i.e. one that is not a sub-register class) for a group of value types.
  // For example, on i386 the representative class for i8, i16, and i32 would
  // be GR32; on x86_64 it is GR64.
  for (unsigned i = 0; i != MVT::VALUETYPE_SIZE; ++i) {
    const TargetRegisterClass* RRC;
    uint8_t Cost;
    std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
    RepRegClassForVT[i] = RRC;
    RepRegClassCostForVT[i] = Cost;
  }
}

EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                           EVT VT) const {
  assert(!VT.isVector() && "No default SetCC type for vectors!");
  return getPointerTy(DL).SimpleTy;
}

MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
  return MVT::i32; // return the default value
}

/// getVectorTypeBreakdown - Vector types are broken down into some number of
/// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32
/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
///
/// This method returns the number of registers needed, and the VT for each
/// register. It also returns the VT and quantity of the intermediate values
/// before they are promoted/expanded.
unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context,
                                                    EVT VT, EVT &IntermediateVT,
                                                    unsigned &NumIntermediates,
                                                    MVT &RegisterVT) const {
  ElementCount EltCnt = VT.getVectorElementCount();

  // If there is a wider vector type with the same element type as this one,
  // or a promoted vector type that has the same number of elements which
  // are wider, then we should convert to that legal vector type.
  // This handles things like <2 x float> -> <4 x float> and
  // <4 x i1> -> <4 x i32>.
  LegalizeTypeAction TA = getTypeAction(Context, VT);
  if (!EltCnt.isScalar() &&
      (TA == TypeWidenVector || TA == TypePromoteInteger)) {
    EVT RegisterEVT = getTypeToTransformTo(Context, VT);
    if (isTypeLegal(RegisterEVT)) {
      IntermediateVT = RegisterEVT;
      RegisterVT = RegisterEVT.getSimpleVT();
      NumIntermediates = 1;
      return 1;
    }
  }

  // Figure out the right, legal destination reg to copy into.
  EVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // Scalable vectors cannot be scalarized, so handle the legalisation of the
  // types as is done elsewhere in SelectionDAG.
  if (VT.isScalableVector() && !isPowerOf2_32(EltCnt.getKnownMinValue())) {
    LegalizeKind LK;
    EVT PartVT = VT;
    do {
      // Iterate until we've found a legal (part) type to hold VT.
      LK = getTypeConversion(Context, PartVT);
      PartVT = LK.second;
    } while (LK.first != TypeLegal);

    NumIntermediates = VT.getVectorElementCount().getKnownMinValue() /
                       PartVT.getVectorElementCount().getKnownMinValue();

    // FIXME: This code needs to be extended to handle more complex vector
    // breakdowns, like nxv7i64 -> nxv8i64 -> 4 x nxv2i64. Currently the only
    // supported cases are vectors that are broken down into equal parts
    // such as nxv6i64 -> 3 x nxv2i64.
    assert((PartVT.getVectorElementCount() * NumIntermediates) ==
               VT.getVectorElementCount() &&
           "Expected an integer multiple of PartVT");
    IntermediateVT = PartVT;
    RegisterVT = getRegisterType(Context, IntermediateVT);
    return NumIntermediates;
  }

  // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally
  // we could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(EltCnt.getKnownMinValue())) {
    NumVectorRegs = EltCnt.getKnownMinValue();
    EltCnt = ElementCount::getFixed(1);
  }

  // Divide the input until we get to a supported size. This will always
  // end with a scalar if the target doesn't support vectors.
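  // For example, v8f32 on a target whose widest legal vector is v4f32 is
  // halved once: the loop stops at v4f32 with NumVectorRegs == 2.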
  while (EltCnt.getKnownMinValue() > 1 &&
         !isTypeLegal(EVT::getVectorVT(Context, EltTy, EltCnt))) {
    EltCnt = EltCnt.divideCoefficientBy(2);
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  EVT NewVT = EVT::getVectorVT(Context, EltTy, EltCnt);
  if (!isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  MVT DestVT = getRegisterType(Context, NewVT);
  RegisterVT = DestVT;

  if (EVT(DestVT).bitsLT(NewVT)) { // Value is expanded, e.g. i64 -> i16.
    TypeSize NewVTSize = NewVT.getSizeInBits();
    // Convert sizes such as i33 to i64.
    if (!isPowerOf2_32(NewVTSize.getKnownMinSize()))
      NewVTSize = NewVTSize.coefficientNextPowerOf2();
    return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
  }

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}

bool TargetLoweringBase::isSuitableForJumpTable(const SwitchInst *SI,
                                                uint64_t NumCases,
                                                uint64_t Range,
                                                ProfileSummaryInfo *PSI,
                                                BlockFrequencyInfo *BFI) const {
  // FIXME: This function checks the maximum table size and density, but the
  // minimum size is not checked. It would be nice if the minimum size check
  // were also combined into this function. Currently, the minimum size check
  // is performed in findJumpTable() in SelectionDAGBuilder and
  // getEstimatedNumberOfCaseClusters() in BasicTTIImpl.
  const bool OptForSize =
      SI->getParent()->getParent()->hasOptSize() ||
      llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI);
  const unsigned MinDensity = getMinimumJumpTableDensity(OptForSize);
  const unsigned MaxJumpTableSize = getMaximumJumpTableSize();

  // Check whether the number of cases is small enough and
  // the range is dense enough for a jump table.
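  // For example, with the default MinDensity of 10, a switch with 8 cases
  // spread over a range of 64 has a density of 12.5% (8 * 100 >= 64 * 10)
  // and therefore qualifies.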
  return (OptForSize || Range <= MaxJumpTableSize) &&
         (NumCases * 100 >= Range * MinDensity);
}

/// Get the EVTs and ArgFlags collections that represent the legalized return
/// type of the given function. This does not require a DAG or a return value,
/// and is suitable for use before any DAGs for the function are constructed.
/// TODO: Move this out of TargetLowering.cpp.
void llvm::GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
                         AttributeList attr,
                         SmallVectorImpl<ISD::OutputArg> &Outs,
                         const TargetLowering &TLI, const DataLayout &DL) {
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0) return;

  for (unsigned j = 0, f = NumValues; j != f; ++j) {
    EVT VT = ValueVTs[j];
    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
      ExtendKind = ISD::SIGN_EXTEND;
    else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
      ExtendKind = ISD::ZERO_EXTEND;

    // FIXME: C calling convention requires the return type to be promoted to
    // at least 32-bit. But this is not necessary for non-C calling
    // conventions. The frontend should mark functions whose return values
    // require promoting with signext or zeroext attributes.
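    // For example, an i8 return value carrying the signext attribute is
    // widened here to the register type for i32 (typically i32 itself).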
    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
      MVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
      if (VT.bitsLT(MinVT))
        VT = MinVT;
    }

    unsigned NumParts =
        TLI.getNumRegistersForCallingConv(ReturnType->getContext(), CC, VT);
    MVT PartVT =
        TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), CC, VT);

    // 'inreg' on function refers to return value
    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::InReg))
      Flags.setInReg();

    // Propagate extension type if any
    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
      Flags.setSExt();
    else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
      Flags.setZExt();

    for (unsigned i = 0; i < NumParts; ++i)
      Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isfixed=*/true, 0, 0));
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty,
                                                   const DataLayout &DL) const {
  return DL.getABITypeAlign(Ty).value();
}

bool TargetLoweringBase::allowsMemoryAccessForAlignment(
    LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
    Align Alignment, MachineMemOperand::Flags Flags, bool *Fast) const {
  // Check if the specified alignment is sufficient based on the data layout.
  // TODO: While using the data layout works in practice, a better solution
  // would be to implement this check directly (make this a virtual function).
  // For example, the ABI alignment may change based on software platform while
  // this function should only be affected by hardware implementation.
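  // For example, an i32 access with 4-byte alignment meets a (typical) 4-byte
  // ABI alignment and is reported as fast below; a 1-byte-aligned access falls
  // through to the target's misaligned-access hook instead.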
  Type *Ty = VT.getTypeForEVT(Context);
  if (VT.isZeroSized() || Alignment >= DL.getABITypeAlign(Ty)) {
    // Assume that an access that meets the ABI-specified alignment is fast.
    if (Fast != nullptr)
      *Fast = true;
    return true;
  }

  // This is a misaligned access.
  return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Flags, Fast);
}

bool TargetLoweringBase::allowsMemoryAccessForAlignment(
    LLVMContext &Context, const DataLayout &DL, EVT VT,
    const MachineMemOperand &MMO, bool *Fast) const {
  return allowsMemoryAccessForAlignment(Context, DL, VT, MMO.getAddrSpace(),
                                        MMO.getAlign(), MMO.getFlags(), Fast);
}

bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                            const DataLayout &DL, EVT VT,
                                            unsigned AddrSpace, Align Alignment,
                                            MachineMemOperand::Flags Flags,
                                            bool *Fast) const {
  return allowsMemoryAccessForAlignment(Context, DL, VT, AddrSpace, Alignment,
                                        Flags, Fast);
}

bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                            const DataLayout &DL, EVT VT,
                                            const MachineMemOperand &MMO,
                                            bool *Fast) const {
  return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(), MMO.getAlign(),
                            MMO.getFlags(), Fast);
}

bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                            const DataLayout &DL, LLT Ty,
                                            const MachineMemOperand &MMO,
                                            bool *Fast) const {
  return allowsMemoryAccess(Context, DL, getMVTForLLT(Ty), MMO.getAddrSpace(),
                            MMO.getAlign(), MMO.getFlags(), Fast);
}

//===----------------------------------------------------------------------===//
// TargetTransformInfo Helpers
//===----------------------------------------------------------------------===//

int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
  enum InstructionOpcodes {
#define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
#define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
#include "llvm/IR/Instruction.def"
  };
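  // A return value of 0 means the IR opcode has no single ISD equivalent;
  // e.g. control flow (Ret, Br, Switch) is handled specially during selection.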
  switch (static_cast<InstructionOpcodes>(Opcode)) {
  case Ret:            return 0;
  case Br:             return 0;
  case Switch:         return 0;
  case IndirectBr:     return 0;
  case Invoke:         return 0;
  case CallBr:         return 0;
  case Resume:         return 0;
  case Unreachable:    return 0;
  case CleanupRet:     return 0;
  case CatchRet:       return 0;
  case CatchPad:       return 0;
  case CatchSwitch:    return 0;
  case CleanupPad:     return 0;
  case FNeg:           return ISD::FNEG;
  case Add:            return ISD::ADD;
  case FAdd:           return ISD::FADD;
  case Sub:            return ISD::SUB;
  case FSub:           return ISD::FSUB;
  case Mul:            return ISD::MUL;
  case FMul:           return ISD::FMUL;
  case UDiv:           return ISD::UDIV;
  case SDiv:           return ISD::SDIV;
  case FDiv:           return ISD::FDIV;
  case URem:           return ISD::UREM;
  case SRem:           return ISD::SREM;
  case FRem:           return ISD::FREM;
  case Shl:            return ISD::SHL;
  case LShr:           return ISD::SRL;
  case AShr:           return ISD::SRA;
  case And:            return ISD::AND;
  case Or:             return ISD::OR;
  case Xor:            return ISD::XOR;
  case Alloca:         return 0;
  case Load:           return ISD::LOAD;
  case Store:          return ISD::STORE;
  case GetElementPtr:  return 0;
  case Fence:          return 0;
  case AtomicCmpXchg:  return 0;
  case AtomicRMW:      return 0;
  case Trunc:          return ISD::TRUNCATE;
  case ZExt:           return ISD::ZERO_EXTEND;
  case SExt:           return ISD::SIGN_EXTEND;
  case FPToUI:         return ISD::FP_TO_UINT;
  case FPToSI:         return ISD::FP_TO_SINT;
  case UIToFP:         return ISD::UINT_TO_FP;
  case SIToFP:         return ISD::SINT_TO_FP;
  case FPTrunc:        return ISD::FP_ROUND;
  case FPExt:          return ISD::FP_EXTEND;
  case PtrToInt:       return ISD::BITCAST;
  case IntToPtr:       return ISD::BITCAST;
  case BitCast:        return ISD::BITCAST;
  case AddrSpaceCast:  return ISD::ADDRSPACECAST;
  case ICmp:           return ISD::SETCC;
  case FCmp:           return ISD::SETCC;
  case PHI:            return 0;
  case Call:           return 0;
  case Select:         return ISD::SELECT;
  case UserOp1:        return 0;
  case UserOp2:        return 0;
  case VAArg:          return 0;
  case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
  case InsertElement:  return ISD::INSERT_VECTOR_ELT;
  case ShuffleVector:  return ISD::VECTOR_SHUFFLE;
  case ExtractValue:   return ISD::MERGE_VALUES;
  case InsertValue:    return ISD::MERGE_VALUES;
  case LandingPad:     return 0;
  case Freeze:         return ISD::FREEZE;
  }

  llvm_unreachable("Unknown instruction type encountered!");
}

std::pair<InstructionCost, MVT>
TargetLoweringBase::getTypeLegalizationCost(const DataLayout &DL,
                                            Type *Ty) const {
  LLVMContext &C = Ty->getContext();
  EVT MTy = getValueType(DL, Ty);

  InstructionCost Cost = 1;
  // We keep legalizing the type until we find a legal kind. We assume that
  // the only operation that costs anything is the split. After splitting
  // we need to handle two types.
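  // For example, on a target whose widest legal vector is v4i32, v16i32 is
  // split twice (v16i32 -> 2 x v8i32 -> 4 x v4i32), giving a cost of 4 and a
  // legalized type of v4i32.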
  while (true) {
    LegalizeKind LK = getTypeConversion(C, MTy);

    if (LK.first == TypeScalarizeScalableVector)
      return std::make_pair(InstructionCost::getInvalid(), MVT::getVT(Ty));

    if (LK.first == TypeLegal)
      return std::make_pair(Cost, MTy.getSimpleVT());

    if (LK.first == TypeSplitVector || LK.first == TypeExpandInteger)
      Cost *= 2;

    // Do not loop with f128 type.
    if (MTy == LK.second)
      return std::make_pair(Cost, MTy.getSimpleVT());

    // Keep legalizing the type.
    MTy = LK.second;
  }
}

Value *
TargetLoweringBase::getDefaultSafeStackPointerLocation(IRBuilderBase &IRB,
                                                       bool UseTLS) const {
  // compiler-rt provides a variable with a magic name. Targets that do not
  // link with compiler-rt may also provide such a variable.
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  const char *UnsafeStackPtrVar = "__safestack_unsafe_stack_ptr";
  auto UnsafeStackPtr =
      dyn_cast_or_null<GlobalVariable>(M->getNamedValue(UnsafeStackPtrVar));

  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());

  if (!UnsafeStackPtr) {
    auto TLSModel = UseTLS ?
        GlobalValue::InitialExecTLSModel :
        GlobalValue::NotThreadLocal;
    // The global variable is not defined yet, define it ourselves.
    // We use the initial-exec TLS model because we do not support the
    // variable living anywhere other than in the main executable.
    UnsafeStackPtr = new GlobalVariable(
        *M, StackPtrTy, false, GlobalValue::ExternalLinkage, nullptr,
        UnsafeStackPtrVar, nullptr, TLSModel);
  } else {
    // The variable exists, check its type and attributes.
    if (UnsafeStackPtr->getValueType() != StackPtrTy)
      report_fatal_error(Twine(UnsafeStackPtrVar) + " must have void* type");
    if (UseTLS != UnsafeStackPtr->isThreadLocal())
      report_fatal_error(Twine(UnsafeStackPtrVar) + " must " +
                         (UseTLS ? "" : "not ") + "be thread-local");
  }
  return UnsafeStackPtr;
}

Value *
TargetLoweringBase::getSafeStackPointerLocation(IRBuilderBase &IRB) const {
  if (!TM.getTargetTriple().isAndroid())
    return getDefaultSafeStackPointerLocation(IRB, true);

  // Android provides a libc function to retrieve the address of the current
  // thread's unsafe stack pointer.
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());
  FunctionCallee Fn = M->getOrInsertFunction("__safestack_pointer_address",
                                             StackPtrTy->getPointerTo(0));
  return IRB.CreateCall(Fn);
}

//===----------------------------------------------------------------------===//
// Loop Strength Reduction hooks
//===----------------------------------------------------------------------===//

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
                                               const AddrMode &AM, Type *Ty,
                                               unsigned AS, Instruction *I) const {
  // The default implementation supports only conservative RISC-style r+r and
  // r+i addressing modes.

  // Allow a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Only support r+r,
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default: // Don't allow n * r
    return false;
  }

  return true;
}

//===----------------------------------------------------------------------===//
// Stack Protector
//===----------------------------------------------------------------------===//

// For OpenBSD return its special guard variable. Otherwise return nullptr,
// so that SelectionDAG handles SSP.
Value *TargetLoweringBase::getIRStackGuard(IRBuilderBase &IRB) const {
  if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
    Module &M = *IRB.GetInsertBlock()->getParent()->getParent();
    PointerType *PtrTy = Type::getInt8PtrTy(M.getContext());
    Constant *C = M.getOrInsertGlobal("__guard_local", PtrTy);
    if (GlobalVariable *G = dyn_cast_or_null<GlobalVariable>(C))
      G->setVisibility(GlobalValue::HiddenVisibility);
    return C;
  }
  return nullptr;
}

// Currently only support "standard" __stack_chk_guard.
// TODO: add LOAD_STACK_GUARD support.
void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
  if (!M.getNamedValue("__stack_chk_guard")) {
    auto *GV = new GlobalVariable(M, Type::getInt8PtrTy(M.getContext()), false,
                                  GlobalVariable::ExternalLinkage, nullptr,
                                  "__stack_chk_guard");
    if (TM.getRelocationModel() == Reloc::Static &&
        !TM.getTargetTriple().isWindowsGNUEnvironment())
      GV->setDSOLocal(true);
  }
}

// Currently only support "standard" __stack_chk_guard.
// TODO: add LOAD_STACK_GUARD support.
Value *TargetLoweringBase::getSDagStackGuard(const Module &M) const {
  return M.getNamedValue("__stack_chk_guard");
}

Function *TargetLoweringBase::getSSPStackGuardCheck(const Module &M) const {
  return nullptr;
}

unsigned TargetLoweringBase::getMinimumJumpTableEntries() const {
  return MinimumJumpTableEntries;
}

void TargetLoweringBase::setMinimumJumpTableEntries(unsigned Val) {
  MinimumJumpTableEntries = Val;
}

unsigned TargetLoweringBase::getMinimumJumpTableDensity(bool OptForSize) const {
  return OptForSize ? OptsizeJumpTableDensity : JumpTableDensity;
}

unsigned TargetLoweringBase::getMaximumJumpTableSize() const {
  return MaximumJumpTableSize;
}

void TargetLoweringBase::setMaximumJumpTableSize(unsigned Val) {
  MaximumJumpTableSize = Val;
}

bool TargetLoweringBase::isJumpTableRelative() const {
  return getTargetMachine().isPositionIndependent();
}

//===----------------------------------------------------------------------===//
// Reciprocal Estimates
//===----------------------------------------------------------------------===//

/// Get the reciprocal estimate attribute string for a function that will
/// override the target defaults.
static StringRef getRecipEstimateForFunc(MachineFunction &MF) {
  const Function &F = MF.getFunction();
  return F.getFnAttribute("reciprocal-estimates").getValueAsString();
}

/// Construct a string for the given reciprocal operation of the given type.
/// This string should match the corresponding option to the front-end's
/// "-mrecip" flag assuming those strings have been passed through in an
/// attribute string. For example, "vec-divf" for a division of a vXf32.
static std::string getReciprocalOpName(bool IsSqrt, EVT VT) {
  std::string Name = VT.isVector() ? "vec-" : "";

  Name += IsSqrt ? "sqrt" : "div";

  // TODO: Handle "half" or other float types?
  if (VT.getScalarType() == MVT::f64) {
    Name += "d";
  } else {
    assert(VT.getScalarType() == MVT::f32 &&
           "Unexpected FP type for reciprocal estimate");
    Name += "f";
  }

  return Name;
}

/// Return true and set the character position and value (a single numeric
/// character) of a customized refinement step in the input string if one
/// exists. Return false if there is no customized refinement step count.
static bool parseRefinementStep(StringRef In, size_t &Position,
                                uint8_t &Value) {
  const char RefStepToken = ':';
  Position = In.find(RefStepToken);
  if (Position == StringRef::npos)
    return false;

  StringRef RefStepString = In.substr(Position + 1);
  // Allow exactly one numeric character for the additional refinement
  // step parameter.
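  // For example, given "divf:2", Position is set to 4 (the ':') and Value
  // to 2.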
  if (RefStepString.size() == 1) {
    char RefStepChar = RefStepString[0];
    if (isDigit(RefStepChar)) {
      Value = RefStepChar - '0';
      return true;
    }
  }
  report_fatal_error("Invalid refinement step for -recip.");
}

/// For the input attribute string, return one of the ReciprocalEstimate enum
/// status values (enabled, disabled, or not specified) for this operation on
/// the specified data type.
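// The attribute string is a comma-separated list of per-operation tokens,
// e.g. "!sqrtf,vec-divf:2": a leading '!' disables the estimate for that
// operation, and an optional ":N" suffix requests N refinement steps.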
static int getOpEnabled(bool IsSqrt, EVT VT, StringRef Override) {
  if (Override.empty())
    return TargetLoweringBase::ReciprocalEstimate::Unspecified;

  SmallVector<StringRef, 4> OverrideVector;
  Override.split(OverrideVector, ',');
  unsigned NumArgs = OverrideVector.size();

  // Check if "all", "none", or "default" was specified.
  if (NumArgs == 1) {
    // Look for an optional setting of the number of refinement steps needed
    // for this type of reciprocal operation.
    size_t RefPos;
    uint8_t RefSteps;
    if (parseRefinementStep(Override, RefPos, RefSteps)) {
      // Split the string for further processing.
      Override = Override.substr(0, RefPos);
    }

    // All reciprocal types are enabled.
    if (Override == "all")
      return TargetLoweringBase::ReciprocalEstimate::Enabled;

    // All reciprocal types are disabled.
    if (Override == "none")
      return TargetLoweringBase::ReciprocalEstimate::Disabled;

    // Target defaults for enablement are used.
    if (Override == "default")
      return TargetLoweringBase::ReciprocalEstimate::Unspecified;
  }

  // The attribute string may omit the size suffix ('f'/'d').
  std::string VTName = getReciprocalOpName(IsSqrt, VT);
  std::string VTNameNoSize = VTName;
  VTNameNoSize.pop_back();
  static const char DisabledPrefix = '!';

  for (StringRef RecipType : OverrideVector) {
    size_t RefPos;
    uint8_t RefSteps;
    if (parseRefinementStep(RecipType, RefPos, RefSteps))
      RecipType = RecipType.substr(0, RefPos);

    // Ignore the disablement token for string matching.
    bool IsDisabled = RecipType[0] == DisabledPrefix;
    if (IsDisabled)
      RecipType = RecipType.substr(1);

    if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
      return IsDisabled ? TargetLoweringBase::ReciprocalEstimate::Disabled
                        : TargetLoweringBase::ReciprocalEstimate::Enabled;
  }

  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
}

/// For the input attribute string, return the customized refinement step count
/// for this operation on the specified data type. If the step count does not
/// exist, return the ReciprocalEstimate enum value for unspecified.
static int getOpRefinementSteps(bool IsSqrt, EVT VT, StringRef Override) {
  if (Override.empty())
    return TargetLoweringBase::ReciprocalEstimate::Unspecified;

  SmallVector<StringRef, 4> OverrideVector;
  Override.split(OverrideVector, ',');
  unsigned NumArgs = OverrideVector.size();

  // Check if "all", "default", or "none" was specified.
  if (NumArgs == 1) {
    // Look for an optional setting of the number of refinement steps needed
    // for this type of reciprocal operation.
    size_t RefPos;
    uint8_t RefSteps;
    if (!parseRefinementStep(Override, RefPos, RefSteps))
      return TargetLoweringBase::ReciprocalEstimate::Unspecified;

    // Split the string for further processing.
    Override = Override.substr(0, RefPos);
    assert(Override != "none" &&
           "Disabled reciprocals, but specified refinement steps?");

    // If this is a general override, return the specified number of steps.
    if (Override == "all" || Override == "default")
      return RefSteps;
  }

  // The attribute string may omit the size suffix ('f'/'d').
  std::string VTName = getReciprocalOpName(IsSqrt, VT);
  std::string VTNameNoSize = VTName;
  VTNameNoSize.pop_back();

  for (StringRef RecipType : OverrideVector) {
    size_t RefPos;
    uint8_t RefSteps;
    if (!parseRefinementStep(RecipType, RefPos, RefSteps))
      continue;

    RecipType = RecipType.substr(0, RefPos);
    if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
      return RefSteps;
  }

  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
}

int TargetLoweringBase::getRecipEstimateSqrtEnabled(EVT VT,
                                                    MachineFunction &MF) const {
  return getOpEnabled(true, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getRecipEstimateDivEnabled(EVT VT,
                                                   MachineFunction &MF) const {
  return getOpEnabled(false, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getSqrtRefinementSteps(EVT VT,
                                               MachineFunction &MF) const {
  return getOpRefinementSteps(true, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getDivRefinementSteps(EVT VT,
                                              MachineFunction &MF) const {
  return getOpRefinementSteps(false, VT, getRecipEstimateForFunc(MF));
}

void TargetLoweringBase::finalizeLowering(MachineFunction &MF) const {
  MF.getRegInfo().freezeReservedRegs(MF);
}

MachineMemOperand::Flags
TargetLoweringBase::getLoadMemOperandFlags(const LoadInst &LI,
                                           const DataLayout &DL) const {
  MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad;
  if (LI.isVolatile())
    Flags |= MachineMemOperand::MOVolatile;

  if (LI.hasMetadata(LLVMContext::MD_nontemporal))
    Flags |= MachineMemOperand::MONonTemporal;

  if (LI.hasMetadata(LLVMContext::MD_invariant_load))
    Flags |= MachineMemOperand::MOInvariant;

  if (isDereferenceablePointer(LI.getPointerOperand(), LI.getType(), DL))
    Flags |= MachineMemOperand::MODereferenceable;

  Flags |= getTargetMMOFlags(LI);
  return Flags;
}

MachineMemOperand::Flags
TargetLoweringBase::getStoreMemOperandFlags(const StoreInst &SI,
                                            const DataLayout &DL) const {
  MachineMemOperand::Flags Flags = MachineMemOperand::MOStore;

  if (SI.isVolatile())
    Flags |= MachineMemOperand::MOVolatile;

  if (SI.hasMetadata(LLVMContext::MD_nontemporal))
    Flags |= MachineMemOperand::MONonTemporal;

  // FIXME: Not preserving dereferenceable
  Flags |= getTargetMMOFlags(SI);
  return Flags;
}

MachineMemOperand::Flags
TargetLoweringBase::getAtomicMemOperandFlags(const Instruction &AI,
                                             const DataLayout &DL) const {
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

  if (const AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(&AI)) {
    if (RMW->isVolatile())
      Flags |= MachineMemOperand::MOVolatile;
  } else if (const AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(&AI)) {
    if (CmpX->isVolatile())
      Flags |= MachineMemOperand::MOVolatile;
  } else
    llvm_unreachable("not an atomic instruction");

  // FIXME: Not preserving dereferenceable
  Flags |= getTargetMMOFlags(AI);
  return Flags;
}

Instruction *TargetLoweringBase::emitLeadingFence(IRBuilderBase &Builder,
                                                  Instruction *Inst,
                                                  AtomicOrdering Ord) const {
  if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore())
    return Builder.CreateFence(Ord);
  else
    return nullptr;
}

Instruction *TargetLoweringBase::emitTrailingFence(IRBuilderBase &Builder,
                                                   Instruction *Inst,
                                                   AtomicOrdering Ord) const {
  if (isAcquireOrStronger(Ord))
    return Builder.CreateFence(Ord);
  else
    return nullptr;
}

//===----------------------------------------------------------------------===//
// GlobalISel Hooks
//===----------------------------------------------------------------------===//

bool TargetLoweringBase::shouldLocalize(const MachineInstr &MI,
                                        const TargetTransformInfo *TTI) const {
  auto &MF = *MI.getMF();
  auto &MRI = MF.getRegInfo();
  // Assuming a spill and reload of a value has a cost of 1 instruction each,
  // this helper function computes the maximum number of uses we should consider
  // for remat. E.g. on arm64 global addresses take 2 insts to materialize. We
  // break even in terms of code size when the original MI has 2 users vs
  // choosing to potentially spill. Any more than 2 users and we have a net code
  // size increase. This doesn't take into account register pressure though.
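  // For example, a G_GLOBAL_VALUE whose remat cost is 2 is localized only if
  // it has at most 2 users; with a cost above 2, only if it has a single user.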
  auto maxUses = [](unsigned RematCost) {
    // A cost of 1 means remats are basically free.
    if (RematCost == 1)
      return UINT_MAX;
    if (RematCost == 2)
      return 2U;

    // Remat is too expensive, only sink if there's one user.
    if (RematCost > 2)
      return 1U;
    llvm_unreachable("Unexpected remat cost");
  };

  // Helper to walk through uses and terminate if we've reached a limit. Saves
  // us spending time traversing uses if all we want to know is if it's >= min.
  auto isUsesAtMost = [&](unsigned Reg, unsigned MaxUses) {
    unsigned NumUses = 0;
    auto UI = MRI.use_instr_nodbg_begin(Reg), UE = MRI.use_instr_nodbg_end();
    for (; UI != UE && NumUses < MaxUses; ++UI) {
      NumUses++;
    }
    // If we haven't reached the end yet then there are more than MaxUses users.
    return UI == UE;
  };

  switch (MI.getOpcode()) {
  default:
    return false;
  // Constants-like instructions should be close to their users.
  // We don't want long live-ranges for them.
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
  case TargetOpcode::G_FRAME_INDEX:
  case TargetOpcode::G_INTTOPTR:
    return true;
  case TargetOpcode::G_GLOBAL_VALUE: {
    unsigned RematCost = TTI->getGISelRematGlobalCost();
    Register Reg = MI.getOperand(0).getReg();
    unsigned MaxUses = maxUses(RematCost);
    if (MaxUses == UINT_MAX)
      return true; // Remats are "free" so always localize.
    bool B = isUsesAtMost(Reg, MaxUses);
    return B;
  }
  }
}
