//===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#include "SelectionDAGBuilder.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/SwiftErrorValueTracking.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <limits>
#include <numeric>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace PatternMatch;
using namespace SwitchCG;

#define DEBUG_TYPE "isel"

/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;

static cl::opt<bool>
    InsertAssertAlign("insert-assert-align", cl::init(true),
                      cl::desc("Insert the experimental `assertalign` node."),
                      cl::ReallyHidden);

static cl::opt<unsigned, true>
    LimitFPPrecision("limit-float-precision",
                     cl::desc("Generate low-precision inline sequences "
                              "for some float libcalls"),
                     cl::location(LimitFloatPrecision), cl::Hidden,
                     cl::init(0));
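// For example, running "llc -limit-float-precision=6" lets the intrinsic
// lowering later in this file expand calls such as exp2f/log2f into short
// polynomial sequences that are only accurate to roughly 6 mantissa bits.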
149
150 static cl::opt<unsigned> SwitchPeelThreshold(
151 "switch-peel-threshold", cl::Hidden, cl::init(66),
152 cl::desc("Set the case probability threshold for peeling the case from a "
153 "switch statement. A value greater than 100 will void this "
154 "optimization"));
155
156 // Limit the width of DAG chains. This is important in general to prevent
157 // DAG-based analysis from blowing up. For example, alias analysis and
158 // load clustering may not complete in reasonable time. It is difficult to
159 // recognize and avoid this situation within each individual analysis, and
160 // future analyses are likely to have the same behavior. Limiting DAG width is
161 // the safe approach and will be especially important with global DAGs.
162 //
163 // MaxParallelChains default is arbitrarily high to avoid affecting
164 // optimization, but could be lowered to improve compile time. Any ld-ld-st-st
165 // sequence over this should have been converted to llvm.memcpy by the
166 // frontend. It is easy to induce this behavior with .ll code such as:
167 // %buffer = alloca [4096 x i8]
168 // %data = load [4096 x i8]* %argPtr
169 // store [4096 x i8] %data, [4096 x i8]* %buffer
170 static const unsigned MaxParallelChains = 64;
171
172 static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
173 const SDValue *Parts, unsigned NumParts,
174 MVT PartVT, EVT ValueVT, const Value *V,
175 Optional<CallingConv::ID> CC);
176
177 /// getCopyFromParts - Create a value that contains the specified legal parts
178 /// combined into the value they represent. If the parts combine to a type
179 /// larger than ValueVT then AssertOp can be used to specify whether the extra
180 /// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
181 /// (ISD::AssertSext).
getCopyFromParts(SelectionDAG & DAG,const SDLoc & DL,const SDValue * Parts,unsigned NumParts,MVT PartVT,EVT ValueVT,const Value * V,Optional<CallingConv::ID> CC=None,Optional<ISD::NodeType> AssertOp=None)182 static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL,
183 const SDValue *Parts, unsigned NumParts,
184 MVT PartVT, EVT ValueVT, const Value *V,
185 Optional<CallingConv::ID> CC = None,
186 Optional<ISD::NodeType> AssertOp = None) {
187 // Let the target assemble the parts if it wants to
188 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
189 if (SDValue Val = TLI.joinRegisterPartsIntoValue(DAG, DL, Parts, NumParts,
190 PartVT, ValueVT, CC))
191 return Val;
192
193 if (ValueVT.isVector())
194 return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V,
195 CC);
196
197 assert(NumParts > 0 && "No parts to assemble!");
198 SDValue Val = Parts[0];
199
200 if (NumParts > 1) {
201 // Assemble the value from multiple parts.
202 if (ValueVT.isInteger()) {
203 unsigned PartBits = PartVT.getSizeInBits();
204 unsigned ValueBits = ValueVT.getSizeInBits();
205
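      // Example: reassembling an i96 from three i32 parts first rounds
      // NumParts down to a power of two (RoundParts = 2), builds the low
      // i64 from Parts[0..1], then ORs in the remaining odd part shifted
      // into the high bits.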
      // Assemble the power of 2 part.
      unsigned RoundParts =
          (NumParts & (NumParts - 1)) ? 1 << Log2_32(NumParts) : NumParts;
      unsigned RoundBits = PartBits * RoundParts;
      EVT RoundVT = RoundBits == ValueBits ?
          ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
      SDValue Lo, Hi;

      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);

      if (RoundParts > 2) {
        Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
                              PartVT, HalfVT, V);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
                              RoundParts / 2, PartVT, HalfVT, V);
      } else {
        Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
        Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
      }

      if (DAG.getDataLayout().isBigEndian())
        std::swap(Lo, Hi);

      Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);

      if (RoundParts < NumParts) {
        // Assemble the trailing non-power-of-2 part.
        unsigned OddParts = NumParts - RoundParts;
        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT,
                              OddVT, V, CC);

        // Combine the round and odd parts.
        Lo = Val;
        if (DAG.getDataLayout().isBigEndian())
          std::swap(Lo, Hi);
        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
        Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
        Hi =
            DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
                        DAG.getConstant(Lo.getValueSizeInBits(), DL,
                                        TLI.getPointerTy(DAG.getDataLayout())));
        Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
        Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
      }
    } else if (PartVT.isFloatingPoint()) {
      // FP split into multiple FP parts (for ppcf128)
      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
             "Unexpected split");
      SDValue Lo, Hi;
      Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
      Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
      if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
    } else {
      // FP split into integer parts (soft fp)
      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
             !PartVT.isVector() && "Unexpected split");
      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
      Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V, CC);
    }
  }

  // There is now one part, held in Val. Correct it to match ValueVT.
  // PartEVT is the type of the register class that holds the value.
  // ValueVT is the type of the inline asm operation.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
      ValueVT.bitsLT(PartEVT)) {
    // For an FP value in an integer part, we need to truncate to the right
    // width first.
    PartEVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
    Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
  }

  // Handle types that have the same size.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  // Handle types with different sizes.
  if (PartEVT.isInteger() && ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartEVT)) {
      // For a truncate, see if we have any information to
      // indicate whether the truncated bits will always be
      // zero or sign-extension.
      if (AssertOp.hasValue())
        Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    }
    return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
  }

  if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    // FP_ROUND's are always exact here.
    if (ValueVT.bitsLT(Val.getValueType()))
      return DAG.getNode(
          ISD::FP_ROUND, DL, ValueVT, Val,
          DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout())));

    return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
  }

  // Handle MMX to a narrower integer type by bitcasting MMX to integer and
  // then truncating.
  if (PartEVT == MVT::x86mmx && ValueVT.isInteger() &&
      ValueVT.bitsLT(PartEVT)) {
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Val);
    return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  report_fatal_error("Unknown mismatch in getCopyFromParts!");
}
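// A minimal usage sketch, mirroring the call RegsForValue::getCopyFromRegs
// makes below: reassemble an i64 that arrived in two i32 parts (little-endian
// part order assumed).
//   SDValue Parts[] = {Lo32, Hi32};
//   SDValue V64 = getCopyFromParts(DAG, DL, Parts, 2, MVT::i32,
//                                  EVT(MVT::i64), V);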

static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
                                              const Twine &ErrMsg) {
  const Instruction *I = dyn_cast_or_null<Instruction>(V);
  if (!I)
    return Ctx.emitError(ErrMsg);

  const char *AsmError = ", possible invalid constraint for vector type";
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (CI->isInlineAsm())
      return Ctx.emitError(I, ErrMsg + AsmError);

  return Ctx.emitError(I, ErrMsg);
}

/// getCopyFromPartsVector - Create a value that contains the specified legal
/// parts combined into the value they represent. If the parts combine to a
/// type larger than ValueVT then AssertOp can be used to specify whether the
/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
/// ValueVT (ISD::AssertSext).
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      Optional<CallingConv::ID> CallConv) {
  assert(ValueVT.isVector() && "Not a vector value");
  assert(NumParts > 0 && "No parts to assemble!");
  const bool IsABIRegCopy = CallConv.hasValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  // Handle a multi-element vector.
  if (NumParts > 1) {
    EVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    unsigned NumRegs;

    if (IsABIRegCopy) {
      NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
          *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
          NumIntermediates, RegisterVT);
    } else {
      NumRegs =
          TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                     NumIntermediates, RegisterVT);
    }

    assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    NumParts = NumRegs; // Silence a compiler warning.
    assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    assert(RegisterVT.getSizeInBits() ==
               Parts[0].getSimpleValueType().getSizeInBits() &&
           "Part type sizes don't match!");

    // Assemble the parts into intermediate operands.
    SmallVector<SDValue, 8> Ops(NumIntermediates);
    if (NumIntermediates == NumParts) {
      // If the register was not expanded, truncate or copy the value,
      // as appropriate.
      for (unsigned i = 0; i != NumParts; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
                                  PartVT, IntermediateVT, V, CallConv);
    } else if (NumParts > 0) {
      // If the intermediate type was expanded, build the intermediate
      // operands from the parts.
      assert(NumParts % NumIntermediates == 0 &&
             "Must expand into a divisible number of parts!");
      unsigned Factor = NumParts / NumIntermediates;
      for (unsigned i = 0; i != NumIntermediates; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
                                  PartVT, IntermediateVT, V, CallConv);
    }

    // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
    // intermediate operands.
    EVT BuiltVectorTy =
        IntermediateVT.isVector()
            ? EVT::getVectorVT(
                  *DAG.getContext(), IntermediateVT.getScalarType(),
                  IntermediateVT.getVectorElementCount() * NumParts)
            : EVT::getVectorVT(*DAG.getContext(),
                               IntermediateVT.getScalarType(),
                               NumIntermediates);
    Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
                                                : ISD::BUILD_VECTOR,
                      DL, BuiltVectorTy, Ops);
  }

  // There is now one part, held in Val. Correct it to match ValueVT.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isVector()) {
    // If the element type of the source/dest vectors are the same, but the
    // parts vector has more elements than the value vector, then we have a
    // vector widening case (e.g. <2 x float> -> <4 x float>). Extract the
    // elements we want.
    if (PartEVT.getVectorElementType() == ValueVT.getVectorElementType()) {
      assert((PartEVT.getVectorElementCount().Min >
              ValueVT.getVectorElementCount().Min) &&
             (PartEVT.getVectorElementCount().Scalable ==
              ValueVT.getVectorElementCount().Scalable) &&
             "Cannot narrow, it would be a lossy transformation");
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
                         DAG.getVectorIdxConstant(0, DL));
    }

    // Vector/Vector bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

    assert(PartEVT.getVectorElementCount() == ValueVT.getVectorElementCount() &&
           "Cannot handle this kind of promotion");
    // Promoted vector extract
    return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
  }

  // Trivial bitcast if the types are the same size and the destination
  // vector type is legal.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
      TLI.isTypeLegal(ValueVT))
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  if (ValueVT.getVectorNumElements() != 1) {
    // Certain ABIs require that vectors are passed as integers. For vectors
    // that are the same size, this is an obvious bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    } else if (ValueVT.getSizeInBits() < PartEVT.getSizeInBits()) {
      // Bitcast Val back the original type and extract the corresponding
      // vector we want.
      unsigned Elts = PartEVT.getSizeInBits() / ValueVT.getScalarSizeInBits();
      EVT WiderVecType = EVT::getVectorVT(*DAG.getContext(),
                                          ValueVT.getVectorElementType(), Elts);
      Val = DAG.getBitcast(WiderVecType, Val);
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
                         DAG.getVectorIdxConstant(0, DL));
    }

    diagnosePossiblyInvalidConstraint(
        *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
    return DAG.getUNDEF(ValueVT);
  }

  // Handle cases such as i8 -> <1 x i1>
  EVT ValueSVT = ValueVT.getVectorElementType();
  if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT) {
    if (ValueSVT.getSizeInBits() == PartEVT.getSizeInBits())
      Val = DAG.getNode(ISD::BITCAST, DL, ValueSVT, Val);
    else
      Val = ValueVT.isFloatingPoint()
                ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
                : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);
  }

  return DAG.getBuildVector(ValueVT, DL, Val);
}

static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 Optional<CallingConv::ID> CallConv);

/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts. If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
                           SDValue *Parts, unsigned NumParts, MVT PartVT,
                           const Value *V,
                           Optional<CallingConv::ID> CallConv = None,
                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
  // Let the target split the parts if it wants to
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.splitValueIntoRegisterParts(DAG, DL, Val, Parts, NumParts, PartVT,
                                      CallConv))
    return;
  EVT ValueVT = Val.getValueType();

  // Handle the vector case separately.
  if (ValueVT.isVector())
    return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
                                CallConv);

  unsigned PartBits = PartVT.getSizeInBits();
  unsigned OrigNumParts = NumParts;
  assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
         "Copying to an illegal type!");

  if (NumParts == 0)
    return;

  assert(!ValueVT.isVector() && "Vector case handled elsewhere");
  EVT PartEVT = PartVT;
  if (PartEVT == ValueVT) {
    assert(NumParts == 1 && "No-op copy with multiple parts!");
    Parts[0] = Val;
    return;
  }

  if (NumParts * PartBits > ValueVT.getSizeInBits()) {
    // If the parts cover more bits than the value has, promote the value.
    if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
      assert(NumParts == 1 && "Do not know what to promote to!");
      Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
    } else {
      if (ValueVT.isFloatingPoint()) {
        // FP values need to be bitcast, then extended if they are being put
        // into a larger container.
        ValueVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
        Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
      }
      assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
             ValueVT.isInteger() &&
             "Unknown mismatch!");
      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
      Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
      if (PartVT == MVT::x86mmx)
        Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }
  } else if (PartBits == ValueVT.getSizeInBits()) {
    // Different types of the same size.
    assert(NumParts == 1 && PartEVT != ValueVT);
    Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
    // If the parts cover less bits than value has, truncate the value.
    assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
           ValueVT.isInteger() &&
           "Unknown mismatch!");
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    if (PartVT == MVT::x86mmx)
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  }

  // The value may have changed - recompute ValueVT.
  ValueVT = Val.getValueType();
  assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
         "Failed to tile the value with PartVT!");

  if (NumParts == 1) {
    if (PartEVT != ValueVT) {
      diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                        "scalar-to-vector conversion failed");
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }

    Parts[0] = Val;
    return;
  }

  // Expand the value into multiple parts.
  if (NumParts & (NumParts - 1)) {
    // The number of parts is not a power of 2. Split off and copy the tail.
    assert(PartVT.isInteger() && ValueVT.isInteger() &&
           "Do not know what to expand to!");
    unsigned RoundParts = 1 << Log2_32(NumParts);
    unsigned RoundBits = RoundParts * PartBits;
    unsigned OddParts = NumParts - RoundParts;
    SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
        DAG.getShiftAmountConstant(RoundBits, ValueVT, DL, /*LegalTypes*/false));

    getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
                   CallConv);

    if (DAG.getDataLayout().isBigEndian())
      // The odd parts were reversed by getCopyToParts - unreverse them.
      std::reverse(Parts + RoundParts, Parts + NumParts);

    NumParts = RoundParts;
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }
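  // At this point NumParts is a power of two and Val has been truncated to
  // NumParts * PartBits bits. E.g. for an i96 copied into three i32 parts,
  // the high i32 was peeled off above and Val is now the low i64.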

  // The number of parts is a power of 2. Repeatedly bisect the value using
  // EXTRACT_ELEMENT.
  Parts[0] = DAG.getNode(ISD::BITCAST, DL,
                         EVT::getIntegerVT(*DAG.getContext(),
                                           ValueVT.getSizeInBits()),
                         Val);

  for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
    for (unsigned i = 0; i < NumParts; i += StepSize) {
      unsigned ThisBits = StepSize * PartBits / 2;
      EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
      SDValue &Part0 = Parts[i];
      SDValue &Part1 = Parts[i+StepSize/2];

      Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
      Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(0, DL));

      if (ThisBits == PartBits && ThisVT != PartVT) {
        Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
        Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
      }
    }
  }

  if (DAG.getDataLayout().isBigEndian())
    std::reverse(Parts, Parts + OrigNumParts);
}
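// A usage sketch mirroring the i64-in-two-i32 example above, in the opposite
// direction:
//   SDValue Parts[2];
//   getCopyToParts(DAG, DL, V64, Parts, 2, MVT::i32, V);
//   // Parts[0]/Parts[1] now hold the low/high i32 (little-endian order).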

static SDValue widenVectorToPartType(SelectionDAG &DAG,
                                     SDValue Val, const SDLoc &DL, EVT PartVT) {
  if (!PartVT.isFixedLengthVector())
    return SDValue();

  EVT ValueVT = Val.getValueType();
  unsigned PartNumElts = PartVT.getVectorNumElements();
  unsigned ValueNumElts = ValueVT.getVectorNumElements();
  if (PartNumElts > ValueNumElts &&
      PartVT.getVectorElementType() == ValueVT.getVectorElementType()) {
    EVT ElementVT = PartVT.getVectorElementType();
    // Vector widening case, e.g. <2 x float> -> <4 x float>. Shuffle in
    // undef elements.
    SmallVector<SDValue, 16> Ops;
    DAG.ExtractVectorElements(Val, Ops);
    SDValue EltUndef = DAG.getUNDEF(ElementVT);
    for (unsigned i = ValueNumElts, e = PartNumElts; i != e; ++i)
      Ops.push_back(EltUndef);

    // FIXME: Use CONCAT for 2x -> 4x.
    return DAG.getBuildVector(PartVT, DL, Ops);
  }

  return SDValue();
}

/// getCopyToPartsVector - Create a series of nodes that contain the specified
/// value split into legal parts.
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 Optional<CallingConv::ID> CallConv) {
  EVT ValueVT = Val.getValueType();
  assert(ValueVT.isVector() && "Not a vector");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const bool IsABIRegCopy = CallConv.hasValue();

  if (NumParts == 1) {
    EVT PartEVT = PartVT;
    if (PartEVT == ValueVT) {
      // Nothing to do.
    } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
      // Bitconvert vector->vector case.
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    } else if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, PartVT)) {
      Val = Widened;
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType().bitsGE(
                   ValueVT.getVectorElementType()) &&
               PartEVT.getVectorElementCount() ==
                   ValueVT.getVectorElementCount()) {

      // Promoted vector extract
      Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
    } else {
      if (ValueVT.getVectorNumElements() == 1) {
        Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
                          DAG.getVectorIdxConstant(0, DL));
      } else {
        assert(PartVT.getSizeInBits() > ValueVT.getSizeInBits() &&
               "lossy conversion of vector to scalar type");
        EVT IntermediateType =
            EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
        Val = DAG.getBitcast(IntermediateType, Val);
        Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
      }
    }

    assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
    Parts[0] = Val;
    return;
  }

  // Handle a multi-element vector.
  EVT IntermediateVT;
  MVT RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs;
  if (IsABIRegCopy) {
    NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
        *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
        NumIntermediates, RegisterVT);
  } else {
    NumRegs =
        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                   NumIntermediates, RegisterVT);
  }

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");

  assert(IntermediateVT.isScalableVector() == ValueVT.isScalableVector() &&
         "Mixing scalable and fixed vectors when copying in parts");

  ElementCount DestEltCnt;

  if (IntermediateVT.isVector())
    DestEltCnt = IntermediateVT.getVectorElementCount() * NumIntermediates;
  else
    DestEltCnt = ElementCount(NumIntermediates, false);

  EVT BuiltVectorTy = EVT::getVectorVT(
      *DAG.getContext(), IntermediateVT.getScalarType(), DestEltCnt);
  if (ValueVT != BuiltVectorTy) {
    if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy))
      Val = Widened;

    Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
  }

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i) {
    if (IntermediateVT.isVector()) {
      // This does something sensible for scalable vectors - see the
      // definition of EXTRACT_SUBVECTOR for further details.
      unsigned IntermediateNumElts = IntermediateVT.getVectorMinNumElements();
      Ops[i] =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
                      DAG.getVectorIdxConstant(i * IntermediateNumElts, DL));
    } else {
      Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
                           DAG.getVectorIdxConstant(i, DL));
    }
  }

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split each intermediate value
    // into legal parts.
    assert(NumIntermediates != 0 && "division by zero");
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
                     CallConv);
  }
}

RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
                           EVT valuevt, Optional<CallingConv::ID> CC)
    : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
      RegCount(1, regs.size()), CallConv(CC) {}

RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
                           const DataLayout &DL, unsigned Reg, Type *Ty,
                           Optional<CallingConv::ID> CC) {
  ComputeValueVTs(TLI, DL, Ty, ValueVTs);

  CallConv = CC;

  for (EVT ValueVT : ValueVTs) {
    unsigned NumRegs =
        isABIMangled()
            ? TLI.getNumRegistersForCallingConv(Context, CC.getValue(), ValueVT)
            : TLI.getNumRegisters(Context, ValueVT);
    MVT RegisterVT =
        isABIMangled()
            ? TLI.getRegisterTypeForCallingConv(Context, CC.getValue(), ValueVT)
            : TLI.getRegisterType(Context, ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i)
      Regs.push_back(Reg + i);
    RegVTs.push_back(RegisterVT);
    RegCount.push_back(NumRegs);
    Reg += NumRegs;
  }
}
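// For example, building a RegsForValue for an i64 value on a target with
// 32-bit registers yields ValueVTs = {i64}, RegVTs = {i32}, RegCount = {2},
// and Regs = {Reg, Reg + 1}.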

SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
                                      FunctionLoweringInfo &FuncInfo,
                                      const SDLoc &dl, SDValue &Chain,
                                      SDValue *Flag, const Value *V) const {
  // A Value with type {} or [0 x %t] needs no registers.
  if (ValueVTs.empty())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Assemble the legal parts into the final values.
  SmallVector<SDValue, 4> Values(ValueVTs.size());
  SmallVector<SDValue, 8> Parts;
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    // Copy the legal parts from the registers.
    EVT ValueVT = ValueVTs[Value];
    unsigned NumRegs = RegCount[Value];
    MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
                                          *DAG.getContext(),
                                          CallConv.getValue(), RegVTs[Value])
                                    : RegVTs[Value];

    Parts.resize(NumRegs);
    for (unsigned i = 0; i != NumRegs; ++i) {
      SDValue P;
      if (!Flag) {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
      } else {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
        *Flag = P.getValue(2);
      }

      Chain = P.getValue(1);
      Parts[i] = P;

      // If the source register was virtual and if we know something about it,
      // add an assert node.
      if (!Register::isVirtualRegister(Regs[Part + i]) ||
          !RegisterVT.isInteger())
        continue;

      const FunctionLoweringInfo::LiveOutInfo *LOI =
          FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
      if (!LOI)
        continue;

      unsigned RegSize = RegisterVT.getScalarSizeInBits();
      unsigned NumSignBits = LOI->NumSignBits;
      unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();

      if (NumZeroBits == RegSize) {
        // The current value is a zero.
        // Explicitly express that as it would be easier for
        // optimizations to kick in.
        Parts[i] = DAG.getConstant(0, dl, RegisterVT);
        continue;
      }

      // FIXME: We capture more information than the dag can represent. For
      // now, just use the tightest assertzext/assertsext possible.
      bool isSExt;
      EVT FromVT(MVT::Other);
      if (NumZeroBits) {
        FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
        isSExt = false;
      } else if (NumSignBits > 1) {
        FromVT =
            EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
        isSExt = true;
      } else {
        continue;
      }
      // Add an assertion node.
      assert(FromVT != MVT::Other);
      Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
                             RegisterVT, P, DAG.getValueType(FromVT));
    }

    Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
                                     RegisterVT, ValueVT, V, CallConv);
    Part += NumRegs;
    Parts.clear();
  }

  return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
}

void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
                                 const SDLoc &dl, SDValue &Chain, SDValue *Flag,
                                 const Value *V,
                                 ISD::NodeType PreferredExtendType) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  ISD::NodeType ExtendKind = PreferredExtendType;

  // Get the list of the value's legal parts.
  unsigned NumRegs = Regs.size();
  SmallVector<SDValue, 8> Parts(NumRegs);
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumParts = RegCount[Value];

    MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
                                          *DAG.getContext(),
                                          CallConv.getValue(), RegVTs[Value])
                                    : RegVTs[Value];

    if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
      ExtendKind = ISD::ZERO_EXTEND;

    getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
                   NumParts, RegisterVT, V, CallConv, ExtendKind);
    Part += NumParts;
  }

  // Copy the parts into the registers.
  SmallVector<SDValue, 8> Chains(NumRegs);
  for (unsigned i = 0; i != NumRegs; ++i) {
    SDValue Part;
    if (!Flag) {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
    } else {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
      *Flag = Part.getValue(1);
    }

    Chains[i] = Part.getValue(0);
  }

  if (NumRegs == 1 || Flag)
    // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
    // flagged to it. That is the CopyToReg nodes and the user are considered
    // a single scheduling unit. If we create a TokenFactor and return it as
    // chain, then the TokenFactor is both a predecessor (operand) of the
    // user as well as a successor (the TF operands are flagged to the user).
    // c1, f1 = CopyToReg
    // c2, f2 = CopyToReg
    // c3     = TokenFactor c1, c2
    // ...
    //        = op c3, ..., f2
    Chain = Chains[NumRegs-1];
  else
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}

void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
                                        unsigned MatchingIdx, const SDLoc &dl,
                                        SelectionDAG &DAG,
                                        std::vector<SDValue> &Ops) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
  if (HasMatching)
    Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
  else if (!Regs.empty() && Register::isVirtualRegister(Regs.front())) {
    // Put the register class of the virtual registers in the flag word. That
    // way, later passes can recompute register class constraints for inline
    // assembly as well as normal instructions.
    // Don't do this for tied operands that can use the regclass information
    // from the def.
    const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
    Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
  }

  SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
  Ops.push_back(Res);

  if (Code == InlineAsm::Kind_Clobber) {
    // Clobbers should always have a 1:1 mapping with registers, and may
    // reference registers that have illegal (e.g. vector) types. Hence, we
    // shouldn't try to apply any sort of splitting logic to them.
    assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&
           "No 1:1 mapping from clobbers to regs?");
    unsigned SP = TLI.getStackPointerRegisterToSaveRestore();
    (void)SP;
    for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
      Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
      assert(
          (Regs[I] != SP ||
           DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) &&
          "If we clobbered the stack pointer, MFI should know about it.");
    }
    return;
  }

  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
    MVT RegisterVT = RegVTs[Value];
    for (unsigned i = 0; i != NumRegs; ++i) {
      assert(Reg < Regs.size() && "Mismatch in # registers expected");
      unsigned TheReg = Regs[Reg++];
      Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
    }
  }
}

SmallVector<std::pair<unsigned, unsigned>, 4>
RegsForValue::getRegsAndSizes() const {
  SmallVector<std::pair<unsigned, unsigned>, 4> OutVec;
  unsigned I = 0;
  for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
    unsigned RegCount = std::get<0>(CountAndVT);
    MVT RegisterVT = std::get<1>(CountAndVT);
    unsigned RegisterSize = RegisterVT.getSizeInBits();
    for (unsigned E = I + RegCount; I != E; ++I)
      OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
  }
  return OutVec;
}

void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis *aa,
                               const TargetLibraryInfo *li) {
  AA = aa;
  GFI = gfi;
  LibInfo = li;
  DL = &DAG.getDataLayout();
  Context = DAG.getContext();
  LPadToCallSiteMap.clear();
  SL->init(DAG.getTargetLoweringInfo(), TM, DAG.getDataLayout());
}

void SelectionDAGBuilder::clear() {
  NodeMap.clear();
  UnusedArgNodeMap.clear();
  PendingLoads.clear();
  PendingExports.clear();
  PendingConstrainedFP.clear();
  PendingConstrainedFPStrict.clear();
  CurInst = nullptr;
  HasTailCall = false;
  SDNodeOrder = LowestSDNodeOrder;
  StatepointLowering.clear();
}

void SelectionDAGBuilder::clearDanglingDebugInfo() {
  DanglingDebugInfoMap.clear();
}

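// The builder accumulates side-effecting chains in several pending lists:
// PendingLoads for memory reads, PendingExports for values copied out to
// virtual registers for use in other blocks, and two lists for constrained
// FP intrinsics. getMemoryRoot(), getRoot() and getControlRoot() below fold
// the appropriate subset into the DAG root via updateRoot().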
// Update DAG root to include dependencies on Pending chains.
SDValue SelectionDAGBuilder::updateRoot(SmallVectorImpl<SDValue> &Pending) {
  SDValue Root = DAG.getRoot();

  if (Pending.empty())
    return Root;

  // Add current root to PendingChains, unless we already indirectly
  // depend on it.
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = Pending.size();
    for (; i != e; ++i) {
      assert(Pending[i].getNode()->getNumOperands() > 1);
      if (Pending[i].getNode()->getOperand(0) == Root)
        break; // Don't add the root if we already indirectly depend on it.
    }

    if (i == e)
      Pending.push_back(Root);
  }

  if (Pending.size() == 1)
    Root = Pending[0];
  else
    Root = DAG.getTokenFactor(getCurSDLoc(), Pending);

  DAG.setRoot(Root);
  Pending.clear();
  return Root;
}

SDValue SelectionDAGBuilder::getMemoryRoot() {
  return updateRoot(PendingLoads);
}

SDValue SelectionDAGBuilder::getRoot() {
  // Chain up all pending constrained intrinsics together with all
  // pending loads, by simply appending them to PendingLoads and
  // then calling getMemoryRoot().
  PendingLoads.reserve(PendingLoads.size() +
                       PendingConstrainedFP.size() +
                       PendingConstrainedFPStrict.size());
  PendingLoads.append(PendingConstrainedFP.begin(),
                      PendingConstrainedFP.end());
  PendingLoads.append(PendingConstrainedFPStrict.begin(),
                      PendingConstrainedFPStrict.end());
  PendingConstrainedFP.clear();
  PendingConstrainedFPStrict.clear();
  return getMemoryRoot();
}

SDValue SelectionDAGBuilder::getControlRoot() {
  // We need to emit pending fpexcept.strict constrained intrinsics,
  // so append them to the PendingExports list.
  PendingExports.append(PendingConstrainedFPStrict.begin(),
                        PendingConstrainedFPStrict.end());
  PendingConstrainedFPStrict.clear();
  return updateRoot(PendingExports);
}

void SelectionDAGBuilder::visit(const Instruction &I) {
  // Set up outgoing PHI node register values before emitting the terminator.
  if (I.isTerminator()) {
    HandlePHINodesInSuccessorBlocks(I.getParent());
  }

  // Increase the SDNodeOrder if dealing with a non-debug instruction.
  if (!isa<DbgInfoIntrinsic>(I))
    ++SDNodeOrder;

  CurInst = &I;

  visit(I.getOpcode(), I);

  if (auto *FPMO = dyn_cast<FPMathOperator>(&I)) {
    // ConstrainedFPIntrinsics handle their own FMF.
    if (!isa<ConstrainedFPIntrinsic>(&I)) {
      // Propagate the fast-math-flags of this IR instruction to the DAG node
      // that maps to this instruction.
      // TODO: We could handle all flags (nsw, etc) here.
1124 // flags set.
      if (SDNode *Node = getNodeForIRValue(&I)) {
        SDNodeFlags IncomingFlags;
        IncomingFlags.copyFMF(*FPMO);
        if (!Node->getFlags().isDefined())
          Node->setFlags(IncomingFlags);
        else
          Node->intersectFlagsWith(IncomingFlags);
      }
    }
  }

  if (!I.isTerminator() && !HasTailCall &&
      !isa<GCStatepointInst>(I)) // statepoints handle their exports internally
    CopyToExportRegsIfNeeded(&I);

  CurInst = nullptr;
}

void SelectionDAGBuilder::visitPHI(const PHINode &) {
  llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
}

void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
  // Note: this doesn't use InstVisitor, because it has to work with
  // ConstantExpr's in addition to instructions.
  switch (Opcode) {
  default: llvm_unreachable("Unknown instruction type encountered!");
  // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
  case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
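  // For instance, the Instruction.def entry for Add expands to:
  //   case Instruction::Add: visitAdd((const BinaryOperator&)I); break;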
#include "llvm/IR/Instruction.def"
  }
}

void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
                                                const DIExpression *Expr) {
  auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
    const DbgValueInst *DI = DDI.getDI();
    DIVariable *DanglingVariable = DI->getVariable();
    DIExpression *DanglingExpr = DI->getExpression();
    if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
      LLVM_DEBUG(dbgs() << "Dropping dangling debug info for " << *DI << "\n");
      return true;
    }
    return false;
  };

  for (auto &DDIMI : DanglingDebugInfoMap) {
    DanglingDebugInfoVector &DDIV = DDIMI.second;

    // If debug info is to be dropped, run it through final checks to see
    // whether it can be salvaged.
    for (auto &DDI : DDIV)
      if (isMatchingDbgValue(DDI))
        salvageUnresolvedDbgValue(DDI);

    DDIV.erase(remove_if(DDIV, isMatchingDbgValue), DDIV.end());
  }
}

// resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
// generate the debug data structures now that we've seen its definition.
void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
                                                   SDValue Val) {
  auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
  if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
    return;

  DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
  for (auto &DDI : DDIV) {
    const DbgValueInst *DI = DDI.getDI();
    assert(DI && "Ill-formed DanglingDebugInfo");
    DebugLoc dl = DDI.getdl();
    unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
    unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
    DILocalVariable *Variable = DI->getVariable();
    DIExpression *Expr = DI->getExpression();
    assert(Variable->isValidLocationForIntrinsic(dl) &&
           "Expected inlined-at fields to agree");
    SDDbgValue *SDV;
    if (Val.getNode()) {
      // FIXME: I doubt that it is correct to resolve a dangling DbgValue as a
      // FuncArgumentDbgValue (it would be hoisted to the function entry, and
      // if we couldn't resolve it directly when examining the DbgValue
      // intrinsic in the first place we should not be more successful here).
      // Unless we have some test case that proves this to be correct we
      // should avoid calling EmitFuncArgumentDbgValue here.
      if (!EmitFuncArgumentDbgValue(V, Variable, Expr, dl, false, Val)) {
        LLVM_DEBUG(dbgs() << "Resolve dangling debug info [order="
                          << DbgSDNodeOrder << "] for:\n " << *DI << "\n");
        LLVM_DEBUG(dbgs() << " By mapping to:\n "; Val.dump());
        // Increase the SDNodeOrder for the DbgValue here to make sure it is
        // inserted after the definition of Val when emitting the instructions
        // after ISel. An alternative could be to teach
        // ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly.
        LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs()
                   << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
                   << ValSDNodeOrder << "\n");
        SDV = getDbgValue(Val, Variable, Expr, dl,
                          std::max(DbgSDNodeOrder, ValSDNodeOrder));
        DAG.AddDbgValue(SDV, Val.getNode(), false);
      } else
        LLVM_DEBUG(dbgs() << "Resolved dangling debug info for " << *DI
                          << "in EmitFuncArgumentDbgValue\n");
    } else {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      auto Undef =
          UndefValue::get(DDI.getDI()->getVariableLocation()->getType());
      auto SDV =
          DAG.getConstantDbgValue(Variable, Expr, Undef, dl, DbgSDNodeOrder);
      DAG.AddDbgValue(SDV, nullptr, false);
    }
  }
  DDIV.clear();
}

void SelectionDAGBuilder::salvageUnresolvedDbgValue(DanglingDebugInfo &DDI) {
  Value *V = DDI.getDI()->getValue();
  DILocalVariable *Var = DDI.getDI()->getVariable();
  DIExpression *Expr = DDI.getDI()->getExpression();
  DebugLoc DL = DDI.getdl();
  DebugLoc InstDL = DDI.getDI()->getDebugLoc();
  unsigned SDOrder = DDI.getSDNodeOrder();

  // Currently we consider only dbg.value intrinsics -- we tell the salvager
  // that DW_OP_stack_value is desired.
  assert(isa<DbgValueInst>(DDI.getDI()));
  bool StackValue = true;

  // Can this Value be encoded without any further work?
  if (handleDebugValue(V, Var, Expr, DL, InstDL, SDOrder))
    return;

  // Attempt to salvage back through as many instructions as possible. Bail if
  // a non-instruction is seen, such as a constant expression or global
  // variable. FIXME: Further work could recover those too.
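  // For example, if V is "%a = add i32 %x, 1", one salvage step rewrites the
  // expression to compute "%x + 1" (appending DW_OP_plus_uconst 1 and
  // DW_OP_stack_value) and continues with V = %x.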
  while (isa<Instruction>(V)) {
    Instruction &VAsInst = *cast<Instruction>(V);
    DIExpression *NewExpr = salvageDebugInfoImpl(VAsInst, Expr, StackValue);

    // If we cannot salvage any further, and haven't yet found a suitable debug
    // expression, bail out.
    if (!NewExpr)
      break;

    // New value and expr now represent this debuginfo.
    V = VAsInst.getOperand(0);
    Expr = NewExpr;

    // Some kind of simplification occurred: check whether the operand of the
    // salvaged debug expression can be encoded in this DAG.
    if (handleDebugValue(V, Var, Expr, DL, InstDL, SDOrder)) {
      LLVM_DEBUG(dbgs() << "Salvaged debug location info for:\n "
                        << DDI.getDI() << "\nBy stripping back to:\n " << V);
      return;
    }
  }

  // This was the final opportunity to salvage this debug information, and it
  // couldn't be done. Place an undef DBG_VALUE at this location to terminate
  // any earlier variable location.
  auto Undef = UndefValue::get(DDI.getDI()->getVariableLocation()->getType());
  auto SDV = DAG.getConstantDbgValue(Var, Expr, Undef, DL, SDNodeOrder);
  DAG.AddDbgValue(SDV, nullptr, false);

  LLVM_DEBUG(dbgs() << "Dropping debug value info for:\n " << DDI.getDI()
                    << "\n");
  LLVM_DEBUG(dbgs() << " Last seen at:\n " << *DDI.getDI()->getOperand(0)
                    << "\n");
}

bool SelectionDAGBuilder::handleDebugValue(const Value *V, DILocalVariable *Var,
                                           DIExpression *Expr, DebugLoc dl,
                                           DebugLoc InstDL, unsigned Order) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDDbgValue *SDV;
  if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
      isa<ConstantPointerNull>(V)) {
    SDV = DAG.getConstantDbgValue(Var, Expr, V, dl, SDNodeOrder);
    DAG.AddDbgValue(SDV, nullptr, false);
    return true;
  }

  // If the Value is a frame index, we can create a FrameIndex debug value
  // without relying on the DAG at all.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    auto SI = FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end()) {
      auto SDV =
          DAG.getFrameIndexDbgValue(Var, Expr, SI->second,
                                    /*IsIndirect*/ false, dl, SDNodeOrder);
      // Do not attach the SDNodeDbgValue to an SDNode: this variable location
      // is still available even if the SDNode gets optimized out.
      DAG.AddDbgValue(SDV, nullptr, false);
      return true;
    }
  }

  // Do not use getValue() in here; we don't want to generate code at
  // this point if it hasn't been done yet.
  SDValue N = NodeMap[V];
  if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
    N = UnusedArgNodeMap[V];
  if (N.getNode()) {
    if (EmitFuncArgumentDbgValue(V, Var, Expr, dl, false, N))
      return true;
    SDV = getDbgValue(N, Var, Expr, dl, SDNodeOrder);
    DAG.AddDbgValue(SDV, N.getNode(), false);
    return true;
  }

  // Special rules apply for the first dbg.values of parameter variables in a
  // function. Identify them by the fact they reference Argument Values, that
  // they're parameters, and they are parameters of the current function. We
  // need to let them dangle until they get an SDNode.
  bool IsParamOfFunc = isa<Argument>(V) && Var->isParameter() &&
                       !InstDL.getInlinedAt();
  if (!IsParamOfFunc) {
    // The value is not used in this block yet (or it would have an SDNode).
    // We still want the value to appear for the user if possible -- if it has
    // an associated VReg, we can refer to that instead.
    auto VMI = FuncInfo.ValueMap.find(V);
    if (VMI != FuncInfo.ValueMap.end()) {
      unsigned Reg = VMI->second;
      // If this is a PHI node, it may be split up into several MI PHI nodes
      // (in FunctionLoweringInfo::set).
      RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
                       V->getType(), None);
      if (RFV.occupiesMultipleRegs()) {
        unsigned Offset = 0;
        unsigned BitsToDescribe = 0;
        if (auto VarSize = Var->getSizeInBits())
          BitsToDescribe = *VarSize;
        if (auto Fragment = Expr->getFragmentInfo())
          BitsToDescribe = Fragment->SizeInBits;
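        // e.g. a 64-bit variable living in two 32-bit registers is emitted as
        // two DBG_VALUEs whose fragment expressions cover bits [0, 32) and
        // [32, 64).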
1360 for (auto RegAndSize : RFV.getRegsAndSizes()) {
1361 unsigned RegisterSize = RegAndSize.second;
1362 // Bail out if all bits are described already.
1363 if (Offset >= BitsToDescribe)
1364 break;
1365 unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
1366 ? BitsToDescribe - Offset
1367 : RegisterSize;
1368 auto FragmentExpr = DIExpression::createFragmentExpression(
1369 Expr, Offset, FragmentSize);
1370 if (!FragmentExpr)
1371 continue;
1372 SDV = DAG.getVRegDbgValue(Var, *FragmentExpr, RegAndSize.first,
1373 false, dl, SDNodeOrder);
1374 DAG.AddDbgValue(SDV, nullptr, false);
1375 Offset += RegisterSize;
1376 }
1377 } else {
1378 SDV = DAG.getVRegDbgValue(Var, Expr, Reg, false, dl, SDNodeOrder);
1379 DAG.AddDbgValue(SDV, nullptr, false);
1380 }
1381 return true;
1382 }
1383 }
1384
1385 return false;
1386 }
1387
resolveOrClearDbgInfo()1388 void SelectionDAGBuilder::resolveOrClearDbgInfo() {
1389 // Try to fixup any remaining dangling debug info -- and drop it if we can't.
1390 for (auto &Pair : DanglingDebugInfoMap)
1391 for (auto &DDI : Pair.second)
1392 salvageUnresolvedDbgValue(DDI);
1393 clearDanglingDebugInfo();
1394 }
1395
1396 /// getCopyFromRegs - If there was virtual register allocated for the value V
1397 /// emit CopyFromReg of the specified type Ty. Return empty SDValue() otherwise.
getCopyFromRegs(const Value * V,Type * Ty)1398 SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
1399 DenseMap<const Value *, Register>::iterator It = FuncInfo.ValueMap.find(V);
1400 SDValue Result;
1401
1402 if (It != FuncInfo.ValueMap.end()) {
1403 Register InReg = It->second;
1404
1405 RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
1406 DAG.getDataLayout(), InReg, Ty,
1407 None); // This is not an ABI copy.
1408 SDValue Chain = DAG.getEntryNode();
1409 Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
1410 V);
1411 resolveDanglingDebugInfo(V, Result);
1412 }
1413
1414 return Result;
1415 }
1416
1417 /// getValue - Return an SDValue for the given Value.
1418 SDValue SelectionDAGBuilder::getValue(const Value *V) {
1419 // If we already have an SDValue for this value, use it. It's important
1420 // to do this first, so that we don't create a CopyFromReg if we already
1421 // have a regular SDValue.
1422 SDValue &N = NodeMap[V];
1423 if (N.getNode()) return N;
1424
1425 // If there's a virtual register allocated and initialized for this
1426 // value, use it.
1427 if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
1428 return copyFromReg;
1429
1430 // Otherwise create a new SDValue and remember it.
1431 SDValue Val = getValueImpl(V);
1432 NodeMap[V] = Val;
1433 resolveDanglingDebugInfo(V, Val);
1434 return Val;
1435 }
1436
1437 /// getNonRegisterValue - Return an SDValue for the given Value, but
1438 /// don't look in FuncInfo.ValueMap for a virtual register.
1439 SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
1440 // If we already have an SDValue for this value, use it.
1441 SDValue &N = NodeMap[V];
1442 if (N.getNode()) {
1443 if (isa<ConstantSDNode>(N) || isa<ConstantFPSDNode>(N)) {
1444 // Remove the debug location from the node as the node is about to be used
1445 // in a location which may differ from the original debug location. This
1446 // is relevant to Constant and ConstantFP nodes because they can appear
1447 // as constant expressions inside PHI nodes.
1448 N->setDebugLoc(DebugLoc());
1449 }
1450 return N;
1451 }
1452
1453 // Otherwise create a new SDValue and remember it.
1454 SDValue Val = getValueImpl(V);
1455 NodeMap[V] = Val;
1456 resolveDanglingDebugInfo(V, Val);
1457 return Val;
1458 }
1459
1460 /// getValueImpl - Helper function for getValue and getNonRegisterValue.
1461 /// Create an SDValue for the given value.
1462 SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
1463 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1464
1465 if (const Constant *C = dyn_cast<Constant>(V)) {
1466 EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);
1467
1468 if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
1469 return DAG.getConstant(*CI, getCurSDLoc(), VT);
1470
1471 if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
1472 return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
1473
1474 if (isa<ConstantPointerNull>(C)) {
1475 unsigned AS = V->getType()->getPointerAddressSpace();
1476 return DAG.getConstant(0, getCurSDLoc(),
1477 TLI.getPointerTy(DAG.getDataLayout(), AS));
1478 }
1479
1480 if (match(C, m_VScale(DAG.getDataLayout())))
1481 return DAG.getVScale(getCurSDLoc(), VT, APInt(VT.getSizeInBits(), 1));
1482
1483 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
1484 return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);
1485
1486 if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
1487 return DAG.getUNDEF(VT);
1488
1489 if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
1490 visit(CE->getOpcode(), *CE);
1491 SDValue N1 = NodeMap[V];
1492 assert(N1.getNode() && "visit didn't populate the NodeMap!");
1493 return N1;
1494 }
1495
1496 if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
1497 SmallVector<SDValue, 4> Constants;
1498 for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
1499 OI != OE; ++OI) {
1500 SDNode *Val = getValue(*OI).getNode();
1501 // If the operand is an empty aggregate, there are no values.
1502 if (!Val) continue;
1503 // Add each leaf value from the operand to the Constants list
1504 // to form a flattened list of all the values.
1505 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1506 Constants.push_back(SDValue(Val, i));
1507 }
1508
1509 return DAG.getMergeValues(Constants, getCurSDLoc());
1510 }
1511
1512 if (const ConstantDataSequential *CDS =
1513 dyn_cast<ConstantDataSequential>(C)) {
1514 SmallVector<SDValue, 4> Ops;
1515 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1516 SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
1517 // Add each leaf value from the element to the Ops list
1518 // to form a flattened list of all the values.
1519 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1520 Ops.push_back(SDValue(Val, i));
1521 }
1522
1523 if (isa<ArrayType>(CDS->getType()))
1524 return DAG.getMergeValues(Ops, getCurSDLoc());
1525 return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1526 }
1527
1528 if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
1529 assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
1530 "Unknown struct or array constant!");
1531
1532 SmallVector<EVT, 4> ValueVTs;
1533 ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
1534 unsigned NumElts = ValueVTs.size();
1535 if (NumElts == 0)
1536 return SDValue(); // empty struct
1537 SmallVector<SDValue, 4> Constants(NumElts);
1538 for (unsigned i = 0; i != NumElts; ++i) {
1539 EVT EltVT = ValueVTs[i];
1540 if (isa<UndefValue>(C))
1541 Constants[i] = DAG.getUNDEF(EltVT);
1542 else if (EltVT.isFloatingPoint())
1543 Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1544 else
1545 Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
1546 }
1547
1548 return DAG.getMergeValues(Constants, getCurSDLoc());
1549 }
1550
1551 if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
1552 return DAG.getBlockAddress(BA, VT);
1553
1554 VectorType *VecTy = cast<VectorType>(V->getType());
1555
1556 // Now that we know the number and type of the elements, get that number of
1557 // elements into the Ops array based on what kind of constant it is.
1558 if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
1559 SmallVector<SDValue, 16> Ops;
1560 unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
1561 for (unsigned i = 0; i != NumElements; ++i)
1562 Ops.push_back(getValue(CV->getOperand(i)));
1563
1564 return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1565 } else if (isa<ConstantAggregateZero>(C)) {
1566 EVT EltVT =
1567 TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());
1568
1569 SDValue Op;
1570 if (EltVT.isFloatingPoint())
1571 Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1572 else
1573 Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
1574
1575 if (isa<ScalableVectorType>(VecTy))
1576 return NodeMap[V] = DAG.getSplatVector(VT, getCurSDLoc(), Op);
1577 else {
1578 SmallVector<SDValue, 16> Ops;
1579 Ops.assign(cast<FixedVectorType>(VecTy)->getNumElements(), Op);
1580 return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1581 }
1582 }
1583 llvm_unreachable("Unknown vector constant");
1584 }
1585
1586 // If this is a static alloca, generate it as the frameindex instead of
1587 // computing its address.
1588 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1589 DenseMap<const AllocaInst*, int>::iterator SI =
1590 FuncInfo.StaticAllocaMap.find(AI);
1591 if (SI != FuncInfo.StaticAllocaMap.end())
1592 return DAG.getFrameIndex(SI->second,
1593 TLI.getFrameIndexTy(DAG.getDataLayout()));
1594 }
1595
1596 // If this is an instruction which fast-isel has deferred, select it now.
1597 if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
1598 unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
1599
1600 RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
1601 Inst->getType(), None);
1602 SDValue Chain = DAG.getEntryNode();
1603 return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
1604 }
1605
1606 if (const MetadataAsValue *MD = dyn_cast<MetadataAsValue>(V)) {
1607 return DAG.getMDNode(cast<MDNode>(MD->getMetadata()));
1608 }
1609 llvm_unreachable("Can't get register for value!");
1610 }
1611
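/// Lower a catchpad instruction. No code is emitted for the pad itself; we
/// only mark its machine basic block as an EH scope entry (except under SEH)
/// and, for funclet-based personalities (MSVC C++, CoreCLR), as a funclet
/// entry.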
1612 void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
1613 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1614 bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
1615 bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
1616 bool IsSEH = isAsynchronousEHPersonality(Pers);
1617 MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
1618 if (!IsSEH)
1619 CatchPadMBB->setIsEHScopeEntry();
1620 // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
1621 if (IsMSVCCXX || IsCoreCLR)
1622 CatchPadMBB->setIsEHFuncletEntry();
1623 }
1624
1625 void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
1626 // Update machine-CFG edge.
1627 MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[I.getSuccessor()];
1628 FuncInfo.MBB->addSuccessor(TargetMBB);
1629
1630 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1631 bool IsSEH = isAsynchronousEHPersonality(Pers);
1632 if (IsSEH) {
1633 // If this is not a fall-through branch or optimizations are switched off,
1634 // emit the branch.
1635 if (TargetMBB != NextBlock(FuncInfo.MBB) ||
1636 TM.getOptLevel() == CodeGenOpt::None)
1637 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
1638 getControlRoot(), DAG.getBasicBlock(TargetMBB)));
1639 return;
1640 }
1641
1642 // Figure out the funclet membership for the catchret's successor.
1643 // This will be used by the FuncletLayout pass to determine how to order the
1644 // BBs.
1645 // A 'catchret' returns to the outer scope's color.
1646 Value *ParentPad = I.getCatchSwitchParentPad();
1647 const BasicBlock *SuccessorColor;
1648 if (isa<ConstantTokenNone>(ParentPad))
1649 SuccessorColor = &FuncInfo.Fn->getEntryBlock();
1650 else
1651 SuccessorColor = cast<Instruction>(ParentPad)->getParent();
1652 assert(SuccessorColor && "No parent funclet for catchret!");
1653 MachineBasicBlock *SuccessorColorMBB = FuncInfo.MBBMap[SuccessorColor];
1654 assert(SuccessorColorMBB && "No MBB for SuccessorColor!");
1655
1656 // Create the terminator node.
1657 SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
1658 getControlRoot(), DAG.getBasicBlock(TargetMBB),
1659 DAG.getBasicBlock(SuccessorColorMBB));
1660 DAG.setRoot(Ret);
1661 }
1662
1663 void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
1664 // Don't emit any special code for the cleanuppad instruction. It just marks
1665 // the start of an EH scope/funclet.
1666 FuncInfo.MBB->setIsEHScopeEntry();
1667 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1668 if (Pers != EHPersonality::Wasm_CXX) {
1669 FuncInfo.MBB->setIsEHFuncletEntry();
1670 FuncInfo.MBB->setIsCleanupFuncletEntry();
1671 }
1672 }
1673
1674 // For wasm, there's always a single catch pad attached to a catchswitch, and
1675 // the control flow always stops at the single catch pad, as it does for a
1676 // cleanup pad. If the caught exception is not one of the types the catch pad
1677 // handles, it is rethrown by a rethrow instruction.
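//
// For example, in the common wasm EH pattern (sketched)
//   catch.dispatch:
//     %cs = catchswitch within none [label %catch.start] unwind to caller
// there is exactly one catchpad handler, %catch.start, and it becomes the
// sole unwind destination.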
1678 static void findWasmUnwindDestinations(
1679 FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
1680 BranchProbability Prob,
1681 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
1682 &UnwindDests) {
1683 while (EHPadBB) {
1684 const Instruction *Pad = EHPadBB->getFirstNonPHI();
1685 if (isa<CleanupPadInst>(Pad)) {
1686 // Stop on cleanup pads.
1687 UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1688 UnwindDests.back().first->setIsEHScopeEntry();
1689 break;
1690 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
1691 // Add the catchpad handlers to the possible destinations. We don't
1692 // continue to the unwind destination of the catchswitch for wasm.
1693 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
1694 UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
1695 UnwindDests.back().first->setIsEHScopeEntry();
1696 }
1697 break;
1698 } else {
1699 continue;
1700 }
1701 }
1702 }
1703
1704 /// When an invoke or a cleanupret unwinds to the next EH pad, there are
1705 /// many places it could ultimately go. In the IR, we have a single unwind
1706 /// destination, but in the machine CFG, we enumerate all the possible blocks.
1707 /// This function skips over imaginary basic blocks that hold catchswitch
1708 /// instructions, and finds all the "real" machine
1709 /// basic block destinations. As those destinations may not be successors of
1710 /// EHPadBB, here we also calculate the edge probability to those destinations.
1711 /// The passed-in Prob is the edge probability to EHPadBB.
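/// For example, an invoke whose unwind edge reaches a catchswitch with two
/// catchpad handlers yields one entry in UnwindDests per handler; if the
/// catchswitch itself unwinds further to a cleanup pad, that pad is appended
/// as well, with its probability scaled by the extra edge.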
1712 static void findUnwindDestinations(
1713 FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
1714 BranchProbability Prob,
1715 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
1716 &UnwindDests) {
1717 EHPersonality Personality =
1718 classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1719 bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
1720 bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
1721 bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
1722 bool IsSEH = isAsynchronousEHPersonality(Personality);
1723
1724 if (IsWasmCXX) {
1725 findWasmUnwindDestinations(FuncInfo, EHPadBB, Prob, UnwindDests);
1726 assert(UnwindDests.size() <= 1 &&
1727 "There should be at most one unwind destination for wasm");
1728 return;
1729 }
1730
1731 while (EHPadBB) {
1732 const Instruction *Pad = EHPadBB->getFirstNonPHI();
1733 BasicBlock *NewEHPadBB = nullptr;
1734 if (isa<LandingPadInst>(Pad)) {
1735 // Stop on landingpads. They are not funclets.
1736 UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1737 break;
1738 } else if (isa<CleanupPadInst>(Pad)) {
1739 // Stop on cleanup pads. Cleanups are always funclet entries for all known
1740 // personalities.
1741 UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
1742 UnwindDests.back().first->setIsEHScopeEntry();
1743 UnwindDests.back().first->setIsEHFuncletEntry();
1744 break;
1745 } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
1746 // Add the catchpad handlers to the possible destinations.
1747 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
1748 UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
1749 // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
1750 if (IsMSVCCXX || IsCoreCLR)
1751 UnwindDests.back().first->setIsEHFuncletEntry();
1752 if (!IsSEH)
1753 UnwindDests.back().first->setIsEHScopeEntry();
1754 }
1755 NewEHPadBB = CatchSwitch->getUnwindDest();
1756 } else {
1757 continue;
1758 }
1759
1760 BranchProbabilityInfo *BPI = FuncInfo.BPI;
1761 if (BPI && NewEHPadBB)
1762 Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
1763 EHPadBB = NewEHPadBB;
1764 }
1765 }
1766
1767 void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
1768 // Update successor info.
1769 SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
1770 auto UnwindDest = I.getUnwindDest();
1771 BranchProbabilityInfo *BPI = FuncInfo.BPI;
1772 BranchProbability UnwindDestProb =
1773 (BPI && UnwindDest)
1774 ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
1775 : BranchProbability::getZero();
1776 findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
1777 for (auto &UnwindDest : UnwindDests) {
1778 UnwindDest.first->setIsEHPad();
1779 addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
1780 }
1781 FuncInfo.MBB->normalizeSuccProbs();
1782
1783 // Create the terminator node.
1784 SDValue Ret =
1785 DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other, getControlRoot());
1786 DAG.setRoot(Ret);
1787 }
1788
1789 void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
1790 report_fatal_error("visitCatchSwitch not yet implemented!");
1791 }
1792
1793 void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
1794 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1795 auto &DL = DAG.getDataLayout();
1796 SDValue Chain = getControlRoot();
1797 SmallVector<ISD::OutputArg, 8> Outs;
1798 SmallVector<SDValue, 8> OutVals;
1799
1800 // Calls to @llvm.experimental.deoptimize don't generate a return value, so
1801 // lower
1802 //
1803 // %val = call <ty> @llvm.experimental.deoptimize()
1804 // ret <ty> %val
1805 //
1806 // differently.
1807 if (I.getParent()->getTerminatingDeoptimizeCall()) {
1808 LowerDeoptimizingReturn();
1809 return;
1810 }
1811
1812 if (!FuncInfo.CanLowerReturn) {
1813 unsigned DemoteReg = FuncInfo.DemoteRegister;
1814 const Function *F = I.getParent()->getParent();
1815
1816 // Emit a store of the return value through the virtual register.
1817 // Leave Outs empty so that LowerReturn won't try to load return
1818 // registers the usual way.
1819 SmallVector<EVT, 1> PtrValueVTs;
1820 ComputeValueVTs(TLI, DL,
1821 F->getReturnType()->getPointerTo(
1822 DAG.getDataLayout().getAllocaAddrSpace()),
1823 PtrValueVTs);
1824
1825 SDValue RetPtr = DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
1826 DemoteReg, PtrValueVTs[0]);
1827 SDValue RetOp = getValue(I.getOperand(0));
1828
1829 SmallVector<EVT, 4> ValueVTs, MemVTs;
1830 SmallVector<uint64_t, 4> Offsets;
1831 ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &MemVTs,
1832 &Offsets);
1833 unsigned NumValues = ValueVTs.size();
1834
1835 SmallVector<SDValue, 4> Chains(NumValues);
1836 Align BaseAlign = DL.getPrefTypeAlign(I.getOperand(0)->getType());
1837 for (unsigned i = 0; i != NumValues; ++i) {
1838 // An aggregate return value cannot wrap around the address space, so
1839 // offsets to its parts don't wrap either.
1840 SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr, Offsets[i]);
1841
1842 SDValue Val = RetOp.getValue(RetOp.getResNo() + i);
1843 if (MemVTs[i] != ValueVTs[i])
1844 Val = DAG.getPtrExtOrTrunc(Val, getCurSDLoc(), MemVTs[i]);
1845 Chains[i] = DAG.getStore(
1846 Chain, getCurSDLoc(), Val,
1847 // FIXME: better loc info would be nice.
1848 Ptr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()),
1849 commonAlignment(BaseAlign, Offsets[i]));
1850 }
1851
1852 Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
1853 MVT::Other, Chains);
1854 } else if (I.getNumOperands() != 0) {
1855 SmallVector<EVT, 4> ValueVTs;
1856 ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
1857 unsigned NumValues = ValueVTs.size();
1858 if (NumValues) {
1859 SDValue RetOp = getValue(I.getOperand(0));
1860
1861 const Function *F = I.getParent()->getParent();
1862
1863 bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
1864 I.getOperand(0)->getType(), F->getCallingConv(),
1865 /*IsVarArg*/ false);
1866
1867 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
1868 if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
1869 Attribute::SExt))
1870 ExtendKind = ISD::SIGN_EXTEND;
1871 else if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
1872 Attribute::ZExt))
1873 ExtendKind = ISD::ZERO_EXTEND;
1874
1875 LLVMContext &Context = F->getContext();
1876 bool RetInReg = F->getAttributes().hasAttribute(
1877 AttributeList::ReturnIndex, Attribute::InReg);
1878
1879 for (unsigned j = 0; j != NumValues; ++j) {
1880 EVT VT = ValueVTs[j];
1881
1882 if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
1883 VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
1884
1885 CallingConv::ID CC = F->getCallingConv();
1886
1887 unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT);
1888 MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT);
1889 SmallVector<SDValue, 4> Parts(NumParts);
1890 getCopyToParts(DAG, getCurSDLoc(),
1891 SDValue(RetOp.getNode(), RetOp.getResNo() + j),
1892 &Parts[0], NumParts, PartVT, &I, CC, ExtendKind);
1893
1894 // 'inreg' on function refers to return value
1895 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1896 if (RetInReg)
1897 Flags.setInReg();
1898
1899 if (I.getOperand(0)->getType()->isPointerTy()) {
1900 Flags.setPointer();
1901 Flags.setPointerAddrSpace(
1902 cast<PointerType>(I.getOperand(0)->getType())->getAddressSpace());
1903 }
1904
1905 if (NeedsRegBlock) {
1906 Flags.setInConsecutiveRegs();
1907 if (j == NumValues - 1)
1908 Flags.setInConsecutiveRegsLast();
1909 }
1910
1911 // Propagate extension type if any
1912 if (ExtendKind == ISD::SIGN_EXTEND)
1913 Flags.setSExt();
1914 else if (ExtendKind == ISD::ZERO_EXTEND)
1915 Flags.setZExt();
1916
1917 for (unsigned i = 0; i < NumParts; ++i) {
1918 Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(),
1919 VT, /*isfixed=*/true, 0, 0));
1920 OutVals.push_back(Parts[i]);
1921 }
1922 }
1923 }
1924 }
1925
1926 // Push the swifterror virtual register in as the last element of Outs. This
1927 // makes sure the swifterror virtual register will be returned in the
1928 // swifterror physical register.
1929 const Function *F = I.getParent()->getParent();
1930 if (TLI.supportSwiftError() &&
1931 F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
1932 assert(SwiftError.getFunctionArg() && "Need a swift error argument");
1933 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1934 Flags.setSwiftError();
1935 Outs.push_back(ISD::OutputArg(Flags, EVT(TLI.getPointerTy(DL)) /*vt*/,
1936 EVT(TLI.getPointerTy(DL)) /*argvt*/,
1937 true /*isfixed*/, 1 /*origidx*/,
1938 0 /*partOffs*/));
1939 // Create SDNode for the swifterror virtual register.
1940 OutVals.push_back(
1941 DAG.getRegister(SwiftError.getOrCreateVRegUseAt(
1942 &I, FuncInfo.MBB, SwiftError.getFunctionArg()),
1943 EVT(TLI.getPointerTy(DL))));
1944 }
1945
1946 bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg();
1947 CallingConv::ID CallConv =
1948 DAG.getMachineFunction().getFunction().getCallingConv();
1949 Chain = DAG.getTargetLoweringInfo().LowerReturn(
1950 Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
1951
1952 // Verify that the target's LowerReturn behaved as expected.
1953 assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
1954 "LowerReturn didn't return a valid chain!");
1955
1956 // Update the DAG with the new chain value resulting from return lowering.
1957 DAG.setRoot(Chain);
1958 }
1959
1960 /// CopyToExportRegsIfNeeded - If the given value has virtual registers
1961 /// created for it, emit nodes to copy the value into the virtual
1962 /// registers.
1963 void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
1964 // Skip empty types
1965 if (V->getType()->isEmptyTy())
1966 return;
1967
1968 DenseMap<const Value *, Register>::iterator VMI = FuncInfo.ValueMap.find(V);
1969 if (VMI != FuncInfo.ValueMap.end()) {
1970 assert(!V->use_empty() && "Unused value assigned virtual registers!");
1971 CopyValueToVirtualRegister(V, VMI->second);
1972 }
1973 }
1974
1975 /// ExportFromCurrentBlock - If this condition isn't known to be exported from
1976 /// the current basic block, add it to ValueMap now so that we'll get a
1977 /// CopyTo/FromReg.
1978 void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
1979 // No need to export constants.
1980 if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
1981
1982 // Already exported?
1983 if (FuncInfo.isExportedInst(V)) return;
1984
1985 unsigned Reg = FuncInfo.InitializeRegForValue(V);
1986 CopyValueToVirtualRegister(V, Reg);
1987 }
1988
1989 bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
1990 const BasicBlock *FromBB) {
1991 // The operands of the setcc have to be in this block. We don't know
1992 // how to export them from some other block.
1993 if (const Instruction *VI = dyn_cast<Instruction>(V)) {
1994 // Can export from current BB.
1995 if (VI->getParent() == FromBB)
1996 return true;
1997
1998 // Is already exported, noop.
1999 return FuncInfo.isExportedInst(V);
2000 }
2001
2002 // If this is an argument, we can export it if the BB is the entry block or
2003 // if it is already exported.
2004 if (isa<Argument>(V)) {
2005 if (FromBB == &FromBB->getParent()->getEntryBlock())
2006 return true;
2007
2008 // Otherwise, can only export this if it is already exported.
2009 return FuncInfo.isExportedInst(V);
2010 }
2011
2012 // Otherwise, constants can always be exported.
2013 return true;
2014 }
2015
2016 /// Return the branch probability calculated by BranchProbabilityInfo for IR blocks.
2017 BranchProbability
2018 SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src,
2019 const MachineBasicBlock *Dst) const {
2020 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2021 const BasicBlock *SrcBB = Src->getBasicBlock();
2022 const BasicBlock *DstBB = Dst->getBasicBlock();
2023 if (!BPI) {
2024 // If BPI is not available, set the default probability as 1 / N, where N is
2025 // the number of successors.
2026 auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
2027 return BranchProbability(1, SuccSize);
2028 }
2029 return BPI->getEdgeProbability(SrcBB, DstBB);
2030 }
2031
2032 void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
2033 MachineBasicBlock *Dst,
2034 BranchProbability Prob) {
2035 if (!FuncInfo.BPI)
2036 Src->addSuccessorWithoutProb(Dst);
2037 else {
2038 if (Prob.isUnknown())
2039 Prob = getEdgeProbability(Src, Dst);
2040 Src->addSuccessor(Dst, Prob);
2041 }
2042 }
2043
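/// Return true if V is an instruction defined in BB, or if V is not an
/// instruction at all (constants and arguments are usable from any block).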
2044 static bool InBlock(const Value *V, const BasicBlock *BB) {
2045 if (const Instruction *I = dyn_cast<Instruction>(V))
2046 return I->getParent() == BB;
2047 return true;
2048 }
2049
2050 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
2051 /// This function emits a branch and is used at the leaves of an OR or an
2052 /// AND operator tree.
2053 void
2054 SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
2055 MachineBasicBlock *TBB,
2056 MachineBasicBlock *FBB,
2057 MachineBasicBlock *CurBB,
2058 MachineBasicBlock *SwitchBB,
2059 BranchProbability TProb,
2060 BranchProbability FProb,
2061 bool InvertCond) {
2062 const BasicBlock *BB = CurBB->getBasicBlock();
2063
2064 // If the leaf of the tree is a comparison, merge the condition into
2065 // the caseblock.
2066 if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
2067 // The operands of the cmp have to be in this block. We don't know
2068 // how to export them from some other block. If this is the first block
2069 // of the sequence, no exporting is needed.
2070 if (CurBB == SwitchBB ||
2071 (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
2072 isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
2073 ISD::CondCode Condition;
2074 if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
2075 ICmpInst::Predicate Pred =
2076 InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2077 Condition = getICmpCondCode(Pred);
2078 } else {
2079 const FCmpInst *FC = cast<FCmpInst>(Cond);
2080 FCmpInst::Predicate Pred =
2081 InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2082 Condition = getFCmpCondCode(Pred);
2083 if (TM.Options.NoNaNsFPMath)
2084 Condition = getFCmpCodeWithoutNaN(Condition);
2085 }
2086
2087 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
2088 TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2089 SL->SwitchCases.push_back(CB);
2090 return;
2091 }
2092 }
2093
2094 // Create a CaseBlock record representing this branch.
2095 ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
2096 CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
2097 nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2098 SL->SwitchCases.push_back(CB);
2099 }
2100
2101 void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
2102 MachineBasicBlock *TBB,
2103 MachineBasicBlock *FBB,
2104 MachineBasicBlock *CurBB,
2105 MachineBasicBlock *SwitchBB,
2106 Instruction::BinaryOps Opc,
2107 BranchProbability TProb,
2108 BranchProbability FProb,
2109 bool InvertCond) {
2110 // Skip over nodes that are not part of the tree, and remember to invert the
2111 // op and operands at the next level.
2112 Value *NotCond;
2113 if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
2114 InBlock(NotCond, CurBB->getBasicBlock())) {
2115 FindMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
2116 !InvertCond);
2117 return;
2118 }
2119
2120 const Instruction *BOp = dyn_cast<Instruction>(Cond);
2121 // Compute the effective opcode for Cond, taking into account whether it needs
2122 // to be inverted, e.g.
2123 // and (not (or A, B)), C
2124 // gets lowered as
2125 // and (and (not A, not B), C)
2126 unsigned BOpc = 0;
2127 if (BOp) {
2128 BOpc = BOp->getOpcode();
2129 if (InvertCond) {
2130 if (BOpc == Instruction::And)
2131 BOpc = Instruction::Or;
2132 else if (BOpc == Instruction::Or)
2133 BOpc = Instruction::And;
2134 }
2135 }
2136
2137 // If this node is not part of the or/and tree, emit it as a branch.
2138 if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
2139 BOpc != unsigned(Opc) || !BOp->hasOneUse() ||
2140 BOp->getParent() != CurBB->getBasicBlock() ||
2141 !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
2142 !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
2143 EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
2144 TProb, FProb, InvertCond);
2145 return;
2146 }
2147
2148 // Create TmpBB after CurBB.
2149 MachineFunction::iterator BBI(CurBB);
2150 MachineFunction &MF = DAG.getMachineFunction();
2151 MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
2152 CurBB->getParent()->insert(++BBI, TmpBB);
2153
2154 if (Opc == Instruction::Or) {
2155 // Codegen X | Y as:
2156 // BB1:
2157 // jmp_if_X TBB
2158 // jmp TmpBB
2159 // TmpBB:
2160 // jmp_if_Y TBB
2161 // jmp FBB
2162 //
2163
2164 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2165 // The requirement is that
2166 // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
2167 // = TrueProb for original BB.
2168 // Assuming the original probabilities are A and B, one choice is to set
2169 // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
2170 // A/(1+B) and 2B/(1+B). This choice assumes that
2171 // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
2172 // Another choice is to assume TrueProb for BB1 equals to TrueProb for
2173 // TmpBB, but the math is more complicated.
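//
// For example, with A = 3/4 and B = 1/4, BB1 gets probabilities 3/8 and
// 5/8, and TmpBB gets 3/5 and 2/5; the overall TrueProb then works out to
// 3/8 + 5/8 * 3/5 = 3/4 = A, as required.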
2174
2175 auto NewTrueProb = TProb / 2;
2176 auto NewFalseProb = TProb / 2 + FProb;
2177 // Emit the LHS condition.
2178 FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc,
2179 NewTrueProb, NewFalseProb, InvertCond);
2180
2181 // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
2182 SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
2183 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2184 // Emit the RHS condition into TmpBB.
2185 FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
2186 Probs[0], Probs[1], InvertCond);
2187 } else {
2188 assert(Opc == Instruction::And && "Unknown merge op!");
2189 // Codegen X & Y as:
2190 // BB1:
2191 // jmp_if_X TmpBB
2192 // jmp FBB
2193 // TmpBB:
2194 // jmp_if_Y TBB
2195 // jmp FBB
2196 //
2197 // This requires creation of TmpBB after CurBB.
2198
2199 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2200 // The requirement is that
2201 // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
2202 // = FalseProb for original BB.
2203 // Assuming the original probabilities are A and B, one choice is to set
2204 // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
2205 // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
2206 // TrueProb for BB1 * FalseProb for TmpBB.
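//
// For example, with A = 1/2 and B = 1/2, BB1 gets probabilities 3/4 and
// 1/4, and TmpBB gets 2/3 and 1/3; the overall FalseProb then works out to
// 1/4 + 3/4 * 1/3 = 1/2 = B, as required.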
2207
2208 auto NewTrueProb = TProb + FProb / 2;
2209 auto NewFalseProb = FProb / 2;
2210 // Emit the LHS condition.
2211 FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc,
2212 NewTrueProb, NewFalseProb, InvertCond);
2213
2214 // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
2215 SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
2216 BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2217 // Emit the RHS condition into TmpBB.
2218 FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
2219 Probs[0], Probs[1], InvertCond);
2220 }
2221 }
2222
2223 /// If the set of cases should be emitted as a series of branches, return true.
2224 /// If we should emit this as a bunch of and/or'd together conditions, return
2225 /// false.
2226 bool
2227 SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
2228 if (Cases.size() != 2) return true;
2229
2230 // If this is two comparisons of the same values or'd or and'd together, they
2231 // will get folded into a single comparison, so don't emit two blocks.
2232 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2233 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2234 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2235 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2236 return false;
2237 }
2238
2239 // Handle: (X != null) | (Y != null) --> (X|Y) != 0
2240 // Handle: (X == null) & (Y == null) --> (X|Y) == 0
2241 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2242 Cases[0].CC == Cases[1].CC &&
2243 isa<Constant>(Cases[0].CmpRHS) &&
2244 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
2245 if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2246 return false;
2247 if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2248 return false;
2249 }
2250
2251 return true;
2252 }
2253
2254 void SelectionDAGBuilder::visitBr(const BranchInst &I) {
2255 MachineBasicBlock *BrMBB = FuncInfo.MBB;
2256
2257 // Update machine-CFG edges.
2258 MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
2259
2260 if (I.isUnconditional()) {
2261 // Update machine-CFG edges.
2262 BrMBB->addSuccessor(Succ0MBB);
2263
2264 // If this is not a fall-through branch or optimizations are switched off,
2265 // emit the branch.
2266 if (Succ0MBB != NextBlock(BrMBB) || TM.getOptLevel() == CodeGenOpt::None)
2267 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
2268 MVT::Other, getControlRoot(),
2269 DAG.getBasicBlock(Succ0MBB)));
2270
2271 return;
2272 }
2273
2274 // If this condition is one of the special cases we handle, do special stuff
2275 // now.
2276 const Value *CondVal = I.getCondition();
2277 MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
2278
2279 // If this is a series of conditions that are or'd or and'd together, emit
2280 // this as a sequence of branches instead of setcc's with and/or operations.
2281 // As long as jumps are not expensive (exceptions for multi-use logic ops,
2282 // unpredictable branches, and vector extracts because those jumps are likely
2283 // expensive for any target), this should improve performance.
2284 // For example, instead of something like:
2285 // cmp A, B
2286 // C = seteq
2287 // cmp D, E
2288 // F = setle
2289 // or C, F
2290 // jnz foo
2291 // Emit:
2292 // cmp A, B
2293 // je foo
2294 // cmp D, E
2295 // jle foo
2296 if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
2297 Instruction::BinaryOps Opcode = BOp->getOpcode();
2298 Value *Vec, *BOp0 = BOp->getOperand(0), *BOp1 = BOp->getOperand(1);
2299 if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp->hasOneUse() &&
2300 !I.hasMetadata(LLVMContext::MD_unpredictable) &&
2301 (Opcode == Instruction::And || Opcode == Instruction::Or) &&
2302 !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
2303 match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
2304 FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB,
2305 Opcode,
2306 getEdgeProbability(BrMBB, Succ0MBB),
2307 getEdgeProbability(BrMBB, Succ1MBB),
2308 /*InvertCond=*/false);
2309 // If the compares in later blocks need to use values not currently
2310 // exported from this block, export them now. This block should always
2311 // be the first entry.
2312 assert(SL->SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
2313
2314 // Allow some cases to be rejected.
2315 if (ShouldEmitAsBranches(SL->SwitchCases)) {
2316 for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) {
2317 ExportFromCurrentBlock(SL->SwitchCases[i].CmpLHS);
2318 ExportFromCurrentBlock(SL->SwitchCases[i].CmpRHS);
2319 }
2320
2321 // Emit the branch for this block.
2322 visitSwitchCase(SL->SwitchCases[0], BrMBB);
2323 SL->SwitchCases.erase(SL->SwitchCases.begin());
2324 return;
2325 }
2326
2327 // Okay, we decided not to do this, remove any inserted MBB's and clear
2328 // SwitchCases.
2329 for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i)
2330 FuncInfo.MF->erase(SL->SwitchCases[i].ThisBB);
2331
2332 SL->SwitchCases.clear();
2333 }
2334 }
2335
2336 // Create a CaseBlock record representing this branch.
2337 CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
2338 nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc());
2339
2340 // Use visitSwitchCase to actually insert the fast branch sequence for this
2341 // cond branch.
2342 visitSwitchCase(CB, BrMBB);
2343 }
2344
2345 /// visitSwitchCase - Emits the necessary code to represent a single node in
2346 /// the binary search tree resulting from lowering a switch instruction.
2347 void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
2348 MachineBasicBlock *SwitchBB) {
2349 SDValue Cond;
2350 SDValue CondLHS = getValue(CB.CmpLHS);
2351 SDLoc dl = CB.DL;
2352
2353 if (CB.CC == ISD::SETTRUE) {
2354 // Branch or fall through to TrueBB.
2355 addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2356 SwitchBB->normalizeSuccProbs();
2357 if (CB.TrueBB != NextBlock(SwitchBB)) {
2358 DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, getControlRoot(),
2359 DAG.getBasicBlock(CB.TrueBB)));
2360 }
2361 return;
2362 }
2363
2364 auto &TLI = DAG.getTargetLoweringInfo();
2365 EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), CB.CmpLHS->getType());
2366
2367 // Build the setcc now.
2368 if (!CB.CmpMHS) {
2369 // Fold "(X == true)" to X and "(X == false)" to !X to
2370 // handle common cases produced by branch lowering.
2371 if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
2372 CB.CC == ISD::SETEQ)
2373 Cond = CondLHS;
2374 else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
2375 CB.CC == ISD::SETEQ) {
2376 SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
2377 Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
2378 } else {
2379 SDValue CondRHS = getValue(CB.CmpRHS);
2380
2381 // If a pointer's DAG type is larger than its memory type then the DAG
2382 // values are zero-extended. This breaks signed comparisons so truncate
2383 // back to the underlying type before doing the compare.
2384 if (CondLHS.getValueType() != MemVT) {
2385 CondLHS = DAG.getPtrExtOrTrunc(CondLHS, getCurSDLoc(), MemVT);
2386 CondRHS = DAG.getPtrExtOrTrunc(CondRHS, getCurSDLoc(), MemVT);
2387 }
2388 Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.CC);
2389 }
2390 } else {
2391 assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
2392
2393 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
2394 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
2395
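// For example, a case range [10, 20] is emitted as (X - 10) <=u 10, folding
// both boundary checks into one unsigned comparison; when Low is already the
// minimum value of the type, the subtraction is skipped and X <=s High is
// tested directly.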
2396 SDValue CmpOp = getValue(CB.CmpMHS);
2397 EVT VT = CmpOp.getValueType();
2398
2399 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
2400 Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
2401 ISD::SETLE);
2402 } else {
2403 SDValue SUB = DAG.getNode(ISD::SUB, dl,
2404 VT, CmpOp, DAG.getConstant(Low, dl, VT));
2405 Cond = DAG.getSetCC(dl, MVT::i1, SUB,
2406 DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
2407 }
2408 }
2409
2410 // Update successor info
2411 addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2412 // TrueBB and FalseBB are always different unless the incoming IR is
2413 // degenerate. This only happens when running llc on weird IR.
2414 if (CB.TrueBB != CB.FalseBB)
2415 addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
2416 SwitchBB->normalizeSuccProbs();
2417
2418 // If the lhs block is the next block, invert the condition so that we can
2419 // fall through to the lhs instead of the rhs block.
2420 if (CB.TrueBB == NextBlock(SwitchBB)) {
2421 std::swap(CB.TrueBB, CB.FalseBB);
2422 SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
2423 Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
2424 }
2425
2426 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2427 MVT::Other, getControlRoot(), Cond,
2428 DAG.getBasicBlock(CB.TrueBB));
2429
2430 // Insert the false branch. Do this even if it's a fall through branch,
2431 // this makes it easier to do DAG optimizations which require inverting
2432 // the branch condition.
2433 BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2434 DAG.getBasicBlock(CB.FalseBB));
2435
2436 DAG.setRoot(BrCond);
2437 }
2438
2439 /// visitJumpTable - Emit JumpTable node in the current MBB
2440 void SelectionDAGBuilder::visitJumpTable(SwitchCG::JumpTable &JT) {
2441 // Emit the code for the jump table
2442 assert(JT.Reg != -1U && "Should lower JT Header first!");
2443 EVT PTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
2444 SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
2445 JT.Reg, PTy);
2446 SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
2447 SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurSDLoc(),
2448 MVT::Other, Index.getValue(1),
2449 Table, Index);
2450 DAG.setRoot(BrJumpTable);
2451 }
2452
2453 /// visitJumpTableHeader - This function emits the necessary code to produce an
2454 /// index into the JumpTable from the switch case value.
2455 void SelectionDAGBuilder::visitJumpTableHeader(SwitchCG::JumpTable &JT,
2456 JumpTableHeader &JTH,
2457 MachineBasicBlock *SwitchBB) {
2458 SDLoc dl = getCurSDLoc();
2459
2460 // Subtract the lowest switch case value from the value being switched on.
2461 SDValue SwitchOp = getValue(JTH.SValue);
2462 EVT VT = SwitchOp.getValueType();
2463 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
2464 DAG.getConstant(JTH.First, dl, VT));
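// For example, for a switch whose cases span [5, 12], JTH.First is 5, so the
// index is SwitchOp - 5 and the range check below compares it against
// 12 - 5 = 7.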
2465
2466 // The SDNode we just created, which holds the value being switched on minus
2467 // the smallest case value, needs to be copied to a virtual register so it
2468 // can be used as an index into the jump table in a subsequent basic block.
2469 // This value may be smaller or larger than the target's pointer type, and
2470 // may therefore require extension or truncation.
2471 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2472 SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy(DAG.getDataLayout()));
2473
2474 unsigned JumpTableReg =
2475 FuncInfo.CreateReg(TLI.getPointerTy(DAG.getDataLayout()));
2476 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl,
2477 JumpTableReg, SwitchOp);
2478 JT.Reg = JumpTableReg;
2479
2480 if (!JTH.OmitRangeCheck) {
2481 // Emit the range check for the jump table, and branch to the default block
2482 // for the switch statement if the value being switched on exceeds the
2483 // largest case in the switch.
2484 SDValue CMP = DAG.getSetCC(
2485 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2486 Sub.getValueType()),
2487 Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);
2488
2489 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2490 MVT::Other, CopyTo, CMP,
2491 DAG.getBasicBlock(JT.Default));
2492
2493 // Avoid emitting unnecessary branches to the next block.
2494 if (JT.MBB != NextBlock(SwitchBB))
2495 BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2496 DAG.getBasicBlock(JT.MBB));
2497
2498 DAG.setRoot(BrCond);
2499 } else {
2500 // Avoid emitting unnecessary branches to the next block.
2501 if (JT.MBB != NextBlock(SwitchBB))
2502 DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, CopyTo,
2503 DAG.getBasicBlock(JT.MBB)));
2504 else
2505 DAG.setRoot(CopyTo);
2506 }
2507 }
2508
2509 /// Create a LOAD_STACK_GUARD node, and let it carry the target-specific global
2510 /// variable if one exists.
2511 static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
2512 SDValue &Chain) {
2513 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2514 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2515 EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
2516 MachineFunction &MF = DAG.getMachineFunction();
2517 Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent());
2518 MachineSDNode *Node =
2519 DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
2520 if (Global) {
2521 MachinePointerInfo MPInfo(Global);
2522 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
2523 MachineMemOperand::MODereferenceable;
2524 MachineMemOperand *MemRef = MF.getMachineMemOperand(
2525 MPInfo, Flags, PtrTy.getSizeInBits() / 8, DAG.getEVTAlign(PtrTy));
2526 DAG.setNodeMemRefs(Node, {MemRef});
2527 }
2528 if (PtrTy != PtrMemTy)
2529 return DAG.getPtrExtOrTrunc(SDValue(Node, 0), DL, PtrMemTy);
2530 return SDValue(Node, 0);
2531 }
2532
2533 /// Codegen a new tail for a stack protector check ParentMBB which has had its
2534 /// tail spliced into a stack protector check success bb.
2535 ///
2536 /// For a high level explanation of how this fits into the stack protector
2537 /// generation see the comment on the declaration of class
2538 /// StackProtectorDescriptor.
2539 void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
2540 MachineBasicBlock *ParentBB) {
2541
2542 // First create the loads to the guard/stack slot for the comparison.
2543 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2544 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2545 EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
2546
2547 MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
2548 int FI = MFI.getStackProtectorIndex();
2549
2550 SDValue Guard;
2551 SDLoc dl = getCurSDLoc();
2552 SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
2553 const Module &M = *ParentBB->getParent()->getFunction().getParent();
2554 unsigned Align = DL->getPrefTypeAlignment(Type::getInt8PtrTy(M.getContext()));
2555
2556 // Generate code to load the content of the guard slot.
2557 SDValue GuardVal = DAG.getLoad(
2558 PtrMemTy, dl, DAG.getEntryNode(), StackSlotPtr,
2559 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
2560 MachineMemOperand::MOVolatile);
2561
2562 if (TLI.useStackGuardXorFP())
2563 GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);
2564
2565 // Retrieve guard check function, nullptr if instrumentation is inlined.
2566 if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
2567 // The target provides a guard check function to validate the guard value.
2568 // Generate a call to that function with the content of the guard slot as
2569 // argument.
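// (For example, MSVC environments provide __security_check_cookie, which
// expects the cookie value in a register and terminates the program if the
// value does not match.)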
2570 FunctionType *FnTy = GuardCheckFn->getFunctionType();
2571 assert(FnTy->getNumParams() == 1 && "Invalid function signature");
2572
2573 TargetLowering::ArgListTy Args;
2574 TargetLowering::ArgListEntry Entry;
2575 Entry.Node = GuardVal;
2576 Entry.Ty = FnTy->getParamType(0);
2577 if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
2578 Entry.IsInReg = true;
2579 Args.push_back(Entry);
2580
2581 TargetLowering::CallLoweringInfo CLI(DAG);
2582 CLI.setDebugLoc(getCurSDLoc())
2583 .setChain(DAG.getEntryNode())
2584 .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(),
2585 getValue(GuardCheckFn), std::move(Args));
2586
2587 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
2588 DAG.setRoot(Result.second);
2589 return;
2590 }
2591
2592 // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
2593 // Otherwise, emit a volatile load to retrieve the stack guard value.
2594 SDValue Chain = DAG.getEntryNode();
2595 if (TLI.useLoadStackGuardNode()) {
2596 Guard = getLoadStackGuard(DAG, dl, Chain);
2597 } else {
2598 const Value *IRGuard = TLI.getSDagStackGuard(M);
2599 SDValue GuardPtr = getValue(IRGuard);
2600
2601 Guard = DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
2602 MachinePointerInfo(IRGuard, 0), Align,
2603 MachineMemOperand::MOVolatile);
2604 }
2605
2606 // Perform the comparison via a getsetcc.
2607 SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
2608 *DAG.getContext(),
2609 Guard.getValueType()),
2610 Guard, GuardVal, ISD::SETNE);
2611
2612 // If the guard and the stack slot contents are not equal, branch to the failure MBB.
2613 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2614 MVT::Other, GuardVal.getOperand(0),
2615 Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
2616 // Otherwise branch to success MBB.
2617 SDValue Br = DAG.getNode(ISD::BR, dl,
2618 MVT::Other, BrCond,
2619 DAG.getBasicBlock(SPD.getSuccessMBB()));
2620
2621 DAG.setRoot(Br);
2622 }
2623
2624 /// Codegen the failure basic block for a stack protector check.
2625 ///
2626 /// A failure stack protector machine basic block consists simply of a call to
2627 /// __stack_chk_fail().
2628 ///
2629 /// For a high level explanation of how this fits into the stack protector
2630 /// generation see the comment on the declaration of class
2631 /// StackProtectorDescriptor.
2632 void
2633 SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
2634 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2635 TargetLowering::MakeLibCallOptions CallOptions;
2636 CallOptions.setDiscardResult(true);
2637 SDValue Chain =
2638 TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
2639 None, CallOptions, getCurSDLoc()).second;
2640 // On PS4, the "return address" must still be within the calling function,
2641 // even if it's at the very end, so emit an explicit TRAP here.
2642 // Passing 'true' for doesNotReturn above won't generate the trap for us.
2643 if (TM.getTargetTriple().isPS4CPU())
2644 Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);
2645 // WebAssembly needs an unreachable instruction after a non-returning call,
2646 // because the function return type can be different from __stack_chk_fail's
2647 // return type (void).
2648 if (TM.getTargetTriple().isWasm())
2649 Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);
2650
2651 DAG.setRoot(Chain);
2652 }
2653
2654 /// visitBitTestHeader - This function emits the necessary code to produce a
2655 /// value suitable for "bit tests".
2656 void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
2657 MachineBasicBlock *SwitchBB) {
2658 SDLoc dl = getCurSDLoc();
2659
2660 // Subtract the minimum value.
2661 SDValue SwitchOp = getValue(B.SValue);
2662 EVT VT = SwitchOp.getValueType();
2663 SDValue RangeSub =
2664 DAG.getNode(ISD::SUB, dl, VT, SwitchOp, DAG.getConstant(B.First, dl, VT));
2665
2666 // Determine the type of the test operands.
2667 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2668 bool UsePtrType = false;
2669 if (!TLI.isTypeLegal(VT)) {
2670 UsePtrType = true;
2671 } else {
2672 for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
2673 if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
2674 // Switch table case ranges are encoded into a series of masks.
2675 // Just use the pointer type; it's guaranteed to fit.
2676 UsePtrType = true;
2677 break;
2678 }
2679 }
2680 SDValue Sub = RangeSub;
2681 if (UsePtrType) {
2682 VT = TLI.getPointerTy(DAG.getDataLayout());
2683 Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
2684 }
2685
2686 B.RegVT = VT.getSimpleVT();
2687 B.Reg = FuncInfo.CreateReg(B.RegVT);
2688 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);
2689
2690 MachineBasicBlock* MBB = B.Cases[0].ThisBB;
2691
2692 if (!B.OmitRangeCheck)
2693 addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
2694 addSuccessorWithProb(SwitchBB, MBB, B.Prob);
2695 SwitchBB->normalizeSuccProbs();
2696
2697 SDValue Root = CopyTo;
2698 if (!B.OmitRangeCheck) {
2699 // Conditional branch to the default block.
2700 SDValue RangeCmp = DAG.getSetCC(dl,
2701 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2702 RangeSub.getValueType()),
2703 RangeSub, DAG.getConstant(B.Range, dl, RangeSub.getValueType()),
2704 ISD::SETUGT);
2705
2706 Root = DAG.getNode(ISD::BRCOND, dl, MVT::Other, Root, RangeCmp,
2707 DAG.getBasicBlock(B.Default));
2708 }
2709
2710 // Avoid emitting unnecessary branches to the next block.
2711 if (MBB != NextBlock(SwitchBB))
2712 Root = DAG.getNode(ISD::BR, dl, MVT::Other, Root, DAG.getBasicBlock(MBB));
2713
2714 DAG.setRoot(Root);
2715 }
2716
2717 /// visitBitTestCase - This function produces one "bit test".
2718 void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
2719 MachineBasicBlock* NextMBB,
2720 BranchProbability BranchProbToNext,
2721 unsigned Reg,
2722 BitTestCase &B,
2723 MachineBasicBlock *SwitchBB) {
2724 SDLoc dl = getCurSDLoc();
2725 MVT VT = BB.RegVT;
2726 SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
2727 SDValue Cmp;
2728 unsigned PopCount = countPopulation(B.Mask);
2729 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
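// Pick the cheapest comparison for the mask. For example, for Mask == 0b0100
// (one set bit) it suffices to test ShiftOp == 2, and for a mask with a
// single zero bit in the range it suffices to test ShiftOp against that
// position; otherwise a 1 is shifted into position and ANDed with the mask.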
2730 if (PopCount == 1) {
2731 // Testing for a single bit; just compare the shift count with what it
2732 // would need to be to shift a 1 bit in that position.
2733 Cmp = DAG.getSetCC(
2734 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2735 ShiftOp, DAG.getConstant(countTrailingZeros(B.Mask), dl, VT),
2736 ISD::SETEQ);
2737 } else if (PopCount == BB.Range) {
2738 // There is only one zero bit in the range, test for it directly.
2739 Cmp = DAG.getSetCC(
2740 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2741 ShiftOp, DAG.getConstant(countTrailingOnes(B.Mask), dl, VT),
2742 ISD::SETNE);
2743 } else {
2744 // Make desired shift
2745 SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
2746 DAG.getConstant(1, dl, VT), ShiftOp);
2747
2748 // Emit bit tests and jumps
2749 SDValue AndOp = DAG.getNode(ISD::AND, dl,
2750 VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
2751 Cmp = DAG.getSetCC(
2752 dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
2753 AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
2754 }

  // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
  addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
  // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
  addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
  // B.ExtraProb and BranchProbToNext are relative probabilities (they behave
  // more like weights), so their sum is not guaranteed to be one; normalize
  // them so the successor probabilities do sum to one.
  SwitchBB->normalizeSuccProbs();

  SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
                              MVT::Other, getControlRoot(),
                              Cmp, DAG.getBasicBlock(B.TargetBB));

  // Avoid emitting unnecessary branches to the next block.
  if (NextMBB != NextBlock(SwitchBB))
    BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
                        DAG.getBasicBlock(NextMBB));

  DAG.setRoot(BrAnd);
}

void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
  MachineBasicBlock *InvokeMBB = FuncInfo.MBB;

  // Retrieve the successors, looking through artificial IR-level blocks such
  // as catchswitch.
  MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
  // have to do anything here to lower funclet bundles.
  assert(!I.hasOperandBundlesOtherThan({LLVMContext::OB_deopt,
                                        LLVMContext::OB_gc_transition,
                                        LLVMContext::OB_gc_live,
                                        LLVMContext::OB_funclet,
                                        LLVMContext::OB_cfguardtarget}) &&
         "Cannot lower invokes with arbitrary operand bundles yet!");

  const Value *Callee(I.getCalledOperand());
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    visitInlineAsm(I);
  else if (Fn && Fn->isIntrinsic()) {
    switch (Fn->getIntrinsicID()) {
    default:
      llvm_unreachable("Cannot invoke this intrinsic");
    case Intrinsic::donothing:
      // Ignore invokes to @llvm.donothing: jump directly to the next BB.
      break;
    case Intrinsic::experimental_patchpoint_void:
    case Intrinsic::experimental_patchpoint_i64:
      visitPatchpoint(I, EHPadBB);
      break;
    case Intrinsic::experimental_gc_statepoint:
      LowerStatepoint(cast<GCStatepointInst>(I), EHPadBB);
      break;
    case Intrinsic::wasm_rethrow_in_catch: {
      // This is usually done in visitTargetIntrinsic, but this intrinsic is
      // special because it can be invoked, so we manually lower it to a DAG
      // node here.
      SmallVector<SDValue, 8> Ops;
      Ops.push_back(getRoot()); // inchain
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      Ops.push_back(
          DAG.getTargetConstant(Intrinsic::wasm_rethrow_in_catch,
                                getCurSDLoc(),
                                TLI.getPointerTy(DAG.getDataLayout())));
      SDVTList VTs = DAG.getVTList(ArrayRef<EVT>({MVT::Other})); // outchain
      DAG.setRoot(DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops));
      break;
    }
    }
  } else if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) {
    // Currently we do not lower any intrinsic calls with deopt operand
    // bundles. Eventually we will support lowering the
    // @llvm.experimental.deoptimize intrinsic, and right now there are no
    // plans to support other intrinsics with deopt state.
    LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
  } else {
    LowerCallTo(I, getValue(Callee), false, EHPadBB);
  }

  // If the value of the invoke is used outside of its defining block, make it
  // available as a virtual register.
  // We already took care of the exported value for the statepoint instruction
  // during the call to LowerStatepoint.
  if (!isa<GCStatepointInst>(I)) {
    CopyToExportRegsIfNeeded(&I);
  }

  SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1>
      UnwindDests;
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  BranchProbability EHPadBBProb =
      BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
          : BranchProbability::getZero();
  findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests);

  // Update successor info.
  addSuccessorWithProb(InvokeMBB, Return);
  for (auto &UnwindDest : UnwindDests) {
    UnwindDest.first->setIsEHPad();
    addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
  }
  InvokeMBB->normalizeSuccProbs();

  // Drop into normal successor.
  DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, getControlRoot(),
                          DAG.getBasicBlock(Return)));
}

void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) {
  MachineBasicBlock *CallBrMBB = FuncInfo.MBB;

  // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
  // have to do anything here to lower funclet bundles.
  assert(!I.hasOperandBundlesOtherThan(
             {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
         "Cannot lower callbrs with arbitrary operand bundles yet!");

  assert(I.isInlineAsm() && "Only know how to handle inlineasm callbr");
  visitInlineAsm(I);
  CopyToExportRegsIfNeeded(&I);

  // Retrieve successors.
  MachineBasicBlock *Return = FuncInfo.MBBMap[I.getDefaultDest()];

  // Update successor info.
  addSuccessorWithProb(CallBrMBB, Return, BranchProbability::getOne());
  for (unsigned i = 0, e = I.getNumIndirectDests(); i < e; ++i) {
    MachineBasicBlock *Target = FuncInfo.MBBMap[I.getIndirectDest(i)];
    addSuccessorWithProb(CallBrMBB, Target, BranchProbability::getZero());
    Target->setIsInlineAsmBrIndirectTarget();
  }
  CallBrMBB->normalizeSuccProbs();

  // Drop into default successor.
  DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
                          MVT::Other, getControlRoot(),
                          DAG.getBasicBlock(Return)));
}

void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
  llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
}

void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
  assert(FuncInfo.MBB->isEHPad() &&
         "Call to landingpad not in landing pad!");

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother to create these DAG nodes.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return;

  // If landingpad's return type is token type, we don't create DAG nodes
  // for its exception pointer and selector value. The extraction of exception
  // pointer or selector value from token type landingpads is not currently
  // supported.
  if (LP.getType()->isTokenTy())
    return;

  SmallVector<EVT, 2> ValueVTs;
  SDLoc dl = getCurSDLoc();
  ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
  assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");

  // Get the two live-in registers as SDValues. The physregs have already been
  // copied into virtual registers.
  SDValue Ops[2];
  if (FuncInfo.ExceptionPointerVirtReg) {
    Ops[0] = DAG.getZExtOrTrunc(
        DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                           FuncInfo.ExceptionPointerVirtReg,
                           TLI.getPointerTy(DAG.getDataLayout())),
        dl, ValueVTs[0]);
  } else {
    Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
  }
  Ops[1] = DAG.getZExtOrTrunc(
      DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                         FuncInfo.ExceptionSelectorVirtReg,
                         TLI.getPointerTy(DAG.getDataLayout())),
      dl, ValueVTs[1]);

  // Merge into one.
  SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
                            DAG.getVTList(ValueVTs), Ops);
  setValue(&LP, Res);
}

void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
                                           MachineBasicBlock *Last) {
  // Update JTCases.
  for (unsigned i = 0, e = SL->JTCases.size(); i != e; ++i)
    if (SL->JTCases[i].first.HeaderBB == First)
      SL->JTCases[i].first.HeaderBB = Last;

  // Update BitTestCases.
  for (unsigned i = 0, e = SL->BitTestCases.size(); i != e; ++i)
    if (SL->BitTestCases[i].Parent == First)
      SL->BitTestCases[i].Parent = Last;
}

void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
  MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;

  // Update machine-CFG edges with unique successors.
  SmallSet<BasicBlock*, 32> Done;
  for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
    BasicBlock *BB = I.getSuccessor(i);
    bool Inserted = Done.insert(BB).second;
    if (!Inserted)
      continue;

    MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
    addSuccessorWithProb(IndirectBrMBB, Succ);
  }
  IndirectBrMBB->normalizeSuccProbs();

  DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
                          MVT::Other, getControlRoot(),
                          getValue(I.getAddress())));
}

void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
  if (!DAG.getTarget().Options.TrapUnreachable)
    return;

  // We may be able to ignore unreachable behind a noreturn call.
  if (DAG.getTarget().Options.NoTrapAfterNoreturn) {
    const BasicBlock &BB = *I.getParent();
    if (&I != &BB.front()) {
      BasicBlock::const_iterator PredI =
          std::prev(BasicBlock::const_iterator(&I));
      if (const CallInst *Call = dyn_cast<CallInst>(&*PredI)) {
        if (Call->doesNotReturn())
          return;
      }
    }
  }
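  // E.g. for "call void @abort() noreturn" immediately followed by
  // "unreachable", no trap is needed: control can never reach this point, so
  // the extra ISD::TRAP below would only waste code size.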

  DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
}

void SelectionDAGBuilder::visitFSub(const User &I) {
  // -0.0 - X --> fneg
  Type *Ty = I.getType();
  if (isa<Constant>(I.getOperand(0)) &&
      I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) {
    SDValue Op2 = getValue(I.getOperand(1));
    setValue(&I, DAG.getNode(ISD::FNEG, getCurSDLoc(),
                             Op2.getValueType(), Op2));
    return;
  }
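  // E.g. "fsub double -0.0, %x" becomes a single ISD::FNEG node. The negative
  // zero constant matters: -0.0 - X negates even signed zeros, whereas
  // 0.0 - X would yield +0.0 (not -0.0) for X == +0.0.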

  visitBinary(I, ISD::FSUB);
}

void SelectionDAGBuilder::visitUnary(const User &I, unsigned Opcode) {
  SDNodeFlags Flags;

  SDValue Op = getValue(I.getOperand(0));
  SDValue UnNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op.getValueType(),
                                    Op, Flags);
  setValue(&I, UnNodeValue);
}

void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) {
  SDNodeFlags Flags;
  if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
    Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
    Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
  }
  if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I)) {
    Flags.setExact(ExactOp->isExact());
  }

  SDValue Op1 = getValue(I.getOperand(0));
  SDValue Op2 = getValue(I.getOperand(1));
  SDValue BinNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(),
                                     Op1, Op2, Flags);
  setValue(&I, BinNodeValue);
}

void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
  SDValue Op1 = getValue(I.getOperand(0));
  SDValue Op2 = getValue(I.getOperand(1));

  EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
      Op1.getValueType(), DAG.getDataLayout());

  // Coerce the shift amount to the right type if we can.
  if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
    unsigned ShiftSize = ShiftTy.getSizeInBits();
    unsigned Op2Size = Op2.getValueSizeInBits();
    SDLoc DL = getCurSDLoc();

    // If the operand is smaller than the shift count type, promote it.
    if (ShiftSize > Op2Size)
      Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2);

    // If the operand is larger than the shift count type but the shift
    // count type has enough bits to represent any shift value, truncate
    // it now. This is a common case and it exposes the truncate to
    // optimization early.
    else if (ShiftSize >= Log2_32_Ceil(Op2.getValueSizeInBits()))
      Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2);
    // Otherwise we'll need to temporarily settle for some other convenient
    // type. Type legalization will make adjustments once the shiftee is split.
    else
      Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32);
  }
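  // E.g. for "shl i64 %x, i64 %n" on a hypothetical target whose shift-amount
  // type is i8, %n is truncated to i8: Log2_32_Ceil(64) == 6 bits are enough
  // to encode any in-range shift amount, so the truncate loses nothing.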

  bool nuw = false;
  bool nsw = false;
  bool exact = false;

  if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
    if (const OverflowingBinaryOperator *OFBinOp =
            dyn_cast<const OverflowingBinaryOperator>(&I)) {
      nuw = OFBinOp->hasNoUnsignedWrap();
      nsw = OFBinOp->hasNoSignedWrap();
    }
    if (const PossiblyExactOperator *ExactOp =
            dyn_cast<const PossiblyExactOperator>(&I))
      exact = ExactOp->isExact();
  }
  SDNodeFlags Flags;
  Flags.setExact(exact);
  Flags.setNoSignedWrap(nsw);
  Flags.setNoUnsignedWrap(nuw);
  SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
                            Flags);
  setValue(&I, Res);
}

void SelectionDAGBuilder::visitSDiv(const User &I) {
  SDValue Op1 = getValue(I.getOperand(0));
  SDValue Op2 = getValue(I.getOperand(1));

  SDNodeFlags Flags;
  Flags.setExact(isa<PossiblyExactOperator>(&I) &&
                 cast<PossiblyExactOperator>(&I)->isExact());
  setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
                           Op2, Flags));
}

void SelectionDAGBuilder::visitICmp(const User &I) {
  ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
  if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
    predicate = IC->getPredicate();
  else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
    predicate = ICmpInst::Predicate(IC->getPredicate());
  SDValue Op1 = getValue(I.getOperand(0));
  SDValue Op2 = getValue(I.getOperand(1));
  ISD::CondCode Opcode = getICmpCondCode(predicate);

  auto &TLI = DAG.getTargetLoweringInfo();
  EVT MemVT =
      TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());

  // If a pointer's DAG type is larger than its memory type then the DAG values
  // are zero-extended. This breaks signed comparisons so truncate back to the
  // underlying type before doing the compare.
  if (Op1.getValueType() != MemVT) {
    Op1 = DAG.getPtrExtOrTrunc(Op1, getCurSDLoc(), MemVT);
    Op2 = DAG.getPtrExtOrTrunc(Op2, getCurSDLoc(), MemVT);
  }
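  // E.g. on a target where pointers live in 64-bit registers but occupy only
  // 32 bits in memory, two pointers compared with "icmp slt" must first be
  // truncated to i32; comparing the zero-extended i64 values would give the
  // wrong answer whenever the sign bits of the i32 values differ.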

  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                        I.getType());
  setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
}

void SelectionDAGBuilder::visitFCmp(const User &I) {
  FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
  if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
    predicate = FC->getPredicate();
  else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
    predicate = FCmpInst::Predicate(FC->getPredicate());
  SDValue Op1 = getValue(I.getOperand(0));
  SDValue Op2 = getValue(I.getOperand(1));

  ISD::CondCode Condition = getFCmpCondCode(predicate);
  auto *FPMO = dyn_cast<FPMathOperator>(&I);
  if ((FPMO && FPMO->hasNoNaNs()) || TM.Options.NoNaNsFPMath)
    Condition = getFCmpCodeWithoutNaN(Condition);

  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                        I.getType());
  setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
}

// Check that every user of the select's condition is itself a select; if so,
// converting the condition's compare into a min/max leaves no other user of
// the compare behind.
static bool hasOnlySelectUsers(const Value *Cond) {
  return llvm::all_of(Cond->users(), [](const Value *V) {
    return isa<SelectInst>(V);
  });
}

void SelectionDAGBuilder::visitSelect(const User &I) {
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
                  ValueVTs);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0) return;

  SmallVector<SDValue, 4> Values(NumValues);
  SDValue Cond = getValue(I.getOperand(0));
  SDValue LHSVal = getValue(I.getOperand(1));
  SDValue RHSVal = getValue(I.getOperand(2));
  SmallVector<SDValue, 1> BaseOps(1, Cond);
  ISD::NodeType OpCode =
      Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT;

  bool IsUnaryAbs = false;

  // Min/max matching is only viable if all output VTs are the same.
  if (is_splat(ValueVTs)) {
    EVT VT = ValueVTs[0];
    LLVMContext &Ctx = *DAG.getContext();
    auto &TLI = DAG.getTargetLoweringInfo();

    // We care about the legality of the operation after it has been type
    // legalized.
    while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal)
      VT = TLI.getTypeToTransformTo(Ctx, VT);

    // If the vselect is legal, assume we want to leave this as a vector setcc
    // + vselect. Otherwise, if this is going to be scalarized, we want to see
    // if min/max is legal on the scalar type.
    bool UseScalarMinMax = VT.isVector() &&
        !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT);

    Value *LHS, *RHS;
    auto SPR = matchSelectPattern(const_cast<User*>(&I), LHS, RHS);
    ISD::NodeType Opc = ISD::DELETED_NODE;
    switch (SPR.Flavor) {
    case SPF_UMAX: Opc = ISD::UMAX; break;
    case SPF_UMIN: Opc = ISD::UMIN; break;
    case SPF_SMAX: Opc = ISD::SMAX; break;
    case SPF_SMIN: Opc = ISD::SMIN; break;
    case SPF_FMINNUM:
      switch (SPR.NaNBehavior) {
      case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
      case SPNB_RETURNS_NAN:   Opc = ISD::FMINIMUM; break;
      case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break;
      case SPNB_RETURNS_ANY: {
        if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT))
          Opc = ISD::FMINNUM;
        else if (TLI.isOperationLegalOrCustom(ISD::FMINIMUM, VT))
          Opc = ISD::FMINIMUM;
        else if (UseScalarMinMax)
          Opc = TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType()) ?
              ISD::FMINNUM : ISD::FMINIMUM;
        break;
      }
      }
      break;
    case SPF_FMAXNUM:
      switch (SPR.NaNBehavior) {
      case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
      case SPNB_RETURNS_NAN:   Opc = ISD::FMAXIMUM; break;
      case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break;
      case SPNB_RETURNS_ANY:
        if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT))
          Opc = ISD::FMAXNUM;
        else if (TLI.isOperationLegalOrCustom(ISD::FMAXIMUM, VT))
          Opc = ISD::FMAXIMUM;
        else if (UseScalarMinMax)
          Opc = TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType()) ?
              ISD::FMAXNUM : ISD::FMAXIMUM;
        break;
      }
      break;
    case SPF_ABS:
      IsUnaryAbs = true;
      Opc = ISD::ABS;
      break;
    case SPF_NABS:
      // TODO: we need to produce sub(0, abs(X)).
    default: break;
    }
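    // E.g. "select (icmp ult %x, %y), %x, %y" matches SPF_UMIN above and,
    // when UMIN is legal for VT, is emitted as a single ISD::UMIN node in
    // place of the separate setcc and select nodes.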

    if (!IsUnaryAbs && Opc != ISD::DELETED_NODE &&
        (TLI.isOperationLegalOrCustom(Opc, VT) ||
         (UseScalarMinMax &&
          TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) &&
        // If the underlying comparison instruction is used by any other
        // instruction, the consumed instructions won't be destroyed, so it is
        // not profitable to convert to a min/max.
        hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) {
      OpCode = Opc;
      LHSVal = getValue(LHS);
      RHSVal = getValue(RHS);
      BaseOps.clear();
    }

    if (IsUnaryAbs) {
      OpCode = Opc;
      LHSVal = getValue(LHS);
      BaseOps.clear();
    }
  }

  if (IsUnaryAbs) {
    for (unsigned i = 0; i != NumValues; ++i) {
      Values[i] =
          DAG.getNode(OpCode, getCurSDLoc(),
                      LHSVal.getNode()->getValueType(LHSVal.getResNo() + i),
                      SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
    }
  } else {
    for (unsigned i = 0; i != NumValues; ++i) {
      SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end());
      Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
      Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
      Values[i] = DAG.getNode(
          OpCode, getCurSDLoc(),
          LHSVal.getNode()->getValueType(LHSVal.getResNo() + i), Ops);
    }
  }

  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
                           DAG.getVTList(ValueVTs), Values));
}

void SelectionDAGBuilder::visitTrunc(const User &I) {
  // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
  SDValue N = getValue(I.getOperand(0));
  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                        I.getType());
  setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
}

void SelectionDAGBuilder::visitZExt(const User &I) {
  // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
  // ZExt also can't be a cast to bool for the same reason. So, nothing to do.
  SDValue N = getValue(I.getOperand(0));
  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                        I.getType());
  setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N));
}

void SelectionDAGBuilder::visitSExt(const User &I) {
  // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
  // SExt also can't be a cast to bool for the same reason. So, nothing to do.
  SDValue N = getValue(I.getOperand(0));
  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                        I.getType());
  setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
}

void SelectionDAGBuilder::visitFPTrunc(const User &I) {
  // FPTrunc is never a no-op cast, no need to check.
  SDValue N = getValue(I.getOperand(0));
  SDLoc dl = getCurSDLoc();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
  setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
                           DAG.getTargetConstant(
                               0, dl, TLI.getPointerTy(DAG.getDataLayout()))));
}

void SelectionDAGBuilder::visitFPExt(const User &I) {
  // FPExt is never a no-op cast, no need to check.
  SDValue N = getValue(I.getOperand(0));
  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                        I.getType());
  setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
}

void SelectionDAGBuilder::visitFPToUI(const User &I) {
  // FPToUI is never a no-op cast, no need to check.
  SDValue N = getValue(I.getOperand(0));
  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                        I.getType());
  setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
}

void SelectionDAGBuilder::visitFPToSI(const User &I) {
  // FPToSI is never a no-op cast, no need to check.
  SDValue N = getValue(I.getOperand(0));
  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                        I.getType());
  setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
}

void SelectionDAGBuilder::visitUIToFP(const User &I) {
  // UIToFP is never a no-op cast, no need to check.
  SDValue N = getValue(I.getOperand(0));
  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                        I.getType());
  setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
}

void SelectionDAGBuilder::visitSIToFP(const User &I) {
  // SIToFP is never a no-op cast, no need to check.
  SDValue N = getValue(I.getOperand(0));
  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                        I.getType());
  setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
}

void SelectionDAGBuilder::visitPtrToInt(const User &I) {
  // What to do depends on the size of the integer and the size of the pointer.
  // We can either truncate, zero extend, or no-op, accordingly.
  SDValue N = getValue(I.getOperand(0));
  auto &TLI = DAG.getTargetLoweringInfo();
  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                        I.getType());
  EVT PtrMemVT =
      TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
  N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
  N = DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT);
  setValue(&I, N);
}

void SelectionDAGBuilder::visitIntToPtr(const User &I) {
  // What to do depends on the size of the integer and the size of the pointer.
  // We can either truncate, zero extend, or no-op, accordingly.
  SDValue N = getValue(I.getOperand(0));
  auto &TLI = DAG.getTargetLoweringInfo();
  EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
  EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
  N = DAG.getZExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
  N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), DestVT);
  setValue(&I, N);
}

void SelectionDAGBuilder::visitBitCast(const User &I) {
  SDValue N = getValue(I.getOperand(0));
  SDLoc dl = getCurSDLoc();
  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                        I.getType());

  // BitCast assures us that source and destination are the same size so this
  // is either a BITCAST or a no-op.
  if (DestVT != N.getValueType())
    setValue(&I, DAG.getNode(ISD::BITCAST, dl,
                             DestVT, N)); // convert types.
  // Check if the original LLVM IR Operand was a ConstantInt, because
  // getValue() might fold any kind of constant expression to an integer
  // constant and that is not what we are looking for. Only recognize a bitcast
  // of a genuine constant integer as an opaque constant.
  else if (ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
    setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
                                 /*isOpaque*/true));
  else
    setValue(&I, N); // noop cast.
}

void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const Value *SV = I.getOperand(0);
  SDValue N = getValue(SV);
  EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());

  unsigned SrcAS = SV->getType()->getPointerAddressSpace();
  unsigned DestAS = I.getType()->getPointerAddressSpace();

  if (!TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
    N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);

  setValue(&I, N);
}

void SelectionDAGBuilder::visitInsertElement(const User &I) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue InVec = getValue(I.getOperand(0));
  SDValue InVal = getValue(I.getOperand(1));
  SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
                                     TLI.getVectorIdxTy(DAG.getDataLayout()));
  setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
                           TLI.getValueType(DAG.getDataLayout(), I.getType()),
                           InVec, InVal, InIdx));
}

void SelectionDAGBuilder::visitExtractElement(const User &I) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue InVec = getValue(I.getOperand(0));
  SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
                                     TLI.getVectorIdxTy(DAG.getDataLayout()));
  setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
                           TLI.getValueType(DAG.getDataLayout(), I.getType()),
                           InVec, InIdx));
}

void SelectionDAGBuilder::visitShuffleVector(const User &I) {
  SDValue Src1 = getValue(I.getOperand(0));
  SDValue Src2 = getValue(I.getOperand(1));
  ArrayRef<int> Mask;
  if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
    Mask = SVI->getShuffleMask();
  else
    Mask = cast<ConstantExpr>(I).getShuffleMask();
  SDLoc DL = getCurSDLoc();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
  EVT SrcVT = Src1.getValueType();

  if (all_of(Mask, [](int Elem) { return Elem == 0; }) &&
      VT.isScalableVector()) {
    // Canonical splat form of first element of first input vector.
    SDValue FirstElt =
        DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT.getScalarType(), Src1,
                    DAG.getVectorIdxConstant(0, DL));
    setValue(&I, DAG.getNode(ISD::SPLAT_VECTOR, DL, VT, FirstElt));
    return;
  }

  // For now, we only handle splats for scalable vectors.
  // The DAGCombiner will perform a BUILD_VECTOR -> SPLAT_VECTOR transformation
  // for targets that support a SPLAT_VECTOR for non-scalable vector types.
  assert(!VT.isScalableVector() && "Unsupported scalable vector shuffle");

  unsigned SrcNumElts = SrcVT.getVectorNumElements();
  unsigned MaskNumElts = Mask.size();

  if (SrcNumElts == MaskNumElts) {
    setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask));
    return;
  }

  // Normalize the shuffle vector since mask and vector length don't match.
  if (SrcNumElts < MaskNumElts) {
    // The mask is longer than the source vectors. Concatenating the source
    // vectors can make the mask and vector lengths match.

    if (MaskNumElts % SrcNumElts == 0) {
      // Mask length is a multiple of the source vector length.
      // Check if the shuffle is some kind of concatenation of the input
      // vectors.
      unsigned NumConcat = MaskNumElts / SrcNumElts;
      bool IsConcat = true;
      SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
      for (unsigned i = 0; i != MaskNumElts; ++i) {
        int Idx = Mask[i];
        if (Idx < 0)
          continue;
        // Ensure the indices in each SrcVT sized piece are sequential and that
        // the same source is used for the whole piece.
        if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
            (ConcatSrcs[i / SrcNumElts] >= 0 &&
             ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
          IsConcat = false;
          break;
        }
        // Remember which source this index came from.
        ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
      }

      // The shuffle is concatenating multiple vectors together. Just emit
      // a CONCAT_VECTORS operation.
      if (IsConcat) {
        SmallVector<SDValue, 8> ConcatOps;
        for (auto Src : ConcatSrcs) {
          if (Src < 0)
            ConcatOps.push_back(DAG.getUNDEF(SrcVT));
          else if (Src == 0)
            ConcatOps.push_back(Src1);
          else
            ConcatOps.push_back(Src2);
        }
        setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps));
        return;
      }
    }

    unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
    unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
    EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
                                    PaddedMaskNumElts);

    // Pad both vectors with undefs to make them the same length as the mask.
    SDValue UndefVal = DAG.getUNDEF(SrcVT);

    SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
    SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
    MOps1[0] = Src1;
    MOps2[0] = Src2;

    Src1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1);
    Src2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2);

    // Readjust mask for new input vector length.
    SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
    for (unsigned i = 0; i != MaskNumElts; ++i) {
      int Idx = Mask[i];
      if (Idx >= (int)SrcNumElts)
        Idx -= SrcNumElts - PaddedMaskNumElts;
      MappedOps[i] = Idx;
    }

    SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps);

    // If the concatenated vector was padded, extract a subvector with the
    // correct number of elements.
    if (MaskNumElts != PaddedMaskNumElts)
      Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Result,
                           DAG.getVectorIdxConstant(0, DL));

    setValue(&I, Result);
    return;
  }

  if (SrcNumElts > MaskNumElts) {
    // Analyze the access pattern of the vector to see if we can extract
    // two subvectors and do the shuffle.
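    // E.g. mask <4,5> over two <8 x i32> sources only touches elements 4-5 of
    // Src1, so the code below extracts the <2 x i32> subvector starting at
    // index 4 and shuffles that instead of the full-width inputs.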
    int StartIdx[2] = { -1, -1 }; // StartIdx to extract from
    bool CanExtract = true;
    for (int Idx : Mask) {
      unsigned Input = 0;
      if (Idx < 0)
        continue;

      if (Idx >= (int)SrcNumElts) {
        Input = 1;
        Idx -= SrcNumElts;
      }

      // If all the indices come from the same MaskNumElts sized portion of
      // the sources we can use extract. Also make sure the extract wouldn't
      // extract past the end of the source.
      int NewStartIdx = alignDown(Idx, MaskNumElts);
      if (NewStartIdx + MaskNumElts > SrcNumElts ||
          (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
        CanExtract = false;
      // Make sure we always update StartIdx as we use it to track if all
      // elements are undef.
      StartIdx[Input] = NewStartIdx;
    }

    if (StartIdx[0] < 0 && StartIdx[1] < 0) {
      setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
      return;
    }
    if (CanExtract) {
      // Extract appropriate subvectors and generate a vector shuffle.
      for (unsigned Input = 0; Input < 2; ++Input) {
        SDValue &Src = Input == 0 ? Src1 : Src2;
        if (StartIdx[Input] < 0)
          Src = DAG.getUNDEF(VT);
        else {
          Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src,
                            DAG.getVectorIdxConstant(StartIdx[Input], DL));
        }
      }

      // Calculate new mask.
      SmallVector<int, 8> MappedOps(Mask.begin(), Mask.end());
      for (int &Idx : MappedOps) {
        if (Idx >= (int)SrcNumElts)
          Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
        else if (Idx >= 0)
          Idx -= StartIdx[0];
      }

      setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps));
      return;
    }
  }

  // We can't use either concat vectors or extract subvectors, so fall back
  // to replacing the shuffle with extract and build vector.
  EVT EltVT = VT.getVectorElementType();
  SmallVector<SDValue,8> Ops;
  for (int Idx : Mask) {
    SDValue Res;

    if (Idx < 0) {
      Res = DAG.getUNDEF(EltVT);
    } else {
      SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
      if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;

      Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Src,
                        DAG.getVectorIdxConstant(Idx, DL));
    }

    Ops.push_back(Res);
  }

  setValue(&I, DAG.getBuildVector(VT, DL, Ops));
}

void SelectionDAGBuilder::visitInsertValue(const User &I) {
  ArrayRef<unsigned> Indices;
  if (const InsertValueInst *IV = dyn_cast<InsertValueInst>(&I))
    Indices = IV->getIndices();
  else
    Indices = cast<ConstantExpr>(&I)->getIndices();

  const Value *Op0 = I.getOperand(0);
  const Value *Op1 = I.getOperand(1);
  Type *AggTy = I.getType();
  Type *ValTy = Op1->getType();
  bool IntoUndef = isa<UndefValue>(Op0);
  bool FromUndef = isa<UndefValue>(Op1);

  unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
  SmallVector<EVT, 4> ValValueVTs;
  ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);

  unsigned NumAggValues = AggValueVTs.size();
  unsigned NumValValues = ValValueVTs.size();
  SmallVector<SDValue, 4> Values(NumAggValues);

  // Ignore an insertvalue that produces an empty object.
  if (!NumAggValues) {
    setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
    return;
  }

  SDValue Agg = getValue(Op0);
  unsigned i = 0;
  // Copy the beginning value(s) from the original aggregate.
  for (; i != LinearIndex; ++i)
    Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
                SDValue(Agg.getNode(), Agg.getResNo() + i);
  // Copy values from the inserted value(s).
  if (NumValValues) {
    SDValue Val = getValue(Op1);
    for (; i != LinearIndex + NumValValues; ++i)
      Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
                  SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
  }
  // Copy remaining value(s) from the original aggregate.
  for (; i != NumAggValues; ++i)
    Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
                SDValue(Agg.getNode(), Agg.getResNo() + i);

  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
                           DAG.getVTList(AggValueVTs), Values));
}

void SelectionDAGBuilder::visitExtractValue(const User &I) {
  ArrayRef<unsigned> Indices;
  if (const ExtractValueInst *EV = dyn_cast<ExtractValueInst>(&I))
    Indices = EV->getIndices();
  else
    Indices = cast<ConstantExpr>(&I)->getIndices();

  const Value *Op0 = I.getOperand(0);
  Type *AggTy = Op0->getType();
  Type *ValTy = I.getType();
  bool OutOfUndef = isa<UndefValue>(Op0);

  unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SmallVector<EVT, 4> ValValueVTs;
  ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);

  unsigned NumValValues = ValValueVTs.size();

  // Ignore an extractvalue that produces an empty object.
  if (!NumValValues) {
    setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
    return;
  }

  SmallVector<SDValue, 4> Values(NumValValues);

  SDValue Agg = getValue(Op0);
  // Copy out the selected value(s).
  for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
    Values[i - LinearIndex] =
        OutOfUndef ?
            DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
            SDValue(Agg.getNode(), Agg.getResNo() + i);

  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
                           DAG.getVTList(ValValueVTs), Values));
}

void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
  Value *Op0 = I.getOperand(0);
  // Note that the pointer operand may be a vector of pointers. Take the scalar
  // element which holds a pointer.
  unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
  SDValue N = getValue(Op0);
  SDLoc dl = getCurSDLoc();
  auto &TLI = DAG.getTargetLoweringInfo();
  MVT PtrTy = TLI.getPointerTy(DAG.getDataLayout(), AS);
  MVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout(), AS);

  // Normalize Vector GEP - all scalar operands should be converted to the
  // splat vector.
  bool IsVectorGEP = I.getType()->isVectorTy();
  ElementCount VectorElementCount =
      IsVectorGEP ? cast<VectorType>(I.getType())->getElementCount()
                  : ElementCount(0, false);

  if (IsVectorGEP && !N.getValueType().isVector()) {
    LLVMContext &Context = *DAG.getContext();
    EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorElementCount);
    if (VectorElementCount.Scalable)
      N = DAG.getSplatVector(VT, dl, N);
    else
      N = DAG.getSplatBuildVector(VT, dl, N);
  }

  for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offset = DL->getStructLayout(StTy)->getElementOffset(Field);

        // In an inbounds GEP with an offset that is nonnegative even when
        // interpreted as signed, assume there is no unsigned overflow.
        SDNodeFlags Flags;
        if (int64_t(Offset) >= 0 && cast<GEPOperator>(I).isInBounds())
          Flags.setNoUnsignedWrap(true);

        N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
                        DAG.getConstant(Offset, dl, N.getValueType()), Flags);
      }
    } else {
      // IdxSize is the width of the arithmetic according to IR semantics.
      // In SelectionDAG, we may prefer to do arithmetic in a wider bitwidth
      // (and fix up the result later).
      unsigned IdxSize = DAG.getDataLayout().getIndexSizeInBits(AS);
      MVT IdxTy = MVT::getIntegerVT(IdxSize);
      TypeSize ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
      // We intentionally mask away the high bits here; ElementSize may not
      // fit in IdxTy.
      APInt ElementMul(IdxSize, ElementSize.getKnownMinSize());
      bool ElementScalable = ElementSize.isScalable();

      // If this is a scalar constant or a splat vector of constants,
      // handle it quickly.
      const auto *C = dyn_cast<Constant>(Idx);
      if (C && isa<VectorType>(C->getType()))
        C = C->getSplatValue();

      const auto *CI = dyn_cast_or_null<ConstantInt>(C);
      if (CI && CI->isZero())
        continue;
      if (CI && !ElementScalable) {
        APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize);
        LLVMContext &Context = *DAG.getContext();
        SDValue OffsVal;
        if (IsVectorGEP)
          OffsVal = DAG.getConstant(
              Offs, dl, EVT::getVectorVT(Context, IdxTy, VectorElementCount));
        else
          OffsVal = DAG.getConstant(Offs, dl, IdxTy);

        // In an inbounds GEP with an offset that is nonnegative even when
        // interpreted as signed, assume there is no unsigned overflow.
        SDNodeFlags Flags;
        if (Offs.isNonNegative() && cast<GEPOperator>(I).isInBounds())
          Flags.setNoUnsignedWrap(true);

        OffsVal = DAG.getSExtOrTrunc(OffsVal, dl, N.getValueType());

        N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal, Flags);
        continue;
      }
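      // E.g. "getelementptr inbounds i32, i32* %p, i64 4" takes the constant
      // fast path above: ElementMul = 4, Offs = 16, and the GEP folds to a
      // single (add %p, 16) node with the nuw flag set.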

      // N = N + Idx * ElementMul;
      SDValue IdxN = getValue(Idx);

      if (!IdxN.getValueType().isVector() && IsVectorGEP) {
        EVT VT = EVT::getVectorVT(*DAG.getContext(), IdxN.getValueType(),
                                  VectorElementCount);
        if (VectorElementCount.Scalable)
          IdxN = DAG.getSplatVector(VT, dl, IdxN);
        else
          IdxN = DAG.getSplatBuildVector(VT, dl, IdxN);
      }

      // If the index is smaller or larger than intptr_t, truncate or extend
      // it.
      IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());

      if (ElementScalable) {
        EVT VScaleTy = N.getValueType().getScalarType();
        SDValue VScale = DAG.getNode(
            ISD::VSCALE, dl, VScaleTy,
            DAG.getConstant(ElementMul.getZExtValue(), dl, VScaleTy));
        if (IsVectorGEP)
          VScale = DAG.getSplatVector(N.getValueType(), dl, VScale);
        IdxN = DAG.getNode(ISD::MUL, dl, N.getValueType(), IdxN, VScale);
      } else {
        // If this is a multiply by a power of two, turn it into a shl
        // immediately. This is a very common case.
        if (ElementMul != 1) {
          if (ElementMul.isPowerOf2()) {
            unsigned Amt = ElementMul.logBase2();
            IdxN = DAG.getNode(ISD::SHL, dl,
                               N.getValueType(), IdxN,
                               DAG.getConstant(Amt, dl, IdxN.getValueType()));
          } else {
            SDValue Scale = DAG.getConstant(ElementMul.getZExtValue(), dl,
                                            IdxN.getValueType());
            IdxN = DAG.getNode(ISD::MUL, dl,
                               N.getValueType(), IdxN, Scale);
          }
        }
      }
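      // For a scalable element type such as <vscale x 4 x i32>, ElementMul
      // holds only the known-minimum size; the runtime byte offset is
      // Idx * ElementMul * vscale, which is why the scalable path above
      // multiplies by an ISD::VSCALE node rather than a plain constant.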

      N = DAG.getNode(ISD::ADD, dl,
                      N.getValueType(), N, IdxN);
    }
  }

  if (PtrMemTy != PtrTy && !cast<GEPOperator>(I).isInBounds())
    N = DAG.getPtrExtendInReg(N, dl, PtrMemTy);

  setValue(&I, N);
}

void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
  // If this is a fixed sized alloca in the entry block of the function,
  // allocate it statically on the stack.
  if (FuncInfo.StaticAllocaMap.count(&I))
    return;   // getValue will auto-populate this.

  SDLoc dl = getCurSDLoc();
  Type *Ty = I.getAllocatedType();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  auto &DL = DAG.getDataLayout();
  uint64_t TySize = DL.getTypeAllocSize(Ty);
  MaybeAlign Alignment = std::max(DL.getPrefTypeAlign(Ty), I.getAlign());

  SDValue AllocSize = getValue(I.getArraySize());

  EVT IntPtr = TLI.getPointerTy(DAG.getDataLayout(), DL.getAllocaAddrSpace());
  if (AllocSize.getValueType() != IntPtr)
    AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);

  AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr,
                          AllocSize,
                          DAG.getConstant(TySize, dl, IntPtr));

  // Handle alignment. If the requested alignment is less than or equal to
  // the stack alignment, ignore it. If it is greater, record it in the
  // DYNAMIC_STACKALLOC node.
  Align StackAlign = DAG.getSubtarget().getFrameLowering()->getStackAlign();
  if (*Alignment <= StackAlign)
    Alignment = None;

  const uint64_t StackAlignMask = StackAlign.value() - 1U;
  // Round the size of the allocation up to the stack alignment size
  // by adding SA-1 to the size. This doesn't overflow because we're computing
  // an address inside an alloca.
  SDNodeFlags Flags;
  Flags.setNoUnsignedWrap(true);
  AllocSize = DAG.getNode(ISD::ADD, dl, AllocSize.getValueType(), AllocSize,
                          DAG.getConstant(StackAlignMask, dl, IntPtr), Flags);

  // Mask out the low bits for alignment purposes.
  AllocSize = DAG.getNode(ISD::AND, dl, AllocSize.getValueType(), AllocSize,
                          DAG.getConstant(~StackAlignMask, dl, IntPtr));
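  // E.g. with a 16-byte stack alignment the two nodes above compute
  //   AllocSize = (AllocSize + 15) & ~15
  // so the dynamically allocated area keeps the stack pointer aligned.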

  SDValue Ops[] = {
      getRoot(), AllocSize,
      DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
  SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
  SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
  setValue(&I, DSA);
  DAG.setRoot(DSA.getValue(1));

  assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
}

void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
  if (I.isAtomic())
    return visitAtomicLoad(I);

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const Value *SV = I.getOperand(0);
  if (TLI.supportSwiftError()) {
    // Swifterror values can come from either a function parameter with
    // swifterror attribute or an alloca with swifterror attribute.
    if (const Argument *Arg = dyn_cast<Argument>(SV)) {
      if (Arg->hasSwiftErrorAttr())
        return visitLoadFromSwiftError(I);
    }

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
      if (Alloca->isSwiftError())
        return visitLoadFromSwiftError(I);
    }
  }

  SDValue Ptr = getValue(SV);

  Type *Ty = I.getType();
  Align Alignment = I.getAlign();

  AAMDNodes AAInfo;
  I.getAAMetadata(AAInfo);
  const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);

  SmallVector<EVT, 4> ValueVTs, MemVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &MemVTs, &Offsets);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0)
    return;

  bool isVolatile = I.isVolatile();

  SDValue Root;
  bool ConstantMemory = false;
  if (isVolatile)
    // Serialize volatile loads with other side effects.
    Root = getRoot();
  else if (NumValues > MaxParallelChains)
    Root = getMemoryRoot();
  else if (AA &&
           AA->pointsToConstantMemory(MemoryLocation(
               SV,
               LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
               AAInfo))) {
    // Do not serialize (non-volatile) loads of constant memory with anything.
    Root = DAG.getEntryNode();
    ConstantMemory = true;
  } else {
    // Do not serialize non-volatile loads against each other.
    Root = DAG.getRoot();
  }

  SDLoc dl = getCurSDLoc();

  if (isVolatile)
    Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);

  // An aggregate load cannot wrap around the address space, so offsets to its
  // parts don't wrap either.
  SDNodeFlags Flags;
  Flags.setNoUnsignedWrap(true);

  SmallVector<SDValue, 4> Values(NumValues);
  SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
  EVT PtrVT = Ptr.getValueType();

  MachineMemOperand::Flags MMOFlags
      = TLI.getLoadMemOperandFlags(I, DAG.getDataLayout());

  unsigned ChainI = 0;
  for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
    // Serializing loads here may result in excessive register pressure, and
    // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
    // could recover a bit by hoisting nodes upward in the chain by recognizing
    // they are side-effect free or do not alias. The optimizer should really
    // avoid this case by converting large object/array copies to llvm.memcpy
    // (MaxParallelChains should always remain as failsafe).
    if (ChainI == MaxParallelChains) {
      assert(PendingLoads.empty() && "PendingLoads must be serialized first");
      SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                                  makeArrayRef(Chains.data(), ChainI));
      Root = Chain;
      ChainI = 0;
    }
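    // E.g. a first-class aggregate load that expands to more parts than
    // MaxParallelChains gets its chains joined into a TokenFactor every
    // MaxParallelChains parts, bounding how wide any single chain node grows.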
    SDValue A = DAG.getNode(ISD::ADD, dl,
                            PtrVT, Ptr,
                            DAG.getConstant(Offsets[i], dl, PtrVT),
                            Flags);

    SDValue L = DAG.getLoad(MemVTs[i], dl, Root, A,
                            MachinePointerInfo(SV, Offsets[i]), Alignment,
                            MMOFlags, AAInfo, Ranges);
    Chains[ChainI] = L.getValue(1);

    if (MemVTs[i] != ValueVTs[i])
      L = DAG.getZExtOrTrunc(L, dl, ValueVTs[i]);

    Values[i] = L;
  }

  if (!ConstantMemory) {
    SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                                makeArrayRef(Chains.data(), ChainI));
    if (isVolatile)
      DAG.setRoot(Chain);
    else
      PendingLoads.push_back(Chain);
  }

  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
                           DAG.getVTList(ValueVTs), Values));
}

void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
  assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
         "call visitStoreToSwiftError when backend supports swifterror");

  SmallVector<EVT, 4> ValueVTs;
  SmallVector<uint64_t, 4> Offsets;
  const Value *SrcV = I.getOperand(0);
  ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
                  SrcV->getType(), ValueVTs, &Offsets);
  assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
         "expect a single EVT for swifterror");

  SDValue Src = getValue(SrcV);
  // Create a virtual register, then update the virtual register.
  Register VReg =
      SwiftError.getOrCreateVRegDefAt(&I, FuncInfo.MBB, I.getPointerOperand());
  // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
  // Chain can be getRoot or getControlRoot.
  SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
                                      SDValue(Src.getNode(), Src.getResNo()));
  DAG.setRoot(CopyNode);
}

void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
  assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
         "call visitLoadFromSwiftError when backend supports swifterror");

  assert(!I.isVolatile() &&
         !I.hasMetadata(LLVMContext::MD_nontemporal) &&
         !I.hasMetadata(LLVMContext::MD_invariant_load) &&
         "load_from_swift_error must not be volatile, non-temporal, or "
         "invariant");

  const Value *SV = I.getOperand(0);
  Type *Ty = I.getType();
  AAMDNodes AAInfo;
  I.getAAMetadata(AAInfo);
  assert(
      (!AA ||
       !AA->pointsToConstantMemory(MemoryLocation(
           SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
           AAInfo))) &&
      "load_from_swift_error should not be constant memory");

  SmallVector<EVT, 4> ValueVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
                  ValueVTs, &Offsets);
  assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
         "expect a single EVT for swifterror");

  // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
  SDValue L = DAG.getCopyFromReg(
      getRoot(), getCurSDLoc(),
      SwiftError.getOrCreateVRegUseAt(&I, FuncInfo.MBB, SV), ValueVTs[0]);

  setValue(&I, L);
}

void SelectionDAGBuilder::visitStore(const StoreInst &I) {
  if (I.isAtomic())
    return visitAtomicStore(I);

  const Value *SrcV = I.getOperand(0);
  const Value *PtrV = I.getOperand(1);

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.supportSwiftError()) {
    // Swifterror values can come from either a function parameter with
    // swifterror attribute or an alloca with swifterror attribute.
    if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
      if (Arg->hasSwiftErrorAttr())
        return visitStoreToSwiftError(I);
    }

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
      if (Alloca->isSwiftError())
        return visitStoreToSwiftError(I);
    }
  }

  SmallVector<EVT, 4> ValueVTs, MemVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
                  SrcV->getType(), ValueVTs, &MemVTs, &Offsets);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0)
    return;

  // Get the lowered operands. Note that we do this after
  // checking if NumValues is zero, because with zero values
  // the operands won't have values in the map.
  SDValue Src = getValue(SrcV);
  SDValue Ptr = getValue(PtrV);

  SDValue Root = I.isVolatile() ? getRoot() : getMemoryRoot();
  SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
  SDLoc dl = getCurSDLoc();
  Align Alignment = I.getAlign();
  AAMDNodes AAInfo;
  I.getAAMetadata(AAInfo);

  auto MMOFlags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());

  // An aggregate store cannot wrap around the address space, so offsets to its
  // parts don't wrap either.
  SDNodeFlags Flags;
  Flags.setNoUnsignedWrap(true);

  unsigned ChainI = 0;
  for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
    // See visitLoad comments.
    if (ChainI == MaxParallelChains) {
      SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                                  makeArrayRef(Chains.data(), ChainI));
      Root = Chain;
      ChainI = 0;
    }
    SDValue Add = DAG.getMemBasePlusOffset(Ptr, Offsets[i], dl, Flags);
    SDValue Val = SDValue(Src.getNode(), Src.getResNo() + i);
    if (MemVTs[i] != ValueVTs[i])
      Val = DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
    SDValue St =
        DAG.getStore(Root, dl, Val, Add, MachinePointerInfo(PtrV, Offsets[i]),
                     Alignment, MMOFlags, AAInfo);
    Chains[ChainI] = St;
  }

  SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                                  makeArrayRef(Chains.data(), ChainI));
  DAG.setRoot(StoreNode);
}

void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
                                           bool IsCompressing) {
  SDLoc sdl = getCurSDLoc();

  auto getMaskedStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
                               MaybeAlign &Alignment) {
    // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
    Src0 = I.getArgOperand(0);
    Ptr = I.getArgOperand(1);
    Alignment = cast<ConstantInt>(I.getArgOperand(2))->getMaybeAlignValue();
    Mask = I.getArgOperand(3);
  };
  auto getCompressingStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
                                    MaybeAlign &Alignment) {
    // llvm.masked.compressstore.*(Src0, Ptr, Mask)
    Src0 = I.getArgOperand(0);
    Ptr = I.getArgOperand(1);
    Mask = I.getArgOperand(2);
    Alignment = None;
  };
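  // The two forms differ only in their trailing arguments, e.g.:
  //   call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %v, <4 x i32>* %p,
  //                                              i32 4, <4 x i1> %m)
  //   call void @llvm.masked.compressstore.v4i32(<4 x i32> %v, i32* %p,
  //                                              <4 x i1> %m)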
4208
4209 Value *PtrOperand, *MaskOperand, *Src0Operand;
4210 MaybeAlign Alignment;
4211 if (IsCompressing)
4212 getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4213 else
4214 getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4215
4216 SDValue Ptr = getValue(PtrOperand);
4217 SDValue Src0 = getValue(Src0Operand);
4218 SDValue Mask = getValue(MaskOperand);
4219 SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4220
4221 EVT VT = Src0.getValueType();
4222 if (!Alignment)
4223 Alignment = DAG.getEVTAlign(VT);
4224
4225 AAMDNodes AAInfo;
4226 I.getAAMetadata(AAInfo);
4227
4228 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4229 MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
4230 // TODO: Make MachineMemOperands aware of scalable
4231 // vectors.
4232 VT.getStoreSize().getKnownMinSize(), *Alignment, AAInfo);
4233 SDValue StoreNode =
4234 DAG.getMaskedStore(getMemoryRoot(), sdl, Src0, Ptr, Offset, Mask, VT, MMO,
4235 ISD::UNINDEXED, false /* Truncating */, IsCompressing);
4236 DAG.setRoot(StoreNode);
4237 setValue(&I, StoreNode);
4238 }
4239
4240 // Get a uniform base for the Gather/Scatter intrinsic.
4241 // The pointer operand of the Gather/Scatter intrinsic is a vector of pointers.
4242 // We try to represent it as a base pointer + vector of indices.
4243 // Usually, the vector of pointers comes from a 'getelementptr' instruction.
4244 // The first operand of the GEP may be a single pointer or a vector of pointers.
4245 // Example:
4246 // %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind
4247 // or
4248 // %gep.ptr = getelementptr i32, i32* %ptr, <8 x i32> %ind
4249 // %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, ..
4250 //
4251 // When the first GEP operand is a single pointer, it is the uniform base we
4252 // are looking for. If the first operand of the GEP is a splat vector, we
4253 // extract the splat value and use it as a uniform base.
4254 // In all other cases the function returns 'false'.
4255 static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index,
4256 ISD::MemIndexType &IndexType, SDValue &Scale,
4257 SelectionDAGBuilder *SDB, const BasicBlock *CurBB) {
4258 SelectionDAG& DAG = SDB->DAG;
4259 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4260 const DataLayout &DL = DAG.getDataLayout();
4261
4262 assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
4263
4264 // Handle splat constant pointer.
4265 if (auto *C = dyn_cast<Constant>(Ptr)) {
4266 C = C->getSplatValue();
4267 if (!C)
4268 return false;
4269
4270 Base = SDB->getValue(C);
4271
4272 unsigned NumElts = cast<FixedVectorType>(Ptr->getType())->getNumElements();
4273 EVT VT = EVT::getVectorVT(*DAG.getContext(), TLI.getPointerTy(DL), NumElts);
4274 Index = DAG.getConstant(0, SDB->getCurSDLoc(), VT);
4275 IndexType = ISD::SIGNED_SCALED;
4276 Scale = DAG.getTargetConstant(1, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4277 return true;
4278 }
4279
4280 const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4281 if (!GEP || GEP->getParent() != CurBB)
4282 return false;
4283
4284 if (GEP->getNumOperands() != 2)
4285 return false;
4286
4287 const Value *BasePtr = GEP->getPointerOperand();
4288 const Value *IndexVal = GEP->getOperand(GEP->getNumOperands() - 1);
4289
4290 // Make sure the base is scalar and the index is a vector.
4291 if (BasePtr->getType()->isVectorTy() || !IndexVal->getType()->isVectorTy())
4292 return false;
4293
4294 Base = SDB->getValue(BasePtr);
4295 Index = SDB->getValue(IndexVal);
4296 IndexType = ISD::SIGNED_SCALED;
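// The extracted index values are in units of the GEP element type, so the
// element's alloc size is the scale the target must apply to each index.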
4297 Scale = DAG.getTargetConstant(
4298 DL.getTypeAllocSize(GEP->getResultElementType()),
4299 SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4300 return true;
4301 }
4302
4303 void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
4304 SDLoc sdl = getCurSDLoc();
4305
4306 // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask)
4307 const Value *Ptr = I.getArgOperand(1);
4308 SDValue Src0 = getValue(I.getArgOperand(0));
4309 SDValue Mask = getValue(I.getArgOperand(3));
4310 EVT VT = Src0.getValueType();
4311 Align Alignment = cast<ConstantInt>(I.getArgOperand(2))
4312 ->getMaybeAlignValue()
4313 .getValueOr(DAG.getEVTAlign(VT));
4314 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4315
4316 AAMDNodes AAInfo;
4317 I.getAAMetadata(AAInfo);
4318
4319 SDValue Base;
4320 SDValue Index;
4321 ISD::MemIndexType IndexType;
4322 SDValue Scale;
4323 bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
4324 I.getParent());
4325
4326 unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
4327 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4328 MachinePointerInfo(AS), MachineMemOperand::MOStore,
4329 // TODO: Make MachineMemOperands aware of scalable
4330 // vectors.
4331 MemoryLocation::UnknownSize, Alignment, AAInfo);
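// If no uniform base could be extracted, scatter through the original
// vector of pointers: a zero base, the pointers themselves as the index,
// and a scale of 1.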
4332 if (!UniformBase) {
4333 Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4334 Index = getValue(Ptr);
4335 IndexType = ISD::SIGNED_SCALED;
4336 Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4337 }
4338 SDValue Ops[] = { getMemoryRoot(), Src0, Mask, Base, Index, Scale };
4339 SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
4340 Ops, MMO, IndexType);
4341 DAG.setRoot(Scatter);
4342 setValue(&I, Scatter);
4343 }
4344
4345 void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
4346 SDLoc sdl = getCurSDLoc();
4347
4348 auto getMaskedLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4349 MaybeAlign &Alignment) {
4350 // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
4351 Ptr = I.getArgOperand(0);
4352 Alignment = cast<ConstantInt>(I.getArgOperand(1))->getMaybeAlignValue();
4353 Mask = I.getArgOperand(2);
4354 Src0 = I.getArgOperand(3);
4355 };
4356 auto getExpandingLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4357 MaybeAlign &Alignment) {
4358 // @llvm.masked.expandload.*(Ptr, Mask, Src0)
4359 Ptr = I.getArgOperand(0);
4360 Alignment = None;
4361 Mask = I.getArgOperand(1);
4362 Src0 = I.getArgOperand(2);
4363 };
4364
4365 Value *PtrOperand, *MaskOperand, *Src0Operand;
4366 MaybeAlign Alignment;
4367 if (IsExpanding)
4368 getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4369 else
4370 getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4371
4372 SDValue Ptr = getValue(PtrOperand);
4373 SDValue Src0 = getValue(Src0Operand);
4374 SDValue Mask = getValue(MaskOperand);
4375 SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4376
4377 EVT VT = Src0.getValueType();
4378 if (!Alignment)
4379 Alignment = DAG.getEVTAlign(VT);
4380
4381 AAMDNodes AAInfo;
4382 I.getAAMetadata(AAInfo);
4383 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
4384
4385 // Do not serialize masked loads of constant memory with anything.
4386 MemoryLocation ML;
4387 if (VT.isScalableVector())
4388 ML = MemoryLocation(PtrOperand);
4389 else
4390 ML = MemoryLocation(PtrOperand, LocationSize::precise(
4391 DAG.getDataLayout().getTypeStoreSize(I.getType())),
4392 AAInfo);
4393 bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
4394
4395 SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
4396
4397 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4398 MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
4399 // TODO: Make MachineMemOperands aware of scalable
4400 // vectors.
4401 VT.getStoreSize().getKnownMinSize(), *Alignment, AAInfo, Ranges);
4402
4403 SDValue Load =
4404 DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Offset, Mask, Src0, VT, MMO,
4405 ISD::UNINDEXED, ISD::NON_EXTLOAD, IsExpanding);
4406 if (AddToChain)
4407 PendingLoads.push_back(Load.getValue(1));
4408 setValue(&I, Load);
4409 }
4410
4411 void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
4412 SDLoc sdl = getCurSDLoc();
4413
4414 // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
4415 const Value *Ptr = I.getArgOperand(0);
4416 SDValue Src0 = getValue(I.getArgOperand(3));
4417 SDValue Mask = getValue(I.getArgOperand(2));
4418
4419 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4420 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4421 Align Alignment = cast<ConstantInt>(I.getArgOperand(1))
4422 ->getMaybeAlignValue()
4423 .getValueOr(DAG.getEVTAlign(VT));
4424
4425 AAMDNodes AAInfo;
4426 I.getAAMetadata(AAInfo);
4427 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
4428
4429 SDValue Root = DAG.getRoot();
4430 SDValue Base;
4431 SDValue Index;
4432 ISD::MemIndexType IndexType;
4433 SDValue Scale;
4434 bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
4435 I.getParent());
4436 unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
4437 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4438 MachinePointerInfo(AS), MachineMemOperand::MOLoad,
4439 // TODO: Make MachineMemOperands aware of scalable
4440 // vectors.
4441 MemoryLocation::UnknownSize, Alignment, AAInfo, Ranges);
4442
4443 if (!UniformBase) {
4444 Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4445 Index = getValue(Ptr);
4446 IndexType = ISD::SIGNED_SCALED;
4447 Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4448 }
4449 SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale };
4450 SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
4451 Ops, MMO, IndexType);
4452
4453 PendingLoads.push_back(Gather.getValue(1));
4454 setValue(&I, Gather);
4455 }
4456
4457 void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
4458 SDLoc dl = getCurSDLoc();
4459 AtomicOrdering SuccessOrdering = I.getSuccessOrdering();
4460 AtomicOrdering FailureOrdering = I.getFailureOrdering();
4461 SyncScope::ID SSID = I.getSyncScopeID();
4462
4463 SDValue InChain = getRoot();
4464
4465 MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
4466 SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
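// The node produces the loaded value, an i1 success flag, and the output
// chain, in that order.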
4467
4468 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4469 auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
4470
4471 MachineFunction &MF = DAG.getMachineFunction();
4472 MachineMemOperand *MMO = MF.getMachineMemOperand(
4473 MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4474 DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, SuccessOrdering,
4475 FailureOrdering);
4476
4477 SDValue L = DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
4478 dl, MemVT, VTs, InChain,
4479 getValue(I.getPointerOperand()),
4480 getValue(I.getCompareOperand()),
4481 getValue(I.getNewValOperand()), MMO);
4482
4483 SDValue OutChain = L.getValue(2);
4484
4485 setValue(&I, L);
4486 DAG.setRoot(OutChain);
4487 }
4488
4489 void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
4490 SDLoc dl = getCurSDLoc();
4491 ISD::NodeType NT;
4492 switch (I.getOperation()) {
4493 default: llvm_unreachable("Unknown atomicrmw operation");
4494 case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
4495 case AtomicRMWInst::Add: NT = ISD::ATOMIC_LOAD_ADD; break;
4496 case AtomicRMWInst::Sub: NT = ISD::ATOMIC_LOAD_SUB; break;
4497 case AtomicRMWInst::And: NT = ISD::ATOMIC_LOAD_AND; break;
4498 case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
4499 case AtomicRMWInst::Or: NT = ISD::ATOMIC_LOAD_OR; break;
4500 case AtomicRMWInst::Xor: NT = ISD::ATOMIC_LOAD_XOR; break;
4501 case AtomicRMWInst::Max: NT = ISD::ATOMIC_LOAD_MAX; break;
4502 case AtomicRMWInst::Min: NT = ISD::ATOMIC_LOAD_MIN; break;
4503 case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
4504 case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
4505 case AtomicRMWInst::FAdd: NT = ISD::ATOMIC_LOAD_FADD; break;
4506 case AtomicRMWInst::FSub: NT = ISD::ATOMIC_LOAD_FSUB; break;
4507 }
4508 AtomicOrdering Ordering = I.getOrdering();
4509 SyncScope::ID SSID = I.getSyncScopeID();
4510
4511 SDValue InChain = getRoot();
4512
4513 auto MemVT = getValue(I.getValOperand()).getSimpleValueType();
4514 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4515 auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
4516
4517 MachineFunction &MF = DAG.getMachineFunction();
4518 MachineMemOperand *MMO = MF.getMachineMemOperand(
4519 MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4520 DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, Ordering);
4521
4522 SDValue L =
4523 DAG.getAtomic(NT, dl, MemVT, InChain,
4524 getValue(I.getPointerOperand()), getValue(I.getValOperand()),
4525 MMO);
4526
4527 SDValue OutChain = L.getValue(1);
4528
4529 setValue(&I, L);
4530 DAG.setRoot(OutChain);
4531 }
4532
4533 void SelectionDAGBuilder::visitFence(const FenceInst &I) {
4534 SDLoc dl = getCurSDLoc();
4535 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4536 SDValue Ops[3];
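// Operands: the chain, the memory ordering, and the synchronization scope.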
4537 Ops[0] = getRoot();
4538 Ops[1] = DAG.getTargetConstant((unsigned)I.getOrdering(), dl,
4539 TLI.getFenceOperandTy(DAG.getDataLayout()));
4540 Ops[2] = DAG.getTargetConstant(I.getSyncScopeID(), dl,
4541 TLI.getFenceOperandTy(DAG.getDataLayout()));
4542 DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops));
4543 }
4544
4545 void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
4546 SDLoc dl = getCurSDLoc();
4547 AtomicOrdering Order = I.getOrdering();
4548 SyncScope::ID SSID = I.getSyncScopeID();
4549
4550 SDValue InChain = getRoot();
4551
4552 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4553 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4554 EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
4555
4556 if (!TLI.supportsUnalignedAtomics() &&
4557 I.getAlignment() < MemVT.getSizeInBits() / 8)
4558 report_fatal_error("Cannot generate unaligned atomic load");
4559
4560 auto Flags = TLI.getLoadMemOperandFlags(I, DAG.getDataLayout());
4561
4562 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4563 MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4564 I.getAlign(), AAMDNodes(), nullptr, SSID, Order);
4565
4566 InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
4567
4568 SDValue Ptr = getValue(I.getPointerOperand());
4569
4570 if (TLI.lowerAtomicLoadAsLoadSDNode(I)) {
4571 // TODO: Once this is better exercised by tests, it should be merged with
4572 // the normal path for loads to prevent future divergence.
4573 SDValue L = DAG.getLoad(MemVT, dl, InChain, Ptr, MMO);
4574 if (MemVT != VT)
4575 L = DAG.getPtrExtOrTrunc(L, dl, VT);
4576
4577 setValue(&I, L);
4578 SDValue OutChain = L.getValue(1);
4579 if (!I.isUnordered())
4580 DAG.setRoot(OutChain);
4581 else
4582 PendingLoads.push_back(OutChain);
4583 return;
4584 }
4585
4586 SDValue L = DAG.getAtomic(ISD::ATOMIC_LOAD, dl, MemVT, MemVT, InChain,
4587 Ptr, MMO);
4588
4589 SDValue OutChain = L.getValue(1);
4590 if (MemVT != VT)
4591 L = DAG.getPtrExtOrTrunc(L, dl, VT);
4592
4593 setValue(&I, L);
4594 DAG.setRoot(OutChain);
4595 }
4596
4597 void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
4598 SDLoc dl = getCurSDLoc();
4599
4600 AtomicOrdering Ordering = I.getOrdering();
4601 SyncScope::ID SSID = I.getSyncScopeID();
4602
4603 SDValue InChain = getRoot();
4604
4605 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4606 EVT MemVT =
4607 TLI.getMemValueType(DAG.getDataLayout(), I.getValueOperand()->getType());
4608
4609 if (I.getAlignment() < MemVT.getSizeInBits() / 8)
4610 report_fatal_error("Cannot generate unaligned atomic store");
4611
4612 auto Flags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
4613
4614 MachineFunction &MF = DAG.getMachineFunction();
4615 MachineMemOperand *MMO = MF.getMachineMemOperand(
4616 MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4617 I.getAlign(), AAMDNodes(), nullptr, SSID, Ordering);
4618
4619 SDValue Val = getValue(I.getValueOperand());
4620 if (Val.getValueType() != MemVT)
4621 Val = DAG.getPtrExtOrTrunc(Val, dl, MemVT);
4622 SDValue Ptr = getValue(I.getPointerOperand());
4623
4624 if (TLI.lowerAtomicStoreAsStoreSDNode(I)) {
4625 // TODO: Once this is better exercised by tests, it should be merged with
4626 // the normal path for stores to prevent future divergence.
4627 SDValue S = DAG.getStore(InChain, dl, Val, Ptr, MMO);
4628 DAG.setRoot(S);
4629 return;
4630 }
4631 SDValue OutChain = DAG.getAtomic(ISD::ATOMIC_STORE, dl, MemVT, InChain,
4632 Ptr, Val, MMO);
4633
4635 DAG.setRoot(OutChain);
4636 }
4637
4638 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
4639 /// node.
4640 void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
4641 unsigned Intrinsic) {
4642 // Ignore the callsite's attributes. A specific call site may be marked with
4643 // readnone, but the lowering code will expect the chain based on the
4644 // definition.
4645 const Function *F = I.getCalledFunction();
4646 bool HasChain = !F->doesNotAccessMemory();
4647 bool OnlyLoad = HasChain && F->onlyReadsMemory();
4648
4649 // Build the operand list.
4650 SmallVector<SDValue, 8> Ops;
4651 if (HasChain) { // If this intrinsic has side-effects, chainify it.
4652 if (OnlyLoad) {
4653 // We don't need to serialize loads against other loads.
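// (DAG.getRoot() reads the current root without flushing PendingLoads,
// unlike the getRoot() call below, so pending loads stay unordered with
// respect to this intrinsic.)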
4654 Ops.push_back(DAG.getRoot());
4655 } else {
4656 Ops.push_back(getRoot());
4657 }
4658 }
4659
4660 // Info is set by getTgtMemIntrinsic.
4661 TargetLowering::IntrinsicInfo Info;
4662 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4663 bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I,
4664 DAG.getMachineFunction(),
4665 Intrinsic);
4666
4667 // Add the intrinsic ID as an integer operand, unless it's a target memory intrinsic with a custom opcode.
4668 if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
4669 Info.opc == ISD::INTRINSIC_W_CHAIN)
4670 Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
4671 TLI.getPointerTy(DAG.getDataLayout())));
4672
4673 // Add all operands of the call to the operand list.
4674 for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
4675 const Value *Arg = I.getArgOperand(i);
4676 if (!I.paramHasAttr(i, Attribute::ImmArg)) {
4677 Ops.push_back(getValue(Arg));
4678 continue;
4679 }
4680
4681 // Use TargetConstant instead of a regular constant for immarg.
4682 EVT VT = TLI.getValueType(*DL, Arg->getType(), true);
4683 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Arg)) {
4684 assert(CI->getBitWidth() <= 64 &&
4685 "large intrinsic immediates not handled");
4686 Ops.push_back(DAG.getTargetConstant(*CI, SDLoc(), VT));
4687 } else {
4688 Ops.push_back(
4689 DAG.getTargetConstantFP(*cast<ConstantFP>(Arg), SDLoc(), VT));
4690 }
4691 }
4692
4693 SmallVector<EVT, 4> ValueVTs;
4694 ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
4695
4696 if (HasChain)
4697 ValueVTs.push_back(MVT::Other);
4698
4699 SDVTList VTs = DAG.getVTList(ValueVTs);
4700
4701 // Create the node.
4702 SDValue Result;
4703 if (IsTgtIntrinsic) {
4704 // This is a target intrinsic that touches memory.
4705 AAMDNodes AAInfo;
4706 I.getAAMetadata(AAInfo);
4707 Result =
4708 DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(), VTs, Ops, Info.memVT,
4709 MachinePointerInfo(Info.ptrVal, Info.offset),
4710 Info.align, Info.flags, Info.size, AAInfo);
4711 } else if (!HasChain) {
4712 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
4713 } else if (!I.getType()->isVoidTy()) {
4714 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
4715 } else {
4716 Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
4717 }
4718
4719 if (HasChain) {
4720 SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
4721 if (OnlyLoad)
4722 PendingLoads.push_back(Chain);
4723 else
4724 DAG.setRoot(Chain);
4725 }
4726
4727 if (!I.getType()->isVoidTy()) {
4728 if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
4729 EVT VT = TLI.getValueType(DAG.getDataLayout(), PTy);
4730 Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result);
4731 } else
4732 Result = lowerRangeToAssertZExt(DAG, I, Result);
4733
4734 MaybeAlign Alignment = I.getRetAlign();
4735 if (!Alignment)
4736 Alignment = F->getAttributes().getRetAlignment();
4737 // Insert `assertalign` node if there's an alignment.
4738 if (InsertAssertAlign && Alignment) {
4739 Result =
4740 DAG.getAssertAlign(getCurSDLoc(), Result, Alignment.valueOrOne());
4741 }
4742
4743 setValue(&I, Result);
4744 }
4745 }
4746
4747 /// GetSignificand - Get the significand and build it into a floating-point
4748 /// number with exponent of 1:
4749 ///
4750 /// Op = (Op & 0x007fffff) | 0x3f800000;
4751 ///
4752 /// where Op is the i32 bit representation of the floating-point value.
4753 static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) {
4754 SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4755 DAG.getConstant(0x007fffff, dl, MVT::i32));
4756 SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
4757 DAG.getConstant(0x3f800000, dl, MVT::i32));
4758 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
4759 }
4760
4761 /// GetExponent - Get the exponent:
4762 ///
4763 /// (float)(int)(((Op & 0x7f800000) >> 23) - 127);
4764 ///
4765 /// where Op is the i32 bit representation of the floating-point value.
4766 static SDValue GetExponent(SelectionDAG &DAG, SDValue Op,
4767 const TargetLowering &TLI, const SDLoc &dl) {
4768 SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4769 DAG.getConstant(0x7f800000, dl, MVT::i32));
4770 SDValue t1 = DAG.getNode(
4771 ISD::SRL, dl, MVT::i32, t0,
4772 DAG.getConstant(23, dl, TLI.getPointerTy(DAG.getDataLayout())));
4773 SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
4774 DAG.getConstant(127, dl, MVT::i32));
4775 return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
4776 }
4777
4778 /// getF32Constant - Get 32-bit floating point constant.
4779 static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
4780 const SDLoc &dl) {
4781 return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl,
4782 MVT::f32);
4783 }
4784
4785 static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
4786 SelectionDAG &DAG) {
4787 // TODO: What fast-math-flags should be set on the floating-point nodes?
4788
4789 // IntegerPartOfX = (int32_t)t0;
4790 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
4791
4792 // FractionalPartOfX = t0 - (float)IntegerPartOfX;
4793 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
4794 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
4795
4796 // IntegerPartOfX <<= 23;
4797 IntegerPartOfX = DAG.getNode(
4798 ISD::SHL, dl, MVT::i32, IntegerPartOfX,
4799 DAG.getConstant(23, dl, DAG.getTargetLoweringInfo().getPointerTy(
4800 DAG.getDataLayout())));
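// IntegerPartOfX now sits in the exponent field (bits 23..30) of an IEEE-754
// single, so integer-adding it to the bits of 2^FractionalPartOfX at the end
// effectively multiplies the result by 2^IntegerPartOfX.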
4801
4802 SDValue TwoToFractionalPartOfX;
4803 if (LimitFloatPrecision <= 6) {
4804 // For floating-point precision of 6:
4805 //
4806 // TwoToFractionalPartOfX =
4807 // 0.997535578f +
4808 // (0.735607626f + 0.252464424f * x) * x;
4809 //
4810 // error 0.0144103317, which is 6 bits
4811 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4812 getF32Constant(DAG, 0x3e814304, dl));
4813 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4814 getF32Constant(DAG, 0x3f3c50c8, dl));
4815 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4816 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4817 getF32Constant(DAG, 0x3f7f5e7e, dl));
4818 } else if (LimitFloatPrecision <= 12) {
4819 // For floating-point precision of 12:
4820 //
4821 // TwoToFractionalPartOfX =
4822 // 0.999892986f +
4823 // (0.696457318f +
4824 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
4825 //
4826 // error 0.000107046256, which is 13 to 14 bits
4827 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4828 getF32Constant(DAG, 0x3da235e3, dl));
4829 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4830 getF32Constant(DAG, 0x3e65b8f3, dl));
4831 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4832 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4833 getF32Constant(DAG, 0x3f324b07, dl));
4834 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4835 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4836 getF32Constant(DAG, 0x3f7ff8fd, dl));
4837 } else { // LimitFloatPrecision <= 18
4838 // For floating-point precision of 18:
4839 //
4840 // TwoToFractionalPartOfX =
4841 // 0.999999982f +
4842 // (0.693148872f +
4843 // (0.240227044f +
4844 // (0.554906021e-1f +
4845 // (0.961591928e-2f +
4846 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
4847 // error 2.47208000*10^(-7), which is better than 18 bits
4848 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4849 getF32Constant(DAG, 0x3924b03e, dl));
4850 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4851 getF32Constant(DAG, 0x3ab24b87, dl));
4852 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4853 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4854 getF32Constant(DAG, 0x3c1d8c17, dl));
4855 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4856 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4857 getF32Constant(DAG, 0x3d634a1d, dl));
4858 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4859 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4860 getF32Constant(DAG, 0x3e75fe14, dl));
4861 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4862 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
4863 getF32Constant(DAG, 0x3f317234, dl));
4864 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
4865 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
4866 getF32Constant(DAG, 0x3f800000, dl));
4867 }
4868
4869 // Add the exponent into the result in integer domain.
4870 SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
4871 return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
4872 DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
4873 }
4874
4875 /// expandExp - Lower an exp intrinsic. Handles the special sequences for
4876 /// limited-precision mode.
4877 static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4878 const TargetLowering &TLI) {
4879 if (Op.getValueType() == MVT::f32 &&
4880 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4881
4882 // Put the exponent in the right bit position for later addition to the
4883 // final result:
4884 //
4885 // t0 = Op * log2(e)
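// so that exp(Op) == exp2(t0); the identity exp(x) == 2^(x * log2(e)) lets
// us reuse the limited-precision exp2 expansion.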
4886
4887 // TODO: What fast-math-flags should be set here?
4888 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
4889 DAG.getConstantFP(numbers::log2ef, dl, MVT::f32));
4890 return getLimitedPrecisionExp2(t0, dl, DAG);
4891 }
4892
4893 // No special expansion.
4894 return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op);
4895 }
4896
4897 /// expandLog - Lower a log intrinsic. Handles the special sequences for
4898 /// limited-precision mode.
4899 static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4900 const TargetLowering &TLI) {
4901 // TODO: What fast-math-flags should be set on the floating-point nodes?
4902
4903 if (Op.getValueType() == MVT::f32 &&
4904 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4905 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4906
4907 // Scale the exponent by log(2).
4908 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
4909 SDValue LogOfExponent =
4910 DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
4911 DAG.getConstantFP(numbers::ln2f, dl, MVT::f32));
4912
4913 // Get the significand and build it into a floating-point number with
4914 // exponent of 1.
4915 SDValue X = GetSignificand(DAG, Op1, dl);
4916
4917 SDValue LogOfMantissa;
4918 if (LimitFloatPrecision <= 6) {
4919 // For floating-point precision of 6:
4920 //
4921 // LogofMantissa =
4922 // -1.1609546f +
4923 // (1.4034025f - 0.23903021f * x) * x;
4924 //
4925 // error 0.0034276066, which is better than 8 bits
4926 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4927 getF32Constant(DAG, 0xbe74c456, dl));
4928 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4929 getF32Constant(DAG, 0x3fb3a2b1, dl));
4930 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4931 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4932 getF32Constant(DAG, 0x3f949a29, dl));
4933 } else if (LimitFloatPrecision <= 12) {
4934 // For floating-point precision of 12:
4935 //
4936 // LogOfMantissa =
4937 // -1.7417939f +
4938 // (2.8212026f +
4939 // (-1.4699568f +
4940 // (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
4941 //
4942 // error 0.000061011436, which is 14 bits
4943 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4944 getF32Constant(DAG, 0xbd67b6d6, dl));
4945 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4946 getF32Constant(DAG, 0x3ee4f4b8, dl));
4947 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4948 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4949 getF32Constant(DAG, 0x3fbc278b, dl));
4950 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4951 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4952 getF32Constant(DAG, 0x40348e95, dl));
4953 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4954 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4955 getF32Constant(DAG, 0x3fdef31a, dl));
4956 } else { // LimitFloatPrecision <= 18
4957 // For floating-point precision of 18:
4958 //
4959 // LogOfMantissa =
4960 // -2.1072184f +
4961 // (4.2372794f +
4962 // (-3.7029485f +
4963 // (2.2781945f +
4964 // (-0.87823314f +
4965 // (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
4966 //
4967 // error 0.0000023660568, which is better than 18 bits
4968 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4969 getF32Constant(DAG, 0xbc91e5ac, dl));
4970 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4971 getF32Constant(DAG, 0x3e4350aa, dl));
4972 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4973 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4974 getF32Constant(DAG, 0x3f60d3e3, dl));
4975 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4976 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4977 getF32Constant(DAG, 0x4011cdf0, dl));
4978 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4979 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4980 getF32Constant(DAG, 0x406cfd1c, dl));
4981 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4982 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4983 getF32Constant(DAG, 0x408797cb, dl));
4984 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4985 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
4986 getF32Constant(DAG, 0x4006dcab, dl));
4987 }
4988
4989 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
4990 }
4991
4992 // No special expansion.
4993 return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op);
4994 }
4995
4996 /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
4997 /// limited-precision mode.
4998 static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
4999 const TargetLowering &TLI) {
5000 // TODO: What fast-math-flags should be set on the floating-point nodes?
5001
5002 if (Op.getValueType() == MVT::f32 &&
5003 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5004 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5005
5006 // Get the exponent.
5007 SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
5008
5009 // Get the significand and build it into a floating-point number with
5010 // exponent of 1.
5011 SDValue X = GetSignificand(DAG, Op1, dl);
5012
5013 // Different possible minimax approximations of significand in
5014 // floating-point for various degrees of accuracy over [1,2].
5015 SDValue Log2ofMantissa;
5016 if (LimitFloatPrecision <= 6) {
5017 // For floating-point precision of 6:
5018 //
5019 // Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
5020 //
5021 // error 0.0049451742, which is more than 7 bits
5022 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5023 getF32Constant(DAG, 0xbeb08fe0, dl));
5024 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5025 getF32Constant(DAG, 0x40019463, dl));
5026 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5027 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5028 getF32Constant(DAG, 0x3fd6633d, dl));
5029 } else if (LimitFloatPrecision <= 12) {
5030 // For floating-point precision of 12:
5031 //
5032 // Log2ofMantissa =
5033 // -2.51285454f +
5034 // (4.07009056f +
5035 // (-2.12067489f +
5036 // (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
5037 //
5038 // error 0.0000876136000, which is better than 13 bits
5039 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5040 getF32Constant(DAG, 0xbda7262e, dl));
5041 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5042 getF32Constant(DAG, 0x3f25280b, dl));
5043 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5044 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5045 getF32Constant(DAG, 0x4007b923, dl));
5046 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5047 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5048 getF32Constant(DAG, 0x40823e2f, dl));
5049 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5050 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5051 getF32Constant(DAG, 0x4020d29c, dl));
5052 } else { // LimitFloatPrecision <= 18
5053 // For floating-point precision of 18:
5054 //
5055 // Log2ofMantissa =
5056 // -3.0400495f +
5057 // (6.1129976f +
5058 // (-5.3420409f +
5059 // (3.2865683f +
5060 // (-1.2669343f +
5061 // (0.27515199f -
5062 // 0.25691327e-1f * x) * x) * x) * x) * x) * x;
5063 //
5064 // error 0.0000018516, which is better than 18 bits
5065 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5066 getF32Constant(DAG, 0xbcd2769e, dl));
5067 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5068 getF32Constant(DAG, 0x3e8ce0b9, dl));
5069 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5070 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5071 getF32Constant(DAG, 0x3fa22ae7, dl));
5072 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5073 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5074 getF32Constant(DAG, 0x40525723, dl));
5075 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5076 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5077 getF32Constant(DAG, 0x40aaf200, dl));
5078 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5079 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5080 getF32Constant(DAG, 0x40c39dad, dl));
5081 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5082 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5083 getF32Constant(DAG, 0x4042902c, dl));
5084 }
5085
5086 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5087 }
5088
5089 // No special expansion.
5090 return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op);
5091 }
5092
5093 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
5094 /// limited-precision mode.
5095 static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5096 const TargetLowering &TLI) {
5097 // TODO: What fast-math-flags should be set on the floating-point nodes?
5098
5099 if (Op.getValueType() == MVT::f32 &&
5100 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5101 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5102
5103 // Scale the exponent by log10(2) [0.30102999f].
5104 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5105 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5106 getF32Constant(DAG, 0x3e9a209a, dl));
5107
5108 // Get the significand and build it into a floating-point number with
5109 // exponent of 1.
5110 SDValue X = GetSignificand(DAG, Op1, dl);
5111
5112 SDValue Log10ofMantissa;
5113 if (LimitFloatPrecision <= 6) {
5114 // For floating-point precision of 6:
5115 //
5116 // Log10ofMantissa =
5117 // -0.50419619f +
5118 // (0.60948995f - 0.10380950f * x) * x;
5119 //
5120 // error 0.0014886165, which is 6 bits
5121 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5122 getF32Constant(DAG, 0xbdd49a13, dl));
5123 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5124 getF32Constant(DAG, 0x3f1c0789, dl));
5125 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5126 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5127 getF32Constant(DAG, 0x3f011300, dl));
5128 } else if (LimitFloatPrecision <= 12) {
5129 // For floating-point precision of 12:
5130 //
5131 // Log10ofMantissa =
5132 // -0.64831180f +
5133 // (0.91751397f +
5134 // (-0.31664806f + 0.47637168e-1f * x) * x) * x;
5135 //
5136 // error 0.00019228036, which is better than 12 bits
5137 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5138 getF32Constant(DAG, 0x3d431f31, dl));
5139 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5140 getF32Constant(DAG, 0x3ea21fb2, dl));
5141 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5142 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5143 getF32Constant(DAG, 0x3f6ae232, dl));
5144 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5145 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5146 getF32Constant(DAG, 0x3f25f7c3, dl));
5147 } else { // LimitFloatPrecision <= 18
5148 // For floating-point precision of 18:
5149 //
5150 // Log10ofMantissa =
5151 // -0.84299375f +
5152 // (1.5327582f +
5153 // (-1.0688956f +
5154 // (0.49102474f +
5155 // (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
5156 //
5157 // error 0.0000037995730, which is better than 18 bits
5158 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5159 getF32Constant(DAG, 0x3c5d51ce, dl));
5160 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5161 getF32Constant(DAG, 0x3e00685a, dl));
5162 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5163 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5164 getF32Constant(DAG, 0x3efb6798, dl));
5165 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5166 SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5167 getF32Constant(DAG, 0x3f88d192, dl));
5168 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5169 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5170 getF32Constant(DAG, 0x3fc4316c, dl));
5171 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5172 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
5173 getF32Constant(DAG, 0x3f57ce70, dl));
5174 }
5175
5176 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
5177 }
5178
5179 // No special expansion.
5180 return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op);
5181 }
5182
5183 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
5184 /// limited-precision mode.
5185 static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5186 const TargetLowering &TLI) {
5187 if (Op.getValueType() == MVT::f32 &&
5188 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
5189 return getLimitedPrecisionExp2(Op, dl, DAG);
5190
5191 // No special expansion.
5192 return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op);
5193 }
5194
5195 /// expandPow - Lower a pow intrinsic. Handles the special sequence for
5196 /// limited-precision mode when the base is exactly 10.0f.
5197 static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
5198 SelectionDAG &DAG, const TargetLowering &TLI) {
5199 bool IsExp10 = false;
5200 if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
5201 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5202 if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
5203 APFloat Ten(10.0f);
5204 IsExp10 = LHSC->isExactlyValue(Ten);
5205 }
5206 }
5207
5208 // TODO: What fast-math-flags should be set on the FMUL node?
5209 if (IsExp10) {
5210 // Put the exponent in the right bit position for later addition to the
5211 // final result:
5212 //
5213 // #define LOG2OF10 3.3219281f
5214 // t0 = Op * LOG2OF10;
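// using the identity 10^x == 2^(x * log2(10)).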
5215 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
5216 getF32Constant(DAG, 0x40549a78, dl));
5217 return getLimitedPrecisionExp2(t0, dl, DAG);
5218 }
5219
5220 // No special expansion.
5221 return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS);
5222 }
5223
5224 /// ExpandPowI - Expand a llvm.powi intrinsic.
5225 static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS,
5226 SelectionDAG &DAG) {
5227 // If RHS is a constant, we can expand this out to a multiplication tree,
5228 // otherwise we end up lowering to a call to __powidf2 (for example). When
5229 // optimizing for size, we only want to do this if the expansion would produce
5230 // a small number of multiplies, otherwise we do the full expansion.
5231 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
5232 // Get the exponent as a positive value.
5233 unsigned Val = RHSC->getSExtValue();
5234 if ((int)Val < 0) Val = -Val;
5235
5236 // powi(x, 0) -> 1.0
5237 if (Val == 0)
5238 return DAG.getConstantFP(1.0, DL, LHS.getValueType());
5239
5240 bool OptForSize = DAG.shouldOptForSize();
5241 if (!OptForSize ||
5242 // If optimizing for size, don't insert too many multiplies.
5243 // This inserts up to 5 multiplies.
5244 countPopulation(Val) + Log2_32(Val) < 7) {
5245 // We use the simple binary decomposition method to generate the multiply
5246 // sequence. There are more optimal ways to do this (for example,
5247 // powi(x,15) generates one more multiply than it should), but this has
5248 // the benefit of being both really simple and much better than a libcall.
5249 SDValue Res; // Logically starts equal to 1.0
5250 SDValue CurSquare = LHS;
5251 // TODO: Intrinsics should have fast-math-flags that propagate to these
5252 // nodes.
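// For example, powi(x, 13): 13 = 0b1101, so Res accumulates
// x * x^4 * x^8 = x^13 while CurSquare steps through x, x^2, x^4, x^8.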
5253 while (Val) {
5254 if (Val & 1) {
5255 if (Res.getNode())
5256 Res = DAG.getNode(ISD::FMUL, DL, Res.getValueType(), Res, CurSquare);
5257 else
5258 Res = CurSquare; // 1.0*CurSquare.
5259 }
5260
5261 CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
5262 CurSquare, CurSquare);
5263 Val >>= 1;
5264 }
5265
5266 // If the original was negative, invert the result, producing 1/(x*x*x).
5267 if (RHSC->getSExtValue() < 0)
5268 Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
5269 DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
5270 return Res;
5271 }
5272 }
5273
5274 // Otherwise, expand to a libcall.
5275 return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
5276 }
5277
5278 static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL,
5279 SDValue LHS, SDValue RHS, SDValue Scale,
5280 SelectionDAG &DAG, const TargetLowering &TLI) {
5281 EVT VT = LHS.getValueType();
5282 bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT;
5283 bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT;
5284 LLVMContext &Ctx = *DAG.getContext();
5285
5286 // If the type is legal but the operation isn't, this node might survive all
5287 // the way to operation legalization. If we end up there and we do not have
5288 // the ability to widen the type (if VT*2 is not legal), we cannot expand the
5289 // node.
5290
5291 // Coax the legalizer into expanding the node during type legalization instead
5292 // by bumping the size by one bit. This will force it to Promote, enabling the
5293 // early expansion and avoiding the need to expand later.
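// E.g. an i32 divide whose fixed-point operation is not legal is rebuilt as
// an i33 node; i33 is an illegal type, so the type legalizer promotes it and
// the operation is expanded early, while widening is still possible.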
5294
5295 // We don't have to do this if Scale is 0; that can always be expanded, unless
5296 // it's a saturating signed operation. Those can experience true integer
5297 // division overflow, a case which we must avoid.
5298
5299 // FIXME: We wouldn't have to do this (or any of the early
5300 // expansion/promotion) if it was possible to expand a libcall of an
5301 // illegal type during operation legalization. But it's not, so things
5302 // get a bit hacky.
5303 unsigned ScaleInt = cast<ConstantSDNode>(Scale)->getZExtValue();
5304 if ((ScaleInt > 0 || (Saturating && Signed)) &&
5305 (TLI.isTypeLegal(VT) ||
5306 (VT.isVector() && TLI.isTypeLegal(VT.getVectorElementType())))) {
5307 TargetLowering::LegalizeAction Action = TLI.getFixedPointOperationAction(
5308 Opcode, VT, ScaleInt);
5309 if (Action != TargetLowering::Legal && Action != TargetLowering::Custom) {
5310 EVT PromVT;
5311 if (VT.isScalarInteger())
5312 PromVT = EVT::getIntegerVT(Ctx, VT.getSizeInBits() + 1);
5313 else if (VT.isVector()) {
5314 PromVT = VT.getVectorElementType();
5315 PromVT = EVT::getIntegerVT(Ctx, PromVT.getSizeInBits() + 1);
5316 PromVT = EVT::getVectorVT(Ctx, PromVT, VT.getVectorElementCount());
5317 } else
5318 llvm_unreachable("Wrong VT for DIVFIX?");
5319 if (Signed) {
5320 LHS = DAG.getSExtOrTrunc(LHS, DL, PromVT);
5321 RHS = DAG.getSExtOrTrunc(RHS, DL, PromVT);
5322 } else {
5323 LHS = DAG.getZExtOrTrunc(LHS, DL, PromVT);
5324 RHS = DAG.getZExtOrTrunc(RHS, DL, PromVT);
5325 }
5326 EVT ShiftTy = TLI.getShiftAmountTy(PromVT, DAG.getDataLayout());
5327 // For saturating operations, we need to shift up the LHS to get the
5328 // proper saturation width, and then shift down again afterwards.
5329 if (Saturating)
5330 LHS = DAG.getNode(ISD::SHL, DL, PromVT, LHS,
5331 DAG.getConstant(1, DL, ShiftTy));
5332 SDValue Res = DAG.getNode(Opcode, DL, PromVT, LHS, RHS, Scale);
5333 if (Saturating)
5334 Res = DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, PromVT, Res,
5335 DAG.getConstant(1, DL, ShiftTy));
5336 return DAG.getZExtOrTrunc(Res, DL, VT);
5337 }
5338 }
5339
5340 return DAG.getNode(Opcode, DL, VT, LHS, RHS, Scale);
5341 }
5342
5343 // getUnderlyingArgRegs - Find underlying registers used for a truncated,
5344 // bitcasted, or split argument. Returns a list of <Register, size in bits>.
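// For example, an i64 argument passed in two i32 registers appears as a
// BUILD_PAIR of two CopyFromReg nodes and yields two <Reg, 32> entries.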
5345 static void
5346 getUnderlyingArgRegs(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
5347 const SDValue &N) {
5348 switch (N.getOpcode()) {
5349 case ISD::CopyFromReg: {
5350 SDValue Op = N.getOperand(1);
5351 Regs.emplace_back(cast<RegisterSDNode>(Op)->getReg(),
5352 Op.getValueType().getSizeInBits());
5353 return;
5354 }
5355 case ISD::BITCAST:
5356 case ISD::AssertZext:
5357 case ISD::AssertSext:
5358 case ISD::TRUNCATE:
5359 getUnderlyingArgRegs(Regs, N.getOperand(0));
5360 return;
5361 case ISD::BUILD_PAIR:
5362 case ISD::BUILD_VECTOR:
5363 case ISD::CONCAT_VECTORS:
5364 for (SDValue Op : N->op_values())
5365 getUnderlyingArgRegs(Regs, Op);
5366 return;
5367 default:
5368 return;
5369 }
5370 }
5371
5372 /// If the DbgValueInst is a dbg_value of a function argument, create the
5373 /// corresponding DBG_VALUE machine instruction for it now. At the end of
5374 /// instruction selection, they will be inserted to the entry BB.
5375 bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
5376 const Value *V, DILocalVariable *Variable, DIExpression *Expr,
5377 DILocation *DL, bool IsDbgDeclare, const SDValue &N) {
5378 const Argument *Arg = dyn_cast<Argument>(V);
5379 if (!Arg)
5380 return false;
5381
5382 if (!IsDbgDeclare) {
5383 // ArgDbgValues are hoisted to the beginning of the entry block. So we
5384 // should only emit as ArgDbgValue if the dbg.value intrinsic is found in
5385 // the entry block.
5386 bool IsInEntryBlock = FuncInfo.MBB == &FuncInfo.MF->front();
5387 if (!IsInEntryBlock)
5388 return false;
5389
5390 // ArgDbgValues are hoisted to the beginning of the entry block. So we
5391 // should only emit as ArgDbgValue if the dbg.value intrinsic describes a
5392 // variable that also is a param.
5393 //
5394 // Although, if we are at the top of the entry block already, we can still
5395 // emit using ArgDbgValue. This might catch some situations when the
5396 // dbg.value refers to an argument that isn't used in the entry block, so
5397 // any CopyToReg node would be optimized out and the only way to express
5398 // this DBG_VALUE is by using the physical reg (or FI) as done in this
5399 // method. As noted above, ArgDbgValues are hoisted to the beginning of the
5400 // entry block, so we should only emit one if the Variable is an argument
5401 // to the current function and the dbg.value intrinsic is found in the
5402 // entry block.
5403 bool VariableIsFunctionInputArg = Variable->isParameter() &&
5404 !DL->getInlinedAt();
5405 bool IsInPrologue = SDNodeOrder == LowestSDNodeOrder;
5406 if (!IsInPrologue && !VariableIsFunctionInputArg)
5407 return false;
5408
5409 // Here we assume that an IR-level function argument can only be used to
5410 // describe one source-level input parameter. If we, for example, have
5411 // source code like this
5412 //
5413 // struct A { long x, y; };
5414 // void foo(struct A a, long b) {
5415 // ...
5416 // b = a.x;
5417 // ...
5418 // }
5419 //
5420 // and IR like this
5421 //
5422 // define void @foo(i32 %a1, i32 %a2, i32 %b) {
5423 // entry:
5424 // call void @llvm.dbg.value(metadata i32 %a1, "a", DW_OP_LLVM_fragment
5425 // call void @llvm.dbg.value(metadata i32 %a2, "a", DW_OP_LLVM_fragment
5426 // call void @llvm.dbg.value(metadata i32 %b, "b",
5427 // ...
5428 // call void @llvm.dbg.value(metadata i32 %a1, "b"
5429 // ...
5430 //
5431 // then the last dbg.value is describing a parameter "b" using a value that
5432 // is an argument. But since we have already used %a1 to describe a
5433 // parameter, we should not handle that last dbg.value here (that would
5434 // result in an incorrect hoisting of the DBG_VALUE to the function entry).
5435 // Notice that we allow one dbg.value per IR level argument, to accommodate
5436 // the situation with fragments above.
5437 if (VariableIsFunctionInputArg) {
5438 unsigned ArgNo = Arg->getArgNo();
5439 if (ArgNo >= FuncInfo.DescribedArgs.size())
5440 FuncInfo.DescribedArgs.resize(ArgNo + 1, false);
5441 else if (!IsInPrologue && FuncInfo.DescribedArgs.test(ArgNo))
5442 return false;
5443 FuncInfo.DescribedArgs.set(ArgNo);
5444 }
5445 }
5446
5447 MachineFunction &MF = DAG.getMachineFunction();
5448 const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
5449
5450 bool IsIndirect = false;
5451 Optional<MachineOperand> Op;
5452 // Some arguments' frame index is recorded during argument lowering.
5453 int FI = FuncInfo.getArgumentFrameIndex(Arg);
5454 if (FI != std::numeric_limits<int>::max())
5455 Op = MachineOperand::CreateFI(FI);
5456
5457 SmallVector<std::pair<unsigned, unsigned>, 8> ArgRegsAndSizes;
5458 if (!Op && N.getNode()) {
5459 getUnderlyingArgRegs(ArgRegsAndSizes, N);
5460 Register Reg;
5461 if (ArgRegsAndSizes.size() == 1)
5462 Reg = ArgRegsAndSizes.front().first;
5463
5464 if (Reg && Reg.isVirtual()) {
5465 MachineRegisterInfo &RegInfo = MF.getRegInfo();
5466 Register PR = RegInfo.getLiveInPhysReg(Reg);
5467 if (PR)
5468 Reg = PR;
5469 }
5470 if (Reg) {
5471 Op = MachineOperand::CreateReg(Reg, false);
5472 IsIndirect = IsDbgDeclare;
5473 }
5474 }
5475
5476 if (!Op && N.getNode()) {
5477 // Check if frame index is available.
5478 SDValue LCandidate = peekThroughBitcasts(N);
5479 if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(LCandidate.getNode()))
5480 if (FrameIndexSDNode *FINode =
5481 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
5482 Op = MachineOperand::CreateFI(FINode->getIndex());
5483 }
5484
5485 if (!Op) {
5486 // Create a DBG_VALUE for each decomposed value in ArgRegs to cover Reg
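// (E.g. a value split across two 64-bit registers gets two DBG_VALUEs, each
// with a 64-bit DW_OP_LLVM_fragment at offsets 0 and 64.)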
5487 auto splitMultiRegDbgValue
5488 = [&](ArrayRef<std::pair<unsigned, unsigned>> SplitRegs) {
5489 unsigned Offset = 0;
5490 for (auto RegAndSize : SplitRegs) {
5491 // If the expression is already a fragment, the current register
5492 // offset+size might extend beyond the fragment. In this case, only
5493 // the register bits that are inside the fragment are relevant.
5494 int RegFragmentSizeInBits = RegAndSize.second;
5495 if (auto ExprFragmentInfo = Expr->getFragmentInfo()) {
5496 uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
5497 // The register is entirely outside the expression fragment,
5498 // so is irrelevant for debug info.
5499 if (Offset >= ExprFragmentSizeInBits)
5500 break;
5501 // The register is partially outside the expression fragment, only
5502 // the low bits within the fragment are relevant for debug info.
5503 if (Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
5504 RegFragmentSizeInBits = ExprFragmentSizeInBits - Offset;
5505 }
5506 }
5507
5508 auto FragmentExpr = DIExpression::createFragmentExpression(
5509 Expr, Offset, RegFragmentSizeInBits);
5510 Offset += RegAndSize.second;
5511 // If a valid fragment expression cannot be created, the variable's
5512 // correct value cannot be determined and so it is set as Undef.
5513 if (!FragmentExpr) {
5514 SDDbgValue *SDV = DAG.getConstantDbgValue(
5515 Variable, Expr, UndefValue::get(V->getType()), DL, SDNodeOrder);
5516 DAG.AddDbgValue(SDV, nullptr, false);
5517 continue;
5518 }
5519 assert(!IsDbgDeclare && "DbgDeclare operand is not in memory?");
5520 FuncInfo.ArgDbgValues.push_back(
5521 BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsDbgDeclare,
5522 RegAndSize.first, Variable, *FragmentExpr));
5523 }
5524 };
5525
5526 // Check if ValueMap has reg number.
5527 DenseMap<const Value *, Register>::const_iterator
5528 VMI = FuncInfo.ValueMap.find(V);
5529 if (VMI != FuncInfo.ValueMap.end()) {
5530 const auto &TLI = DAG.getTargetLoweringInfo();
5531 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
5532 V->getType(), None);
5533 if (RFV.occupiesMultipleRegs()) {
5534 splitMultiRegDbgValue(RFV.getRegsAndSizes());
5535 return true;
5536 }
5537
5538 Op = MachineOperand::CreateReg(VMI->second, false);
5539 IsIndirect = IsDbgDeclare;
5540 } else if (ArgRegsAndSizes.size() > 1) {
5541 // This was split due to the calling convention, and no virtual register
5542 // mapping exists for the value.
5543 splitMultiRegDbgValue(ArgRegsAndSizes);
5544 return true;
5545 }
5546 }
5547
5548 if (!Op)
5549 return false;
5550
5551 assert(Variable->isValidLocationForIntrinsic(DL) &&
5552 "Expected inlined-at fields to agree");
5553 IsIndirect = (Op->isReg()) ? IsIndirect : true;
5554 FuncInfo.ArgDbgValues.push_back(
5555 BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
5556 *Op, Variable, Expr));
5557
5558 return true;
5559 }
5560
5561 /// Return the appropriate SDDbgValue based on N.
5562 SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
5563 DILocalVariable *Variable,
5564 DIExpression *Expr,
5565 const DebugLoc &dl,
5566 unsigned DbgSDNodeOrder) {
5567 if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
5568 // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
5569 // stack slot locations.
5570 //
5571 // Consider "int x = 0; int *px = &x;". There are two kinds of interesting
5572 // debug values here after optimization:
5573 //
5574 // dbg.value(i32* %px, !"int *px", !DIExpression()), and
5575 // dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
5576 //
5577 // Both describe the direct values of their associated variables.
5578 return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
5579 /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5580 }
5581 return DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(),
5582 /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5583 }
5584
5585 static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic) {
5586 switch (Intrinsic) {
5587 case Intrinsic::smul_fix:
5588 return ISD::SMULFIX;
5589 case Intrinsic::umul_fix:
5590 return ISD::UMULFIX;
5591 case Intrinsic::smul_fix_sat:
5592 return ISD::SMULFIXSAT;
5593 case Intrinsic::umul_fix_sat:
5594 return ISD::UMULFIXSAT;
5595 case Intrinsic::sdiv_fix:
5596 return ISD::SDIVFIX;
5597 case Intrinsic::udiv_fix:
5598 return ISD::UDIVFIX;
5599 case Intrinsic::sdiv_fix_sat:
5600 return ISD::SDIVFIXSAT;
5601 case Intrinsic::udiv_fix_sat:
5602 return ISD::UDIVFIXSAT;
5603 default:
5604 llvm_unreachable("Unhandled fixed point intrinsic");
5605 }
5606 }
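// For example (illustrative IR, not taken from this file):
//   %r = call i32 @llvm.smul.fix.i32(i32 %a, i32 %b, i32 31)
// maps to an ISD::SMULFIX node whose third operand is the scale (31
// fractional bits here); the *_sat variants differ only in saturating
// instead of wrapping on overflow.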
5607
5608 void SelectionDAGBuilder::lowerCallToExternalSymbol(const CallInst &I,
5609 const char *FunctionName) {
5610 assert(FunctionName && "FunctionName must not be nullptr");
5611 SDValue Callee = DAG.getExternalSymbol(
5612 FunctionName,
5613 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
5614 LowerCallTo(I, Callee, I.isTailCall());
5615 }
5616
5617 /// Given a @llvm.call.preallocated.setup, return the corresponding
5618 /// preallocated call.
5619 static const CallBase *FindPreallocatedCall(const Value *PreallocatedSetup) {
5620 assert(cast<CallBase>(PreallocatedSetup)
5621 ->getCalledFunction()
5622 ->getIntrinsicID() == Intrinsic::call_preallocated_setup &&
5623 "expected call_preallocated_setup Value");
5624 for (auto *U : PreallocatedSetup->users()) {
5625 auto *UseCall = cast<CallBase>(U);
5626 const Function *Fn = UseCall->getCalledFunction();
5627 if (!Fn || Fn->getIntrinsicID() != Intrinsic::call_preallocated_arg) {
5628 return UseCall;
5629 }
5630 }
5631 llvm_unreachable("expected corresponding call to preallocated setup/arg");
5632 }
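// Roughly, the IR shape being walked looks like (illustrative only):
//   %t = call token @llvm.call.preallocated.setup(i32 1)
//   %a = call i8* @llvm.call.preallocated.arg(token %t, i32 0)
//   call void @f(i8* preallocated(i32) %a) ["preallocated"(token %t)]
// Every user of the setup token is either a preallocated.arg intrinsic or
// the one real call, which is what the loop above returns.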
5633
5634 /// Lower the call to the specified intrinsic function.
5635 void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
5636 unsigned Intrinsic) {
5637 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5638 SDLoc sdl = getCurSDLoc();
5639 DebugLoc dl = getCurDebugLoc();
5640 SDValue Res;
5641
5642 switch (Intrinsic) {
5643 default:
5644 // By default, turn this into a target intrinsic node.
5645 visitTargetIntrinsic(I, Intrinsic);
5646 return;
5647 case Intrinsic::vscale: {
5648 match(&I, m_VScale(DAG.getDataLayout()));
5649 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5650 setValue(&I,
5651 DAG.getVScale(getCurSDLoc(), VT, APInt(VT.getSizeInBits(), 1)));
5652 return;
5653 }
5654 case Intrinsic::vastart: visitVAStart(I); return;
5655 case Intrinsic::vaend: visitVAEnd(I); return;
5656 case Intrinsic::vacopy: visitVACopy(I); return;
5657 case Intrinsic::returnaddress:
5658 setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
5659 TLI.getPointerTy(DAG.getDataLayout()),
5660 getValue(I.getArgOperand(0))));
5661 return;
5662 case Intrinsic::addressofreturnaddress:
5663 setValue(&I, DAG.getNode(ISD::ADDROFRETURNADDR, sdl,
5664 TLI.getPointerTy(DAG.getDataLayout())));
5665 return;
5666 case Intrinsic::sponentry:
5667 setValue(&I, DAG.getNode(ISD::SPONENTRY, sdl,
5668 TLI.getFrameIndexTy(DAG.getDataLayout())));
5669 return;
5670 case Intrinsic::frameaddress:
5671 setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
5672 TLI.getFrameIndexTy(DAG.getDataLayout()),
5673 getValue(I.getArgOperand(0))));
5674 return;
5675 case Intrinsic::read_volatile_register:
5676 case Intrinsic::read_register: {
5677 Value *Reg = I.getArgOperand(0);
5678 SDValue Chain = getRoot();
5679 SDValue RegName =
5680 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
5681 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5682 Res = DAG.getNode(ISD::READ_REGISTER, sdl,
5683 DAG.getVTList(VT, MVT::Other), Chain, RegName);
5684 setValue(&I, Res);
5685 DAG.setRoot(Res.getValue(1));
5686 return;
5687 }
5688 case Intrinsic::write_register: {
5689 Value *Reg = I.getArgOperand(0);
5690 Value *RegValue = I.getArgOperand(1);
5691 SDValue Chain = getRoot();
5692 SDValue RegName =
5693 DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
5694 DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
5695 RegName, getValue(RegValue)));
5696 return;
5697 }
5698 case Intrinsic::memcpy: {
5699 const auto &MCI = cast<MemCpyInst>(I);
5700 SDValue Op1 = getValue(I.getArgOperand(0));
5701 SDValue Op2 = getValue(I.getArgOperand(1));
5702 SDValue Op3 = getValue(I.getArgOperand(2));
5703 // @llvm.memcpy defines 0 and 1 to both mean no alignment.
5704 Align DstAlign = MCI.getDestAlign().valueOrOne();
5705 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
5706 Align Alignment = commonAlignment(DstAlign, SrcAlign);
5707 bool isVol = MCI.isVolatile();
5708 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5709 // FIXME: Support passing different dest/src alignments to the memcpy DAG
5710 // node.
5711 SDValue Root = isVol ? getRoot() : getMemoryRoot();
5712 SDValue MC = DAG.getMemcpy(Root, sdl, Op1, Op2, Op3, Alignment, isVol,
5713 /* AlwaysInline */ false, isTC,
5714 MachinePointerInfo(I.getArgOperand(0)),
5715 MachinePointerInfo(I.getArgOperand(1)));
5716 updateDAGForMaybeTailCall(MC);
5717 return;
5718 }
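  // Note on the alignment handling above: commonAlignment() takes the
  // minimum of the two, so e.g. a 16-byte-aligned dest and a 4-byte-aligned
  // source are lowered with Align(4) on both sides until the FIXME about
  // separate dest/src alignments is addressed.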
5719 case Intrinsic::memcpy_inline: {
5720 const auto &MCI = cast<MemCpyInlineInst>(I);
5721 SDValue Dst = getValue(I.getArgOperand(0));
5722 SDValue Src = getValue(I.getArgOperand(1));
5723 SDValue Size = getValue(I.getArgOperand(2));
5724 assert(isa<ConstantSDNode>(Size) && "memcpy_inline needs constant size");
5725 // @llvm.memcpy.inline defines 0 and 1 to both mean no alignment.
5726 Align DstAlign = MCI.getDestAlign().valueOrOne();
5727 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
5728 Align Alignment = commonAlignment(DstAlign, SrcAlign);
5729 bool isVol = MCI.isVolatile();
5730 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5731 // FIXME: Support passing different dest/src alignments to the memcpy DAG
5732 // node.
5733 SDValue MC = DAG.getMemcpy(getRoot(), sdl, Dst, Src, Size, Alignment, isVol,
5734 /* AlwaysInline */ true, isTC,
5735 MachinePointerInfo(I.getArgOperand(0)),
5736 MachinePointerInfo(I.getArgOperand(1)));
5737 updateDAGForMaybeTailCall(MC);
5738 return;
5739 }
5740 case Intrinsic::memset: {
5741 const auto &MSI = cast<MemSetInst>(I);
5742 SDValue Op1 = getValue(I.getArgOperand(0));
5743 SDValue Op2 = getValue(I.getArgOperand(1));
5744 SDValue Op3 = getValue(I.getArgOperand(2));
5745 // @llvm.memset defines 0 and 1 to both mean no alignment.
5746 Align Alignment = MSI.getDestAlign().valueOrOne();
5747 bool isVol = MSI.isVolatile();
5748 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5749 SDValue Root = isVol ? getRoot() : getMemoryRoot();
5750 SDValue MS = DAG.getMemset(Root, sdl, Op1, Op2, Op3, Alignment, isVol, isTC,
5751 MachinePointerInfo(I.getArgOperand(0)));
5752 updateDAGForMaybeTailCall(MS);
5753 return;
5754 }
5755 case Intrinsic::memmove: {
5756 const auto &MMI = cast<MemMoveInst>(I);
5757 SDValue Op1 = getValue(I.getArgOperand(0));
5758 SDValue Op2 = getValue(I.getArgOperand(1));
5759 SDValue Op3 = getValue(I.getArgOperand(2));
5760 // @llvm.memmove defines 0 and 1 to both mean no alignment.
5761 Align DstAlign = MMI.getDestAlign().valueOrOne();
5762 Align SrcAlign = MMI.getSourceAlign().valueOrOne();
5763 Align Alignment = commonAlignment(DstAlign, SrcAlign);
5764 bool isVol = MMI.isVolatile();
5765 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5766 // FIXME: Support passing different dest/src alignments to the memmove DAG
5767 // node.
5768 SDValue Root = isVol ? getRoot() : getMemoryRoot();
5769 SDValue MM = DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol,
5770 isTC, MachinePointerInfo(I.getArgOperand(0)),
5771 MachinePointerInfo(I.getArgOperand(1)));
5772 updateDAGForMaybeTailCall(MM);
5773 return;
5774 }
5775 case Intrinsic::memcpy_element_unordered_atomic: {
5776 const AtomicMemCpyInst &MI = cast<AtomicMemCpyInst>(I);
5777 SDValue Dst = getValue(MI.getRawDest());
5778 SDValue Src = getValue(MI.getRawSource());
5779 SDValue Length = getValue(MI.getLength());
5780
5781 unsigned DstAlign = MI.getDestAlignment();
5782 unsigned SrcAlign = MI.getSourceAlignment();
5783 Type *LengthTy = MI.getLength()->getType();
5784 unsigned ElemSz = MI.getElementSizeInBytes();
5785 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5786 SDValue MC = DAG.getAtomicMemcpy(getRoot(), sdl, Dst, DstAlign, Src,
5787 SrcAlign, Length, LengthTy, ElemSz, isTC,
5788 MachinePointerInfo(MI.getRawDest()),
5789 MachinePointerInfo(MI.getRawSource()));
5790 updateDAGForMaybeTailCall(MC);
5791 return;
5792 }
5793 case Intrinsic::memmove_element_unordered_atomic: {
5794 auto &MI = cast<AtomicMemMoveInst>(I);
5795 SDValue Dst = getValue(MI.getRawDest());
5796 SDValue Src = getValue(MI.getRawSource());
5797 SDValue Length = getValue(MI.getLength());
5798
5799 unsigned DstAlign = MI.getDestAlignment();
5800 unsigned SrcAlign = MI.getSourceAlignment();
5801 Type *LengthTy = MI.getLength()->getType();
5802 unsigned ElemSz = MI.getElementSizeInBytes();
5803 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5804 SDValue MC = DAG.getAtomicMemmove(getRoot(), sdl, Dst, DstAlign, Src,
5805 SrcAlign, Length, LengthTy, ElemSz, isTC,
5806 MachinePointerInfo(MI.getRawDest()),
5807 MachinePointerInfo(MI.getRawSource()));
5808 updateDAGForMaybeTailCall(MC);
5809 return;
5810 }
5811 case Intrinsic::memset_element_unordered_atomic: {
5812 auto &MI = cast<AtomicMemSetInst>(I);
5813 SDValue Dst = getValue(MI.getRawDest());
5814 SDValue Val = getValue(MI.getValue());
5815 SDValue Length = getValue(MI.getLength());
5816
5817 unsigned DstAlign = MI.getDestAlignment();
5818 Type *LengthTy = MI.getLength()->getType();
5819 unsigned ElemSz = MI.getElementSizeInBytes();
5820 bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
5821 SDValue MC = DAG.getAtomicMemset(getRoot(), sdl, Dst, DstAlign, Val, Length,
5822 LengthTy, ElemSz, isTC,
5823 MachinePointerInfo(MI.getRawDest()));
5824 updateDAGForMaybeTailCall(MC);
5825 return;
5826 }
5827 case Intrinsic::call_preallocated_setup: {
5828 const CallBase *PreallocatedCall = FindPreallocatedCall(&I);
5829 SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
5830 SDValue Res = DAG.getNode(ISD::PREALLOCATED_SETUP, sdl, MVT::Other,
5831 getRoot(), SrcValue);
5832 setValue(&I, Res);
5833 DAG.setRoot(Res);
5834 return;
5835 }
5836 case Intrinsic::call_preallocated_arg: {
5837 const CallBase *PreallocatedCall = FindPreallocatedCall(I.getOperand(0));
5838 SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
5839 SDValue Ops[3];
5840 Ops[0] = getRoot();
5841 Ops[1] = SrcValue;
5842 Ops[2] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(1)), sdl,
5843 MVT::i32); // arg index
5844 SDValue Res = DAG.getNode(
5845 ISD::PREALLOCATED_ARG, sdl,
5846 DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Ops);
5847 setValue(&I, Res);
5848 DAG.setRoot(Res.getValue(1));
5849 return;
5850 }
5851 case Intrinsic::dbg_addr:
5852 case Intrinsic::dbg_declare: {
5853 const auto &DI = cast<DbgVariableIntrinsic>(I);
5854 DILocalVariable *Variable = DI.getVariable();
5855 DIExpression *Expression = DI.getExpression();
5856 dropDanglingDebugInfo(Variable, Expression);
5857 assert(Variable && "Missing variable");
5858 LLVM_DEBUG(dbgs() << "SelectionDAG visiting debug intrinsic: " << DI
5859 << "\n");
5860 // Check if address has undef value.
5861 const Value *Address = DI.getVariableLocation();
5862 if (!Address || isa<UndefValue>(Address) ||
5863 (Address->use_empty() && !isa<Argument>(Address))) {
5864 LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI
5865 << " (bad/undef/unused-arg address)\n");
5866 return;
5867 }
5868
5869 bool isParameter = Variable->isParameter() || isa<Argument>(Address);
5870
5871 // Check if this variable can be described by a frame index, typically
5872 // either as a static alloca or a byval parameter.
5873 int FI = std::numeric_limits<int>::max();
5874 if (const auto *AI =
5875 dyn_cast<AllocaInst>(Address->stripInBoundsConstantOffsets())) {
5876 if (AI->isStaticAlloca()) {
5877 auto I = FuncInfo.StaticAllocaMap.find(AI);
5878 if (I != FuncInfo.StaticAllocaMap.end())
5879 FI = I->second;
5880 }
5881 } else if (const auto *Arg = dyn_cast<Argument>(
5882 Address->stripInBoundsConstantOffsets())) {
5883 FI = FuncInfo.getArgumentFrameIndex(Arg);
5884 }
5885
5886 // llvm.dbg.addr is control dependent and always generates indirect
5887 // DBG_VALUE instructions. llvm.dbg.declare is handled as a frame index in
5888 // the MachineFunction variable table.
5889 if (FI != std::numeric_limits<int>::max()) {
5890 if (Intrinsic == Intrinsic::dbg_addr) {
5891 SDDbgValue *SDV = DAG.getFrameIndexDbgValue(
5892 Variable, Expression, FI, /*IsIndirect*/ true, dl, SDNodeOrder);
5893 DAG.AddDbgValue(SDV, getRoot().getNode(), isParameter);
5894 } else {
5895 LLVM_DEBUG(dbgs() << "Skipping " << DI
5896 << " (variable info stashed in MF side table)\n");
5897 }
5898 return;
5899 }
5900
5901 SDValue &N = NodeMap[Address];
5902 if (!N.getNode() && isa<Argument>(Address))
5903 // Check unused arguments map.
5904 N = UnusedArgNodeMap[Address];
5905 SDDbgValue *SDV;
5906 if (N.getNode()) {
5907 if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
5908 Address = BCI->getOperand(0);
5909 // Parameters are handled specially.
5910 auto FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
5911 if (isParameter && FINode) {
5912 // Byval parameter. We have a frame index at this point.
5913 SDV =
5914 DAG.getFrameIndexDbgValue(Variable, Expression, FINode->getIndex(),
5915 /*IsIndirect*/ true, dl, SDNodeOrder);
5916 } else if (isa<Argument>(Address)) {
5917 // Address is an argument, so try to emit its dbg value using
5918 // virtual register info from the FuncInfo.ValueMap.
5919 EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true, N);
5920 return;
5921 } else {
5922 SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
5923 true, dl, SDNodeOrder);
5924 }
5925 DAG.AddDbgValue(SDV, N.getNode(), isParameter);
5926 } else {
5927 // If Address is an argument then try to emit its dbg value using
5928 // virtual register info from the FuncInfo.ValueMap.
5929 if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true,
5930 N)) {
5931 LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI
5932 << " (could not emit func-arg dbg_value)\n");
5933 }
5934 }
5935 return;
5936 }
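  // Illustrative example of the frame-index path above (not from the source):
  //   %x = alloca i32
  //   call void @llvm.dbg.declare(metadata i32* %x, metadata !var,
  //                               metadata !DIExpression())
  // A static alloca resolves FI via StaticAllocaMap; dbg.declare info is then
  // stashed in the MF side table, whereas dbg.addr emits an indirect
  // frame-index dbg value at this point in the instruction stream.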
5937 case Intrinsic::dbg_label: {
5938 const DbgLabelInst &DI = cast<DbgLabelInst>(I);
5939 DILabel *Label = DI.getLabel();
5940 assert(Label && "Missing label");
5941
5942 SDDbgLabel *SDV;
5943 SDV = DAG.getDbgLabel(Label, dl, SDNodeOrder);
5944 DAG.AddDbgLabel(SDV);
5945 return;
5946 }
5947 case Intrinsic::dbg_value: {
5948 const DbgValueInst &DI = cast<DbgValueInst>(I);
5949 assert(DI.getVariable() && "Missing variable");
5950
5951 DILocalVariable *Variable = DI.getVariable();
5952 DIExpression *Expression = DI.getExpression();
5953 dropDanglingDebugInfo(Variable, Expression);
5954 const Value *V = DI.getValue();
5955 if (!V)
5956 return;
5957
5958 if (handleDebugValue(V, Variable, Expression, dl, DI.getDebugLoc(),
5959 SDNodeOrder))
5960 return;
5961
5962 // TODO: Dangling debug info will eventually either be resolved or produce
5963 // an Undef DBG_VALUE. However in the resolution case, a gap may appear
5964 // between the original dbg.value location and its resolved DBG_VALUE, which
5965 // we should ideally fill with an extra Undef DBG_VALUE.
5966
5967 DanglingDebugInfoMap[V].emplace_back(&DI, dl, SDNodeOrder);
5968 return;
5969 }
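  // E.g. a dbg.value that refers to %v before %v itself has been lowered has
  // no SDNode to attach to, so it parks in DanglingDebugInfoMap above and is
  // either resolved once %v gets a node or eventually dropped as undef.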
5970
5971 case Intrinsic::eh_typeid_for: {
5972 // Find the type id for the given typeinfo.
5973 GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0));
5974 unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV);
5975 Res = DAG.getConstant(TypeID, sdl, MVT::i32);
5976 setValue(&I, Res);
5977 return;
5978 }
5979
5980 case Intrinsic::eh_return_i32:
5981 case Intrinsic::eh_return_i64:
5982 DAG.getMachineFunction().setCallsEHReturn(true);
5983 DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
5984 MVT::Other,
5985 getControlRoot(),
5986 getValue(I.getArgOperand(0)),
5987 getValue(I.getArgOperand(1))));
5988 return;
5989 case Intrinsic::eh_unwind_init:
5990 DAG.getMachineFunction().setCallsUnwindInit(true);
5991 return;
5992 case Intrinsic::eh_dwarf_cfa:
5993 setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl,
5994 TLI.getPointerTy(DAG.getDataLayout()),
5995 getValue(I.getArgOperand(0))));
5996 return;
5997 case Intrinsic::eh_sjlj_callsite: {
5998 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
5999 ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0));
6000 assert(CI && "Non-constant call site value in eh.sjlj.callsite!");
6001 assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
6002
6003 MMI.setCurrentCallSite(CI->getZExtValue());
6004 return;
6005 }
6006 case Intrinsic::eh_sjlj_functioncontext: {
6007 // Get and store the index of the function context.
6008 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
6009 AllocaInst *FnCtx =
6010 cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
6011 int FI = FuncInfo.StaticAllocaMap[FnCtx];
6012 MFI.setFunctionContextIndex(FI);
6013 return;
6014 }
6015 case Intrinsic::eh_sjlj_setjmp: {
6016 SDValue Ops[2];
6017 Ops[0] = getRoot();
6018 Ops[1] = getValue(I.getArgOperand(0));
6019 SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
6020 DAG.getVTList(MVT::i32, MVT::Other), Ops);
6021 setValue(&I, Op.getValue(0));
6022 DAG.setRoot(Op.getValue(1));
6023 return;
6024 }
6025 case Intrinsic::eh_sjlj_longjmp:
6026 DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
6027 getRoot(), getValue(I.getArgOperand(0))));
6028 return;
6029 case Intrinsic::eh_sjlj_setup_dispatch:
6030 DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other,
6031 getRoot()));
6032 return;
6033 case Intrinsic::masked_gather:
6034 visitMaskedGather(I);
6035 return;
6036 case Intrinsic::masked_load:
6037 visitMaskedLoad(I);
6038 return;
6039 case Intrinsic::masked_scatter:
6040 visitMaskedScatter(I);
6041 return;
6042 case Intrinsic::masked_store:
6043 visitMaskedStore(I);
6044 return;
6045 case Intrinsic::masked_expandload:
6046 visitMaskedLoad(I, true /* IsExpanding */);
6047 return;
6048 case Intrinsic::masked_compressstore:
6049 visitMaskedStore(I, true /* IsCompressing */);
6050 return;
6051 case Intrinsic::powi:
6052 setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
6053 getValue(I.getArgOperand(1)), DAG));
6054 return;
6055 case Intrinsic::log:
6056 setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
6057 return;
6058 case Intrinsic::log2:
6059 setValue(&I, expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
6060 return;
6061 case Intrinsic::log10:
6062 setValue(&I, expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
6063 return;
6064 case Intrinsic::exp:
6065 setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
6066 return;
6067 case Intrinsic::exp2:
6068 setValue(&I, expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
6069 return;
6070 case Intrinsic::pow:
6071 setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
6072 getValue(I.getArgOperand(1)), DAG, TLI));
6073 return;
6074 case Intrinsic::sqrt:
6075 case Intrinsic::fabs:
6076 case Intrinsic::sin:
6077 case Intrinsic::cos:
6078 case Intrinsic::floor:
6079 case Intrinsic::ceil:
6080 case Intrinsic::trunc:
6081 case Intrinsic::rint:
6082 case Intrinsic::nearbyint:
6083 case Intrinsic::round:
6084 case Intrinsic::roundeven:
6085 case Intrinsic::canonicalize: {
6086 unsigned Opcode;
6087 switch (Intrinsic) {
6088 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
6089 case Intrinsic::sqrt: Opcode = ISD::FSQRT; break;
6090 case Intrinsic::fabs: Opcode = ISD::FABS; break;
6091 case Intrinsic::sin: Opcode = ISD::FSIN; break;
6092 case Intrinsic::cos: Opcode = ISD::FCOS; break;
6093 case Intrinsic::floor: Opcode = ISD::FFLOOR; break;
6094 case Intrinsic::ceil: Opcode = ISD::FCEIL; break;
6095 case Intrinsic::trunc: Opcode = ISD::FTRUNC; break;
6096 case Intrinsic::rint: Opcode = ISD::FRINT; break;
6097 case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
6098 case Intrinsic::round: Opcode = ISD::FROUND; break;
6099 case Intrinsic::roundeven: Opcode = ISD::FROUNDEVEN; break;
6100 case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break;
6101 }
6102
6103 setValue(&I, DAG.getNode(Opcode, sdl,
6104 getValue(I.getArgOperand(0)).getValueType(),
6105 getValue(I.getArgOperand(0))));
6106 return;
6107 }
6108 case Intrinsic::lround:
6109 case Intrinsic::llround:
6110 case Intrinsic::lrint:
6111 case Intrinsic::llrint: {
6112 unsigned Opcode;
6113 switch (Intrinsic) {
6114 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
6115 case Intrinsic::lround: Opcode = ISD::LROUND; break;
6116 case Intrinsic::llround: Opcode = ISD::LLROUND; break;
6117 case Intrinsic::lrint: Opcode = ISD::LRINT; break;
6118 case Intrinsic::llrint: Opcode = ISD::LLRINT; break;
6119 }
6120
6121 EVT RetVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6122 setValue(&I, DAG.getNode(Opcode, sdl, RetVT,
6123 getValue(I.getArgOperand(0))));
6124 return;
6125 }
6126 case Intrinsic::minnum:
6127 setValue(&I, DAG.getNode(ISD::FMINNUM, sdl,
6128 getValue(I.getArgOperand(0)).getValueType(),
6129 getValue(I.getArgOperand(0)),
6130 getValue(I.getArgOperand(1))));
6131 return;
6132 case Intrinsic::maxnum:
6133 setValue(&I, DAG.getNode(ISD::FMAXNUM, sdl,
6134 getValue(I.getArgOperand(0)).getValueType(),
6135 getValue(I.getArgOperand(0)),
6136 getValue(I.getArgOperand(1))));
6137 return;
6138 case Intrinsic::minimum:
6139 setValue(&I, DAG.getNode(ISD::FMINIMUM, sdl,
6140 getValue(I.getArgOperand(0)).getValueType(),
6141 getValue(I.getArgOperand(0)),
6142 getValue(I.getArgOperand(1))));
6143 return;
6144 case Intrinsic::maximum:
6145 setValue(&I, DAG.getNode(ISD::FMAXIMUM, sdl,
6146 getValue(I.getArgOperand(0)).getValueType(),
6147 getValue(I.getArgOperand(0)),
6148 getValue(I.getArgOperand(1))));
6149 return;
6150 case Intrinsic::copysign:
6151 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
6152 getValue(I.getArgOperand(0)).getValueType(),
6153 getValue(I.getArgOperand(0)),
6154 getValue(I.getArgOperand(1))));
6155 return;
6156 case Intrinsic::fma:
6157 setValue(&I, DAG.getNode(ISD::FMA, sdl,
6158 getValue(I.getArgOperand(0)).getValueType(),
6159 getValue(I.getArgOperand(0)),
6160 getValue(I.getArgOperand(1)),
6161 getValue(I.getArgOperand(2))));
6162 return;
6163 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
6164 case Intrinsic::INTRINSIC:
6165 #include "llvm/IR/ConstrainedOps.def"
6166 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
6167 return;
6168 case Intrinsic::fmuladd: {
6169 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6170 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
6171 TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
6172 setValue(&I, DAG.getNode(ISD::FMA, sdl,
6173 getValue(I.getArgOperand(0)).getValueType(),
6174 getValue(I.getArgOperand(0)),
6175 getValue(I.getArgOperand(1)),
6176 getValue(I.getArgOperand(2))));
6177 } else {
6178 // TODO: Intrinsic calls should have fast-math-flags.
6179 SDValue Mul = DAG.getNode(ISD::FMUL, sdl,
6180 getValue(I.getArgOperand(0)).getValueType(),
6181 getValue(I.getArgOperand(0)),
6182 getValue(I.getArgOperand(1)));
6183 SDValue Add = DAG.getNode(ISD::FADD, sdl,
6184 getValue(I.getArgOperand(0)).getValueType(),
6185 Mul,
6186 getValue(I.getArgOperand(2)));
6187 setValue(&I, Add);
6188 }
6189 return;
6190 }
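  // The two lowerings above are not bit-identical: the FMA form rounds once,
  // while FMUL+FADD rounds after each step, which @llvm.fmuladd explicitly
  // permits; the expansion is taken when fusion is unprofitable or FP-op
  // fusion is set to Strict.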
6191 case Intrinsic::convert_to_fp16:
6192 setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
6193 DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
6194 getValue(I.getArgOperand(0)),
6195 DAG.getTargetConstant(0, sdl,
6196 MVT::i32))));
6197 return;
6198 case Intrinsic::convert_from_fp16:
6199 setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl,
6200 TLI.getValueType(DAG.getDataLayout(), I.getType()),
6201 DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
6202 getValue(I.getArgOperand(0)))));
6203 return;
6204 case Intrinsic::pcmarker: {
6205 SDValue Tmp = getValue(I.getArgOperand(0));
6206 DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
6207 return;
6208 }
6209 case Intrinsic::readcyclecounter: {
6210 SDValue Op = getRoot();
6211 Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
6212 DAG.getVTList(MVT::i64, MVT::Other), Op);
6213 setValue(&I, Res);
6214 DAG.setRoot(Res.getValue(1));
6215 return;
6216 }
6217 case Intrinsic::bitreverse:
6218 setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl,
6219 getValue(I.getArgOperand(0)).getValueType(),
6220 getValue(I.getArgOperand(0))));
6221 return;
6222 case Intrinsic::bswap:
6223 setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
6224 getValue(I.getArgOperand(0)).getValueType(),
6225 getValue(I.getArgOperand(0))));
6226 return;
6227 case Intrinsic::cttz: {
6228 SDValue Arg = getValue(I.getArgOperand(0));
6229 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
6230 EVT Ty = Arg.getValueType();
6231 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
6232 sdl, Ty, Arg));
6233 return;
6234 }
6235 case Intrinsic::ctlz: {
6236 SDValue Arg = getValue(I.getArgOperand(0));
6237 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
6238 EVT Ty = Arg.getValueType();
6239 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
6240 sdl, Ty, Arg));
6241 return;
6242 }
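  // Worked example for the two cases above: for i8 0x08, cttz is 3 and ctlz
  // is 4. The second intrinsic operand is the is_zero_undef flag: when it is
  // nonzero the *_ZERO_UNDEF node is chosen and an input of 0 produces an
  // undefined result instead of the bit width.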
6243 case Intrinsic::ctpop: {
6244 SDValue Arg = getValue(I.getArgOperand(0));
6245 EVT Ty = Arg.getValueType();
6246 setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
6247 return;
6248 }
6249 case Intrinsic::fshl:
6250 case Intrinsic::fshr: {
6251 bool IsFSHL = Intrinsic == Intrinsic::fshl;
6252 SDValue X = getValue(I.getArgOperand(0));
6253 SDValue Y = getValue(I.getArgOperand(1));
6254 SDValue Z = getValue(I.getArgOperand(2));
6255 EVT VT = X.getValueType();
6256 SDValue BitWidthC = DAG.getConstant(VT.getScalarSizeInBits(), sdl, VT);
6257 SDValue Zero = DAG.getConstant(0, sdl, VT);
6258 SDValue ShAmt = DAG.getNode(ISD::UREM, sdl, VT, Z, BitWidthC);
6259
6260 auto FunnelOpcode = IsFSHL ? ISD::FSHL : ISD::FSHR;
6261 if (TLI.isOperationLegalOrCustom(FunnelOpcode, VT)) {
6262 setValue(&I, DAG.getNode(FunnelOpcode, sdl, VT, X, Y, Z));
6263 return;
6264 }
6265
6266 // When X == Y, this is a rotate. If the data type has a power-of-2 size, we
6267 // avoid the select that is necessary in the general case to filter out
6268 // the 0-shift possibility that leads to UB.
6269 if (X == Y && isPowerOf2_32(VT.getScalarSizeInBits())) {
6270 auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR;
6271 if (TLI.isOperationLegalOrCustom(RotateOpcode, VT)) {
6272 setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z));
6273 return;
6274 }
6275
6276 // Some targets only rotate one way. Try the opposite direction.
6277 RotateOpcode = IsFSHL ? ISD::ROTR : ISD::ROTL;
6278 if (TLI.isOperationLegalOrCustom(RotateOpcode, VT)) {
6279 // Negate the shift amount because it is safe to ignore the high bits.
6280 SDValue NegShAmt = DAG.getNode(ISD::SUB, sdl, VT, Zero, Z);
6281 setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, NegShAmt));
6282 return;
6283 }
6284
6285 // fshl (rotl): (X << (Z % BW)) | (X >> ((0 - Z) % BW))
6286 // fshr (rotr): (X << ((0 - Z) % BW)) | (X >> (Z % BW))
6287 SDValue NegZ = DAG.getNode(ISD::SUB, sdl, VT, Zero, Z);
6288 SDValue NShAmt = DAG.getNode(ISD::UREM, sdl, VT, NegZ, BitWidthC);
6289 SDValue ShX = DAG.getNode(ISD::SHL, sdl, VT, X, IsFSHL ? ShAmt : NShAmt);
6290 SDValue ShY = DAG.getNode(ISD::SRL, sdl, VT, X, IsFSHL ? NShAmt : ShAmt);
6291 setValue(&I, DAG.getNode(ISD::OR, sdl, VT, ShX, ShY));
6292 return;
6293 }
6294
6295 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
6296 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
6297 SDValue InvShAmt = DAG.getNode(ISD::SUB, sdl, VT, BitWidthC, ShAmt);
6298 SDValue ShX = DAG.getNode(ISD::SHL, sdl, VT, X, IsFSHL ? ShAmt : InvShAmt);
6299 SDValue ShY = DAG.getNode(ISD::SRL, sdl, VT, Y, IsFSHL ? InvShAmt : ShAmt);
6300 SDValue Or = DAG.getNode(ISD::OR, sdl, VT, ShX, ShY);
6301
6302 // If (Z % BW == 0), then the opposite direction shift is shift-by-bitwidth,
6303 // and that is undefined. We must compare and select to avoid UB.
6304 EVT CCVT = MVT::i1;
6305 if (VT.isVector())
6306 CCVT = EVT::getVectorVT(*Context, CCVT, VT.getVectorNumElements());
6307
6308 // For fshl, 0-shift returns the 1st arg (X).
6309 // For fshr, 0-shift returns the 2nd arg (Y).
6310 SDValue IsZeroShift = DAG.getSetCC(sdl, CCVT, ShAmt, Zero, ISD::SETEQ);
6311 setValue(&I, DAG.getSelect(sdl, VT, IsZeroShift, IsFSHL ? X : Y, Or));
6312 return;
6313 }
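  // Worked example of the generic funnel expansion above:
  //   fshl i8 %x, i8 %y, i8 3  ==>  (x << 3) | (y >> 5)
  // i.e. the high three bits of y fill the vacated low bits of x. When
  // x == y this is a rotate, which the power-of-2 fast path above turns
  // into rotl/rotr directly.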
6314 case Intrinsic::sadd_sat: {
6315 SDValue Op1 = getValue(I.getArgOperand(0));
6316 SDValue Op2 = getValue(I.getArgOperand(1));
6317 setValue(&I, DAG.getNode(ISD::SADDSAT, sdl, Op1.getValueType(), Op1, Op2));
6318 return;
6319 }
6320 case Intrinsic::uadd_sat: {
6321 SDValue Op1 = getValue(I.getArgOperand(0));
6322 SDValue Op2 = getValue(I.getArgOperand(1));
6323 setValue(&I, DAG.getNode(ISD::UADDSAT, sdl, Op1.getValueType(), Op1, Op2));
6324 return;
6325 }
6326 case Intrinsic::ssub_sat: {
6327 SDValue Op1 = getValue(I.getArgOperand(0));
6328 SDValue Op2 = getValue(I.getArgOperand(1));
6329 setValue(&I, DAG.getNode(ISD::SSUBSAT, sdl, Op1.getValueType(), Op1, Op2));
6330 return;
6331 }
6332 case Intrinsic::usub_sat: {
6333 SDValue Op1 = getValue(I.getArgOperand(0));
6334 SDValue Op2 = getValue(I.getArgOperand(1));
6335 setValue(&I, DAG.getNode(ISD::USUBSAT, sdl, Op1.getValueType(), Op1, Op2));
6336 return;
6337 }
6338 case Intrinsic::smul_fix:
6339 case Intrinsic::umul_fix:
6340 case Intrinsic::smul_fix_sat:
6341 case Intrinsic::umul_fix_sat: {
6342 SDValue Op1 = getValue(I.getArgOperand(0));
6343 SDValue Op2 = getValue(I.getArgOperand(1));
6344 SDValue Op3 = getValue(I.getArgOperand(2));
6345 setValue(&I, DAG.getNode(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
6346 Op1.getValueType(), Op1, Op2, Op3));
6347 return;
6348 }
6349 case Intrinsic::sdiv_fix:
6350 case Intrinsic::udiv_fix:
6351 case Intrinsic::sdiv_fix_sat:
6352 case Intrinsic::udiv_fix_sat: {
6353 SDValue Op1 = getValue(I.getArgOperand(0));
6354 SDValue Op2 = getValue(I.getArgOperand(1));
6355 SDValue Op3 = getValue(I.getArgOperand(2));
6356 setValue(&I, expandDivFix(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
6357 Op1, Op2, Op3, DAG, TLI));
6358 return;
6359 }
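  // These lower scaled division, e.g. (illustrative) udiv.fix with scale s
  // divides two fixed-point values carrying s fractional bits; expandDivFix
  // above takes care of the cases where the DIVFIX nodes are not legal for
  // the target.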
6360 case Intrinsic::stacksave: {
6361 SDValue Op = getRoot();
6362 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6363 Res = DAG.getNode(ISD::STACKSAVE, sdl, DAG.getVTList(VT, MVT::Other), Op);
6364 setValue(&I, Res);
6365 DAG.setRoot(Res.getValue(1));
6366 return;
6367 }
6368 case Intrinsic::stackrestore:
6369 Res = getValue(I.getArgOperand(0));
6370 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
6371 return;
6372 case Intrinsic::get_dynamic_area_offset: {
6373 SDValue Op = getRoot();
6374 EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
6375 EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
6376 // Result type for @llvm.get.dynamic.area.offset should match PtrTy for
6377 // target.
6378 if (PtrTy.getSizeInBits() < ResTy.getSizeInBits())
6379 report_fatal_error("Wrong result type for @llvm.get.dynamic.area.offset"
6380 " intrinsic!");
6381 Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy),
6382 Op);
6383 DAG.setRoot(Op);
6384 setValue(&I, Res);
6385 return;
6386 }
6387 case Intrinsic::stackguard: {
6388 MachineFunction &MF = DAG.getMachineFunction();
6389 const Module &M = *MF.getFunction().getParent();
6390 SDValue Chain = getRoot();
6391 if (TLI.useLoadStackGuardNode()) {
6392 Res = getLoadStackGuard(DAG, sdl, Chain);
6393 } else {
6394 EVT PtrTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
6395 const Value *Global = TLI.getSDagStackGuard(M);
6396 unsigned Align = DL->getPrefTypeAlignment(Global->getType());
6397 Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global),
6398 MachinePointerInfo(Global, 0), Align,
6399 MachineMemOperand::MOVolatile);
6400 }
6401 if (TLI.useStackGuardXorFP())
6402 Res = TLI.emitStackGuardXorFP(DAG, Res, sdl);
6403 DAG.setRoot(Chain);
6404 setValue(&I, Res);
6405 return;
6406 }
6407 case Intrinsic::stackprotector: {
6408 // Emit code into the DAG to store the stack guard onto the stack.
6409 MachineFunction &MF = DAG.getMachineFunction();
6410 MachineFrameInfo &MFI = MF.getFrameInfo();
6411 SDValue Src, Chain = getRoot();
6412
6413 if (TLI.useLoadStackGuardNode())
6414 Src = getLoadStackGuard(DAG, sdl, Chain);
6415 else
6416 Src = getValue(I.getArgOperand(0)); // The guard's value.
6417
6418 AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
6419
6420 int FI = FuncInfo.StaticAllocaMap[Slot];
6421 MFI.setStackProtectorIndex(FI);
6422 EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
6423
6424 SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
6425
6426 // Store the stack protector onto the stack.
6427 Res = DAG.getStore(Chain, sdl, Src, FIN, MachinePointerInfo::getFixedStack(
6428 DAG.getMachineFunction(), FI),
6429 /* Alignment = */ 0, MachineMemOperand::MOVolatile);
6430 setValue(&I, Res);
6431 DAG.setRoot(Res);
6432 return;
6433 }
6434 case Intrinsic::objectsize:
6435 llvm_unreachable("llvm.objectsize.* should have been lowered already");
6436
6437 case Intrinsic::is_constant:
6438 llvm_unreachable("llvm.is.constant.* should have been lowered already");
6439
6440 case Intrinsic::annotation:
6441 case Intrinsic::ptr_annotation:
6442 case Intrinsic::launder_invariant_group:
6443 case Intrinsic::strip_invariant_group:
6444 // Drop the intrinsic, but forward the value
6445 setValue(&I, getValue(I.getOperand(0)));
6446 return;
6447 case Intrinsic::assume:
6448 case Intrinsic::var_annotation:
6449 case Intrinsic::sideeffect:
6450 // Discard annotate attributes, assumptions, and artificial side-effects.
6451 return;
6452
6453 case Intrinsic::codeview_annotation: {
6454 // Emit a label associated with this metadata.
6455 MachineFunction &MF = DAG.getMachineFunction();
6456 MCSymbol *Label =
6457 MF.getMMI().getContext().createTempSymbol("annotation", true);
6458 Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata();
6459 MF.addCodeViewAnnotation(Label, cast<MDNode>(MD));
6460 Res = DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl, getRoot(), Label);
6461 DAG.setRoot(Res);
6462 return;
6463 }
6464
6465 case Intrinsic::init_trampoline: {
6466 const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
6467
6468 SDValue Ops[6];
6469 Ops[0] = getRoot();
6470 Ops[1] = getValue(I.getArgOperand(0));
6471 Ops[2] = getValue(I.getArgOperand(1));
6472 Ops[3] = getValue(I.getArgOperand(2));
6473 Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
6474 Ops[5] = DAG.getSrcValue(F);
6475
6476 Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);
6477
6478 DAG.setRoot(Res);
6479 return;
6480 }
6481 case Intrinsic::adjust_trampoline:
6482 setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
6483 TLI.getPointerTy(DAG.getDataLayout()),
6484 getValue(I.getArgOperand(0))));
6485 return;
6486 case Intrinsic::gcroot: {
6487 assert(DAG.getMachineFunction().getFunction().hasGC() &&
6488 "only valid in functions with gc specified, enforced by Verifier");
6489 assert(GFI && "implied by previous");
6490 const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
6491 const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
6492
6493 FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
6494 GFI->addStackRoot(FI->getIndex(), TypeMap);
6495 return;
6496 }
6497 case Intrinsic::gcread:
6498 case Intrinsic::gcwrite:
6499 llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
6500 case Intrinsic::flt_rounds:
6501 Res = DAG.getNode(ISD::FLT_ROUNDS_, sdl, {MVT::i32, MVT::Other}, getRoot());
6502 setValue(&I, Res);
6503 DAG.setRoot(Res.getValue(1));
6504 return;
6505
6506 case Intrinsic::expect:
6507 // Just replace __builtin_expect(exp, c) with EXP.
6508 setValue(&I, getValue(I.getArgOperand(0)));
6509 return;
6510
6511 case Intrinsic::debugtrap:
6512 case Intrinsic::trap: {
6513 StringRef TrapFuncName =
6514 I.getAttributes()
6515 .getAttribute(AttributeList::FunctionIndex, "trap-func-name")
6516 .getValueAsString();
6517 if (TrapFuncName.empty()) {
6518 ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ?
6519 ISD::TRAP : ISD::DEBUGTRAP;
6520 DAG.setRoot(DAG.getNode(Op, sdl, MVT::Other, getRoot()));
6521 return;
6522 }
6523 TargetLowering::ArgListTy Args;
6524
6525 TargetLowering::CallLoweringInfo CLI(DAG);
6526 CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
6527 CallingConv::C, I.getType(),
6528 DAG.getExternalSymbol(TrapFuncName.data(),
6529 TLI.getPointerTy(DAG.getDataLayout())),
6530 std::move(Args));
6531
6532 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
6533 DAG.setRoot(Result.second);
6534 return;
6535 }
6536
6537 case Intrinsic::uadd_with_overflow:
6538 case Intrinsic::sadd_with_overflow:
6539 case Intrinsic::usub_with_overflow:
6540 case Intrinsic::ssub_with_overflow:
6541 case Intrinsic::umul_with_overflow:
6542 case Intrinsic::smul_with_overflow: {
6543 ISD::NodeType Op;
6544 switch (Intrinsic) {
6545 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
6546 case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
6547 case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
6548 case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
6549 case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
6550 case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
6551 case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
6552 }
6553 SDValue Op1 = getValue(I.getArgOperand(0));
6554 SDValue Op2 = getValue(I.getArgOperand(1));
6555
6556 EVT ResultVT = Op1.getValueType();
6557 EVT OverflowVT = MVT::i1;
6558 if (ResultVT.isVector())
6559 OverflowVT = EVT::getVectorVT(
6560 *Context, OverflowVT, ResultVT.getVectorNumElements());
6561
6562 SDVTList VTs = DAG.getVTList(ResultVT, OverflowVT);
6563 setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
6564 return;
6565 }
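  // E.g. @llvm.uadd.with.overflow.i8(i8 255, i8 1) yields {i8 0, i1 true}:
  // the first result is the wrapped arithmetic value, the second the overflow
  // bit, matching the two-result SDVTList (ResultVT plus i1, or a vector of
  // i1 for vector operands) built above.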
6566 case Intrinsic::prefetch: {
6567 SDValue Ops[5];
6568 unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
6569 auto Flags = rw == 0 ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore;
6570 Ops[0] = DAG.getRoot();
6571 Ops[1] = getValue(I.getArgOperand(0));
6572 Ops[2] = getValue(I.getArgOperand(1));
6573 Ops[3] = getValue(I.getArgOperand(2));
6574 Ops[4] = getValue(I.getArgOperand(3));
6575 SDValue Result = DAG.getMemIntrinsicNode(
6576 ISD::PREFETCH, sdl, DAG.getVTList(MVT::Other), Ops,
6577 EVT::getIntegerVT(*Context, 8), MachinePointerInfo(I.getArgOperand(0)),
6578 /* align */ None, Flags);
6579
6580 // Chain the prefetch in parallel with any pending loads, to stay out of
6581 // the way of later optimizations.
6582 PendingLoads.push_back(Result);
6583 Result = getRoot();
6584 DAG.setRoot(Result);
6585 return;
6586 }
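  // The operand order above mirrors the intrinsic, e.g. (illustrative):
  //   call void @llvm.prefetch.p0i8(i8* %p, i32 0, i32 3, i32 1)
  // where 0/1 selects read/write (hence MOLoad vs. MOStore above), 3 is the
  // locality hint, and 1 marks a data (vs. instruction) prefetch.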
6587 case Intrinsic::lifetime_start:
6588 case Intrinsic::lifetime_end: {
6589 bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
6590 // Stack coloring is not enabled in O0, discard region information.
6591 if (TM.getOptLevel() == CodeGenOpt::None)
6592 return;
6593
6594 const int64_t ObjectSize =
6595 cast<ConstantInt>(I.getArgOperand(0))->getSExtValue();
6596 Value *const ObjectPtr = I.getArgOperand(1);
6597 SmallVector<const Value *, 4> Allocas;
6598 GetUnderlyingObjects(ObjectPtr, Allocas, *DL);
6599
6600 for (SmallVectorImpl<const Value*>::iterator Object = Allocas.begin(),
6601 E = Allocas.end(); Object != E; ++Object) {
6602 const AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(*Object);
6603
6604 // Could not find an Alloca.
6605 if (!LifetimeObject)
6606 continue;
6607
6608 // First check that the Alloca is static, otherwise it won't have a
6609 // valid frame index.
6610 auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject);
6611 if (SI == FuncInfo.StaticAllocaMap.end())
6612 return;
6613
6614 const int FrameIndex = SI->second;
6615 int64_t Offset;
6616 if (GetPointerBaseWithConstantOffset(
6617 ObjectPtr, Offset, DAG.getDataLayout()) != LifetimeObject)
6618 Offset = -1; // Cannot determine offset from alloca to lifetime object.
6619 Res = DAG.getLifetimeNode(IsStart, sdl, getRoot(), FrameIndex, ObjectSize,
6620 Offset);
6621 DAG.setRoot(Res);
6622 }
6623 return;
6624 }
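  // Illustrative IR this case consumes:
  //   call void @llvm.lifetime.start.p0i8(i64 16, i8* %p)
  // with 16 as ObjectSize and %p resolving to a static alloca; the emitted
  // LIFETIME_START/LIFETIME_END nodes are what stack coloring later uses to
  // overlap stack slots.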
6625 case Intrinsic::invariant_start:
6626 // Discard region information.
6627 setValue(&I, DAG.getUNDEF(TLI.getPointerTy(DAG.getDataLayout())));
6628 return;
6629 case Intrinsic::invariant_end:
6630 // Discard region information.
6631 return;
6632 case Intrinsic::clear_cache:
6633 /// FunctionName may be null.
6634 if (const char *FunctionName = TLI.getClearCacheBuiltinName())
6635 lowerCallToExternalSymbol(I, FunctionName);
6636 return;
6637 case Intrinsic::donothing:
6638 // ignore
6639 return;
6640 case Intrinsic::experimental_stackmap:
6641 visitStackmap(I);
6642 return;
6643 case Intrinsic::experimental_patchpoint_void:
6644 case Intrinsic::experimental_patchpoint_i64:
6645 visitPatchpoint(I);
6646 return;
6647 case Intrinsic::experimental_gc_statepoint:
6648 LowerStatepoint(cast<GCStatepointInst>(I));
6649 return;
6650 case Intrinsic::experimental_gc_result:
6651 visitGCResult(cast<GCResultInst>(I));
6652 return;
6653 case Intrinsic::experimental_gc_relocate:
6654 visitGCRelocate(cast<GCRelocateInst>(I));
6655 return;
6656 case Intrinsic::instrprof_increment:
6657 llvm_unreachable("instrprof failed to lower an increment");
6658 case Intrinsic::instrprof_value_profile:
6659 llvm_unreachable("instrprof failed to lower a value profiling call");
6660 case Intrinsic::localescape: {
6661 MachineFunction &MF = DAG.getMachineFunction();
6662 const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
6663
6664 // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
6665 // is the same on all targets.
6666 for (unsigned Idx = 0, E = I.getNumArgOperands(); Idx < E; ++Idx) {
6667 Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
6668 if (isa<ConstantPointerNull>(Arg))
6669 continue; // Skip null pointers. They represent a hole in index space.
6670 AllocaInst *Slot = cast<AllocaInst>(Arg);
6671 assert(FuncInfo.StaticAllocaMap.count(Slot) &&
6672 "can only escape static allocas");
6673 int FI = FuncInfo.StaticAllocaMap[Slot];
6674 MCSymbol *FrameAllocSym =
6675 MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
6676 GlobalValue::dropLLVMManglingEscape(MF.getName()), Idx);
6677 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
6678 TII->get(TargetOpcode::LOCAL_ESCAPE))
6679 .addSym(FrameAllocSym)
6680 .addFrameIndex(FI);
6681 }
6682
6683 return;
6684 }
6685
6686 case Intrinsic::localrecover: {
6687 // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx)
6688 MachineFunction &MF = DAG.getMachineFunction();
6689
6690 // Get the symbol that defines the frame offset.
6691 auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts());
6692 auto *Idx = cast<ConstantInt>(I.getArgOperand(2));
6693 unsigned IdxVal =
6694 unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
6695 MCSymbol *FrameAllocSym =
6696 MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
6697 GlobalValue::dropLLVMManglingEscape(Fn->getName()), IdxVal);
6698
6699 Value *FP = I.getArgOperand(1);
6700 SDValue FPVal = getValue(FP);
6701 EVT PtrVT = FPVal.getValueType();
6702
6703 // Create a MCSymbol for the label to avoid any target lowering
6704 // that would make this PC relative.
6705 SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT);
6706 SDValue OffsetVal =
6707 DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym);
6708
6709 // Add the offset to the FP.
6710 SDValue Add = DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl);
6711 setValue(&I, Add);
6712
6713 return;
6714 }
6715
6716 case Intrinsic::eh_exceptionpointer:
6717 case Intrinsic::eh_exceptioncode: {
6718 // Get the exception pointer vreg, copy from it, and resize it to fit.
6719 const auto *CPI = cast<CatchPadInst>(I.getArgOperand(0));
6720 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
6721 const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT);
6722 unsigned VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC);
6723 SDValue N =
6724 DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(), VReg, PtrVT);
6725 if (Intrinsic == Intrinsic::eh_exceptioncode)
6726 N = DAG.getZExtOrTrunc(N, getCurSDLoc(), MVT::i32);
6727 setValue(&I, N);
6728 return;
6729 }
6730 case Intrinsic::xray_customevent: {
6731 // Here we want to make sure that the intrinsic behaves as if it has a
6732 // specific calling convention, and only for x86_64.
6733 // FIXME: Support other platforms later.
6734 const auto &Triple = DAG.getTarget().getTargetTriple();
6735 if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
6736 return;
6737
6738 SDLoc DL = getCurSDLoc();
6739 SmallVector<SDValue, 8> Ops;
6740
6741 // We want to say that we always want the arguments in registers.
6742 SDValue LogEntryVal = getValue(I.getArgOperand(0));
6743 SDValue StrSizeVal = getValue(I.getArgOperand(1));
6744 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
6745 SDValue Chain = getRoot();
6746 Ops.push_back(LogEntryVal);
6747 Ops.push_back(StrSizeVal);
6748 Ops.push_back(Chain);
6749
6750 // We need to enforce the calling convention for the callsite, so that
6751 // argument ordering is enforced correctly, and that register allocation can
6752 // see that some registers may be assumed clobbered and have to preserve
6753 // them across calls to the intrinsic.
6754 MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
6755 DL, NodeTys, Ops);
6756 SDValue patchableNode = SDValue(MN, 0);
6757 DAG.setRoot(patchableNode);
6758 setValue(&I, patchableNode);
6759 return;
6760 }
6761 case Intrinsic::xray_typedevent: {
6762 // Here we want to make sure that the intrinsic behaves as if it has a
6763 // specific calling convention, and only for x86_64.
6764 // FIXME: Support other platforms later.
6765 const auto &Triple = DAG.getTarget().getTargetTriple();
6766 if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
6767 return;
6768
6769 SDLoc DL = getCurSDLoc();
6770 SmallVector<SDValue, 8> Ops;
6771
6772 // We want to say that we always want the arguments in registers.
6773 // It's unclear to me how manipulating the selection DAG here forces callers
6774 // to provide arguments in registers instead of on the stack.
6775 SDValue LogTypeId = getValue(I.getArgOperand(0));
6776 SDValue LogEntryVal = getValue(I.getArgOperand(1));
6777 SDValue StrSizeVal = getValue(I.getArgOperand(2));
6778 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
6779 SDValue Chain = getRoot();
6780 Ops.push_back(LogTypeId);
6781 Ops.push_back(LogEntryVal);
6782 Ops.push_back(StrSizeVal);
6783 Ops.push_back(Chain);
6784
6785 // We need to enforce the calling convention for the callsite, so that
6786 // argument ordering is enforced correctly, and that register allocation can
6787 // see that some registers may be assumed clobbered and have to preserve
6788 // them across calls to the intrinsic.
6789 MachineSDNode *MN = DAG.getMachineNode(
6790 TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, DL, NodeTys, Ops);
6791 SDValue patchableNode = SDValue(MN, 0);
6792 DAG.setRoot(patchableNode);
6793 setValue(&I, patchableNode);
6794 return;
6795 }
6796 case Intrinsic::experimental_deoptimize:
6797 LowerDeoptimizeCall(&I);
6798 return;
6799
6800 case Intrinsic::experimental_vector_reduce_v2_fadd:
6801 case Intrinsic::experimental_vector_reduce_v2_fmul:
6802 case Intrinsic::experimental_vector_reduce_add:
6803 case Intrinsic::experimental_vector_reduce_mul:
6804 case Intrinsic::experimental_vector_reduce_and:
6805 case Intrinsic::experimental_vector_reduce_or:
6806 case Intrinsic::experimental_vector_reduce_xor:
6807 case Intrinsic::experimental_vector_reduce_smax:
6808 case Intrinsic::experimental_vector_reduce_smin:
6809 case Intrinsic::experimental_vector_reduce_umax:
6810 case Intrinsic::experimental_vector_reduce_umin:
6811 case Intrinsic::experimental_vector_reduce_fmax:
6812 case Intrinsic::experimental_vector_reduce_fmin:
6813 visitVectorReduce(I, Intrinsic);
6814 return;
6815
6816 case Intrinsic::icall_branch_funnel: {
6817 SmallVector<SDValue, 16> Ops;
6818 Ops.push_back(getValue(I.getArgOperand(0)));
6819
6820 int64_t Offset;
6821 auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
6822 I.getArgOperand(1), Offset, DAG.getDataLayout()));
6823 if (!Base)
6824 report_fatal_error(
6825 "llvm.icall.branch.funnel operand must be a GlobalValue");
6826 Ops.push_back(DAG.getTargetGlobalAddress(Base, getCurSDLoc(), MVT::i64, 0));
6827
6828 struct BranchFunnelTarget {
6829 int64_t Offset;
6830 SDValue Target;
6831 };
6832 SmallVector<BranchFunnelTarget, 8> Targets;
6833
6834 for (unsigned Op = 1, N = I.getNumArgOperands(); Op != N; Op += 2) {
6835 auto *ElemBase = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
6836 I.getArgOperand(Op), Offset, DAG.getDataLayout()));
6837 if (ElemBase != Base)
6838 report_fatal_error("all llvm.icall.branch.funnel operands must refer "
6839 "to the same GlobalValue");
6840
6841 SDValue Val = getValue(I.getArgOperand(Op + 1));
6842 auto *GA = dyn_cast<GlobalAddressSDNode>(Val);
6843 if (!GA)
6844 report_fatal_error(
6845 "llvm.icall.branch.funnel operand must be a GlobalValue");
6846 Targets.push_back({Offset, DAG.getTargetGlobalAddress(
6847 GA->getGlobal(), getCurSDLoc(),
6848 Val.getValueType(), GA->getOffset())});
6849 }
6850 llvm::sort(Targets,
6851 [](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) {
6852 return T1.Offset < T2.Offset;
6853 });
6854
6855 for (auto &T : Targets) {
6856 Ops.push_back(DAG.getTargetConstant(T.Offset, getCurSDLoc(), MVT::i32));
6857 Ops.push_back(T.Target);
6858 }
6859
6860 Ops.push_back(DAG.getRoot()); // Chain
6861 SDValue N(DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL,
6862 getCurSDLoc(), MVT::Other, Ops),
6863 0);
6864 DAG.setRoot(N);
6865 setValue(&I, N);
6866 HasTailCall = true;
6867 return;
6868 }
6869
6870 case Intrinsic::wasm_landingpad_index:
6871 // Information this intrinsic contained has been transferred to
6872 // MachineFunction in SelectionDAGISel::PrepareEHLandingPad. We can safely
6873 // delete it now.
6874 return;
6875
6876 case Intrinsic::aarch64_settag:
6877 case Intrinsic::aarch64_settag_zero: {
6878 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
6879 bool ZeroMemory = Intrinsic == Intrinsic::aarch64_settag_zero;
6880 SDValue Val = TSI.EmitTargetCodeForSetTag(
6881 DAG, getCurSDLoc(), getRoot(), getValue(I.getArgOperand(0)),
6882 getValue(I.getArgOperand(1)), MachinePointerInfo(I.getArgOperand(0)),
6883 ZeroMemory);
6884 DAG.setRoot(Val);
6885 setValue(&I, Val);
6886 return;
6887 }
6888 case Intrinsic::ptrmask: {
6889 SDValue Ptr = getValue(I.getOperand(0));
6890 SDValue Const = getValue(I.getOperand(1));
6891
6892 EVT PtrVT = Ptr.getValueType();
6893 setValue(&I, DAG.getNode(ISD::AND, getCurSDLoc(), PtrVT, Ptr,
6894 DAG.getZExtOrTrunc(Const, getCurSDLoc(), PtrVT)));
6895 return;
6896 }
6897 case Intrinsic::get_active_lane_mask: {
6898 auto DL = getCurSDLoc();
6899 SDValue Index = getValue(I.getOperand(0));
6900 SDValue BTC = getValue(I.getOperand(1));
6901 Type *ElementTy = I.getOperand(0)->getType();
6902 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6903 unsigned VecWidth = VT.getVectorNumElements();
6904
6905 SmallVector<SDValue, 16> OpsBTC;
6906 SmallVector<SDValue, 16> OpsIndex;
6907 SmallVector<SDValue, 16> OpsStepConstants;
6908 for (unsigned i = 0; i < VecWidth; i++) {
6909 OpsBTC.push_back(BTC);
6910 OpsIndex.push_back(Index);
6911 OpsStepConstants.push_back(DAG.getConstant(i, DL, MVT::getVT(ElementTy)));
6912 }
6913
6914 EVT CCVT = MVT::i1;
6915 CCVT = EVT::getVectorVT(I.getContext(), CCVT, VecWidth);
6916
6917 auto VecTy = MVT::getVT(FixedVectorType::get(ElementTy, VecWidth));
6918 SDValue VectorIndex = DAG.getBuildVector(VecTy, DL, OpsIndex);
6919 SDValue VectorStep = DAG.getBuildVector(VecTy, DL, OpsStepConstants);
6920 SDValue VectorInduction = DAG.getNode(
6921 ISD::UADDO, DL, DAG.getVTList(VecTy, CCVT), VectorIndex, VectorStep);
6922 SDValue VectorBTC = DAG.getBuildVector(VecTy, DL, OpsBTC);
6923 SDValue SetCC = DAG.getSetCC(DL, CCVT, VectorInduction.getValue(0),
6924 VectorBTC, ISD::CondCode::SETULE);
6925 setValue(&I, DAG.getNode(ISD::AND, DL, CCVT,
6926 DAG.getNOT(DL, VectorInduction.getValue(1), CCVT),
6927 SetCC));
6928 return;
6929 }
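  // Worked example (VF = 4): with %index = 4 and a back-edge-taken count of
  // 6, VectorInduction is <4, 5, 6, 7>, the ULE compare against the splatted
  // BTC yields <1, 1, 1, 0>, and ANDing with the negated UADDO overflow bits
  // only clears lanes where index + step wrapped around.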
6930 }
6931 }
6932
6933 void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
6934 const ConstrainedFPIntrinsic &FPI) {
6935 SDLoc sdl = getCurSDLoc();
6936
6937 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6938 SmallVector<EVT, 4> ValueVTs;
6939 ComputeValueVTs(TLI, DAG.getDataLayout(), FPI.getType(), ValueVTs);
6940 ValueVTs.push_back(MVT::Other); // Out chain
6941
6942 // We do not need to serialize constrained FP intrinsics against
6943 // each other or against (nonvolatile) loads, so they can be
6944 // chained like loads.
6945 SDValue Chain = DAG.getRoot();
6946 SmallVector<SDValue, 4> Opers;
6947 Opers.push_back(Chain);
6948 if (FPI.isUnaryOp()) {
6949 Opers.push_back(getValue(FPI.getArgOperand(0)));
6950 } else if (FPI.isTernaryOp()) {
6951 Opers.push_back(getValue(FPI.getArgOperand(0)));
6952 Opers.push_back(getValue(FPI.getArgOperand(1)));
6953 Opers.push_back(getValue(FPI.getArgOperand(2)));
6954 } else {
6955 Opers.push_back(getValue(FPI.getArgOperand(0)));
6956 Opers.push_back(getValue(FPI.getArgOperand(1)));
6957 }
6958
6959 auto pushOutChain = [this](SDValue Result, fp::ExceptionBehavior EB) {
6960 assert(Result.getNode()->getNumValues() == 2);
6961
6962 // Push node to the appropriate list so that future instructions can be
6963 // chained up correctly.
6964 SDValue OutChain = Result.getValue(1);
6965 switch (EB) {
6966 case fp::ExceptionBehavior::ebIgnore:
6967 // The only reason why ebIgnore nodes still need to be chained is that
6968 // they might depend on the current rounding mode, and therefore must
6969 // not be moved across instruction that may change that mode.
6970 LLVM_FALLTHROUGH;
6971 case fp::ExceptionBehavior::ebMayTrap:
6972 // These must not be moved across calls or instructions that may change
6973 // floating-point exception masks.
6974 PendingConstrainedFP.push_back(OutChain);
6975 break;
6976 case fp::ExceptionBehavior::ebStrict:
6977 // These must not be moved across calls or instructions that may change
6978 // floating-point exception masks or read floating-point exception flags.
6979 // In addition, they cannot be optimized out even if unused.
6980 PendingConstrainedFPStrict.push_back(OutChain);
6981 break;
6982 }
6983 };
6984
6985 SDVTList VTs = DAG.getVTList(ValueVTs);
6986 fp::ExceptionBehavior EB = FPI.getExceptionBehavior().getValue();
6987
6988 SDNodeFlags Flags;
6989 if (EB == fp::ExceptionBehavior::ebIgnore)
6990 Flags.setNoFPExcept(true);
6991
6992 if (auto *FPOp = dyn_cast<FPMathOperator>(&FPI))
6993 Flags.copyFMF(*FPOp);
6994
6995 unsigned Opcode;
6996 switch (FPI.getIntrinsicID()) {
6997 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
6998 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
6999 case Intrinsic::INTRINSIC: \
7000 Opcode = ISD::STRICT_##DAGN; \
7001 break;
7002 #include "llvm/IR/ConstrainedOps.def"
7003 case Intrinsic::experimental_constrained_fmuladd: {
7004 Opcode = ISD::STRICT_FMA;
7005 // Break fmuladd into fmul and fadd.
7006 if (TM.Options.AllowFPOpFusion == FPOpFusion::Strict ||
7007 !TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(),
7008 ValueVTs[0])) {
7009 Opers.pop_back();
7010 SDValue Mul = DAG.getNode(ISD::STRICT_FMUL, sdl, VTs, Opers, Flags);
7011 pushOutChain(Mul, EB);
7012 Opcode = ISD::STRICT_FADD;
7013 Opers.clear();
7014 Opers.push_back(Mul.getValue(1));
7015 Opers.push_back(Mul.getValue(0));
7016 Opers.push_back(getValue(FPI.getArgOperand(2)));
7017 }
7018 break;
7019 }
7020 }
7021
7022 // A few strict DAG nodes carry additional operands that are not
7023 // set up by the default code above.
7024 switch (Opcode) {
7025 default: break;
7026 case ISD::STRICT_FP_ROUND:
7027 Opers.push_back(
7028 DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout())));
7029 break;
7030 case ISD::STRICT_FSETCC:
7031 case ISD::STRICT_FSETCCS: {
7032 auto *FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI);
7033 Opers.push_back(DAG.getCondCode(getFCmpCondCode(FPCmp->getPredicate())));
7034 break;
7035 }
7036 }
7037
7038 SDValue Result = DAG.getNode(Opcode, sdl, VTs, Opers, Flags);
7039 pushOutChain(Result, EB);
7040
7041 SDValue FPResult = Result.getValue(0);
7042 setValue(&FPI, FPResult);
7043 }
7044
7045 std::pair<SDValue, SDValue>
7046 SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
7047 const BasicBlock *EHPadBB) {
7048 MachineFunction &MF = DAG.getMachineFunction();
7049 MachineModuleInfo &MMI = MF.getMMI();
7050 MCSymbol *BeginLabel = nullptr;
7051
7052 if (EHPadBB) {
7053 // Insert a label before the invoke call to mark the try range. This can be
7054 // used to detect deletion of the invoke via the MachineModuleInfo.
7055 BeginLabel = MMI.getContext().createTempSymbol();
7056
7057 // For SjLj, keep track of which landing pads go with which invokes
7058 // so as to maintain the ordering of pads in the LSDA.
7059 unsigned CallSiteIndex = MMI.getCurrentCallSite();
7060 if (CallSiteIndex) {
7061 MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
7062 LPadToCallSiteMap[FuncInfo.MBBMap[EHPadBB]].push_back(CallSiteIndex);
7063
7064 // Now that the call site is handled, stop tracking it.
7065 MMI.setCurrentCallSite(0);
7066 }
7067
7068 // Both PendingLoads and PendingExports must be flushed here;
7069 // this call might not return.
7070 (void)getRoot();
7071 DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getControlRoot(), BeginLabel));
7072
7073 CLI.setChain(getRoot());
7074 }
7075 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7076 std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
7077
7078 assert((CLI.IsTailCall || Result.second.getNode()) &&
7079 "Non-null chain expected with non-tail call!");
7080 assert((Result.second.getNode() || !Result.first.getNode()) &&
7081 "Null value expected with tail call!");
7082
7083 if (!Result.second.getNode()) {
7084 // As a special case, a null chain means that a tail call has been emitted
7085 // and the DAG root is already updated.
7086 HasTailCall = true;
7087
7088 // Since there's no actual continuation from this block, nothing can be
7089 // relying on us setting vregs for them.
7090 PendingExports.clear();
7091 } else {
7092 DAG.setRoot(Result.second);
7093 }
7094
7095 if (EHPadBB) {
7096 // Insert a label at the end of the invoke call to mark the try range. This
7097 // can be used to detect deletion of the invoke via the MachineModuleInfo.
7098 MCSymbol *EndLabel = MMI.getContext().createTempSymbol();
7099 DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getRoot(), EndLabel));
7100
7101 // Inform MachineModuleInfo of range.
7102 auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
7103 // Some platforms (e.g. wasm) use funclet-style IR but do not actually use
7104 // outlined funclets or their LSDA info style.
7105 if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) {
7106 assert(CLI.CB);
7107 WinEHFuncInfo *EHInfo = DAG.getMachineFunction().getWinEHFuncInfo();
7108 EHInfo->addIPToStateRange(cast<InvokeInst>(CLI.CB), BeginLabel, EndLabel);
7109 } else if (!isScopedEHPersonality(Pers)) {
7110 MF.addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel);
7111 }
7112 }
7113
7114 return Result;
7115 }
7116
7117 void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee,
7118 bool isTailCall,
7119 const BasicBlock *EHPadBB) {
7120 auto &DL = DAG.getDataLayout();
7121 FunctionType *FTy = CB.getFunctionType();
7122 Type *RetTy = CB.getType();
7123
7124 TargetLowering::ArgListTy Args;
7125 Args.reserve(CB.arg_size());
7126
7127 const Value *SwiftErrorVal = nullptr;
7128 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7129
7130 if (isTailCall) {
7131 // Avoid emitting tail calls in functions with the disable-tail-calls
7132 // attribute.
7133 auto *Caller = CB.getParent()->getParent();
7134 if (Caller->getFnAttribute("disable-tail-calls").getValueAsString() ==
7135 "true")
7136 isTailCall = false;
7137
7138 // We can't tail call inside a function with a swifterror argument. Lowering
7139 // does not support this yet. It would have to move into the swifterror
7140 // register before the call.
7141 if (TLI.supportSwiftError() &&
7142 Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
7143 isTailCall = false;
7144 }
7145
7146 for (auto I = CB.arg_begin(), E = CB.arg_end(); I != E; ++I) {
7147 TargetLowering::ArgListEntry Entry;
7148 const Value *V = *I;
7149
7150 // Skip empty types
7151 if (V->getType()->isEmptyTy())
7152 continue;
7153
7154 SDValue ArgNode = getValue(V);
7155 Entry.Node = ArgNode; Entry.Ty = V->getType();
7156
7157 Entry.setAttributes(&CB, I - CB.arg_begin());
7158
7159 // Use swifterror virtual register as input to the call.
7160 if (Entry.IsSwiftError && TLI.supportSwiftError()) {
7161 SwiftErrorVal = V;
7162 // Find the virtual register for the actual swifterror argument and use
7163 // it instead of the Value.
7164 Entry.Node =
7165 DAG.getRegister(SwiftError.getOrCreateVRegUseAt(&CB, FuncInfo.MBB, V),
7166 EVT(TLI.getPointerTy(DL)));
7167 }
7168
7169 Args.push_back(Entry);
7170
7171 // If we have an explicit sret argument that is an Instruction (i.e., it
7172 // might point to function-local memory), we can't meaningfully tail-call.
7173 if (Entry.IsSRet && isa<Instruction>(V))
7174 isTailCall = false;
7175 }
7176
7177 // If call site has a cfguardtarget operand bundle, create and add an
7178 // additional ArgListEntry.
7179 if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_cfguardtarget)) {
7180 TargetLowering::ArgListEntry Entry;
7181 Value *V = Bundle->Inputs[0];
7182 SDValue ArgNode = getValue(V);
7183 Entry.Node = ArgNode;
7184 Entry.Ty = V->getType();
7185 Entry.IsCFGuardTarget = true;
7186 Args.push_back(Entry);
7187 }
7188
7189 // Check if target-independent constraints permit a tail call here.
7190 // Target-dependent constraints are checked within TLI->LowerCallTo.
7191 if (isTailCall && !isInTailCallPosition(CB, DAG.getTarget()))
7192 isTailCall = false;
7193
7194 // Disable tail calls if there is a swifterror argument. Targets have not
7195 // been updated to support tail calls.
7196 if (TLI.supportSwiftError() && SwiftErrorVal)
7197 isTailCall = false;
7198
7199 TargetLowering::CallLoweringInfo CLI(DAG);
7200 CLI.setDebugLoc(getCurSDLoc())
7201 .setChain(getRoot())
7202 .setCallee(RetTy, FTy, Callee, std::move(Args), CB)
7203 .setTailCall(isTailCall)
7204 .setConvergent(CB.isConvergent())
7205 .setIsPreallocated(
7206 CB.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0);
7207 std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
7208
7209 if (Result.first.getNode()) {
7210 Result.first = lowerRangeToAssertZExt(DAG, CB, Result.first);
7211 setValue(&CB, Result.first);
7212 }
7213
7214 // The last element of CLI.InVals has the SDValue for swifterror return.
7215 // Here we copy it to a virtual register and update SwiftErrorMap for
7216 // book-keeping.
7217 if (SwiftErrorVal && TLI.supportSwiftError()) {
7218 // Get the last element of InVals.
7219 SDValue Src = CLI.InVals.back();
7220 Register VReg =
7221 SwiftError.getOrCreateVRegDefAt(&CB, FuncInfo.MBB, SwiftErrorVal);
7222 SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src);
7223 DAG.setRoot(CopyNode);
7224 }
7225 }
7226
7227 static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
7228 SelectionDAGBuilder &Builder) {
7229 // Check to see if this load can be trivially constant folded, e.g. if the
7230 // input is from a string literal.
7231 if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
7232 // Cast pointer to the type we really want to load.
7233 Type *LoadTy =
7234 Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits());
7235 if (LoadVT.isVector())
7236 LoadTy = FixedVectorType::get(LoadTy, LoadVT.getVectorNumElements());
7237
7238 LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
7239 PointerType::getUnqual(LoadTy));
7240
7241 if (const Constant *LoadCst = ConstantFoldLoadFromConstPtr(
7242 const_cast<Constant *>(LoadInput), LoadTy, *Builder.DL))
7243 return Builder.getValue(LoadCst);
7244 }
7245
7246 // Otherwise, we have to emit the load. If the pointer is to unfoldable but
7247 // still constant memory, the input chain can be the entry node.
7248 SDValue Root;
7249 bool ConstantMemory = false;
7250
7251 // Do not serialize (non-volatile) loads of constant memory with anything.
7252 if (Builder.AA && Builder.AA->pointsToConstantMemory(PtrVal)) {
7253 Root = Builder.DAG.getEntryNode();
7254 ConstantMemory = true;
7255 } else {
7256 // Do not serialize non-volatile loads against each other.
7257 Root = Builder.DAG.getRoot();
7258 }
7259
7260 SDValue Ptr = Builder.getValue(PtrVal);
7261 SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root,
7262 Ptr, MachinePointerInfo(PtrVal),
7263 /* Alignment = */ 1);
7264
7265 if (!ConstantMemory)
7266 Builder.PendingLoads.push_back(LoadVal.getValue(1));
7267 return LoadVal;
7268 }
7269
7270 /// Record the value for an instruction that produces an integer result,
7271 /// converting the type where necessary.
7272 void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
7273 SDValue Value,
7274 bool IsSigned) {
7275 EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
7276 I.getType(), true);
7277 if (IsSigned)
7278 Value = DAG.getSExtOrTrunc(Value, getCurSDLoc(), VT);
7279 else
7280 Value = DAG.getZExtOrTrunc(Value, getCurSDLoc(), VT);
7281 setValue(&I, Value);
7282 }
7283
7284 /// See if we can lower a memcmp call into an optimized form. If so, return
7285 /// true and lower it. Otherwise return false, and it will be lowered like a
7286 /// normal call.
7287 /// The caller already checked that \p I calls the appropriate LibFunc with a
7288 /// correct prototype.
7289 bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
7290 const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
7291 const Value *Size = I.getArgOperand(2);
7292 const ConstantInt *CSize = dyn_cast<ConstantInt>(Size);
7293 if (CSize && CSize->getZExtValue() == 0) {
7294 EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
7295 I.getType(), true);
7296 setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT));
7297 return true;
7298 }
7299
7300 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7301 std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp(
7302 DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS),
7303 getValue(Size), MachinePointerInfo(LHS), MachinePointerInfo(RHS));
7304 if (Res.first.getNode()) {
7305 processIntegerCallValue(I, Res.first, true);
7306 PendingLoads.push_back(Res.second);
7307 return true;
7308 }
7309
7310 // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS) != 0
7311 // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS) != 0
7312 if (!CSize || !isOnlyUsedInZeroEqualityComparison(&I))
7313 return false;
7314
7315 // If the target has a fast compare for the given size, it will return a
7316 // preferred load type for that size. Require that the load VT is legal and
7317 // that the target supports unaligned loads of that type. Otherwise, return
7318 // INVALID.
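// For example, a target with fast 64-bit equality compares might return
// MVT::i64 from hasFastEqualityCompare(64) (illustrative).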
7319 auto hasFastLoadsAndCompare = [&](unsigned NumBits) {
7320 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7321 MVT LVT = TLI.hasFastEqualityCompare(NumBits);
7322 if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) {
7323 // TODO: Handle 5 byte compare as 4-byte + 1 byte.
7324 // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
7325 // TODO: Check alignment of src and dest ptrs.
7326 unsigned DstAS = LHS->getType()->getPointerAddressSpace();
7327 unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
7328 if (!TLI.isTypeLegal(LVT) ||
7329 !TLI.allowsMisalignedMemoryAccesses(LVT, SrcAS) ||
7330 !TLI.allowsMisalignedMemoryAccesses(LVT, DstAS))
7331 LVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
7332 }
7333
7334 return LVT;
7335 };
7336
7337 // This turns into unaligned loads. We only do this if the target natively
7338 // supports the MVT we'll be loading or if it is small enough (<= 4) that
7339 // we'll only produce a small number of byte loads.
7340 MVT LoadVT;
7341 unsigned NumBitsToCompare = CSize->getZExtValue() * 8;
7342 switch (NumBitsToCompare) {
7343 default:
7344 return false;
7345 case 16:
7346 LoadVT = MVT::i16;
7347 break;
7348 case 32:
7349 LoadVT = MVT::i32;
7350 break;
7351 case 64:
7352 case 128:
7353 case 256:
7354 LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
7355 break;
7356 }
7357
7358 if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE)
7359 return false;
7360
7361 SDValue LoadL = getMemCmpLoad(LHS, LoadVT, *this);
7362 SDValue LoadR = getMemCmpLoad(RHS, LoadVT, *this);
7363
7364 // Bitcast to a wide integer type if the loads are vectors.
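// For example, two v16i8 loads would each be bitcast to i128 before the
// integer compare below (illustrative).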
7365 if (LoadVT.isVector()) {
7366 EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits());
7367 LoadL = DAG.getBitcast(CmpVT, LoadL);
7368 LoadR = DAG.getBitcast(CmpVT, LoadR);
7369 }
7370
7371 SDValue Cmp = DAG.getSetCC(getCurSDLoc(), MVT::i1, LoadL, LoadR, ISD::SETNE);
7372 processIntegerCallValue(I, Cmp, false);
7373 return true;
7374 }
7375
7376 /// See if we can lower a memchr call into an optimized form. If so, return
7377 /// true and lower it. Otherwise return false, and it will be lowered like a
7378 /// normal call.
7379 /// The caller already checked that \p I calls the appropriate LibFunc with a
7380 /// correct prototype.
7381 bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
7382 const Value *Src = I.getArgOperand(0);
7383 const Value *Char = I.getArgOperand(1);
7384 const Value *Length = I.getArgOperand(2);
7385
7386 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7387 std::pair<SDValue, SDValue> Res =
7388 TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
7389 getValue(Src), getValue(Char), getValue(Length),
7390 MachinePointerInfo(Src));
7391 if (Res.first.getNode()) {
7392 setValue(&I, Res.first);
7393 PendingLoads.push_back(Res.second);
7394 return true;
7395 }
7396
7397 return false;
7398 }
7399
7400 /// See if we can lower a mempcpy call into an optimized form. If so, return
7401 /// true and lower it. Otherwise return false, and it will be lowered like a
7402 /// normal call.
7403 /// The caller already checked that \p I calls the appropriate LibFunc with a
7404 /// correct prototype.
7405 bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) {
7406 SDValue Dst = getValue(I.getArgOperand(0));
7407 SDValue Src = getValue(I.getArgOperand(1));
7408 SDValue Size = getValue(I.getArgOperand(2));
7409
7410 Align DstAlign = DAG.InferPtrAlign(Dst).valueOrOne();
7411 Align SrcAlign = DAG.InferPtrAlign(Src).valueOrOne();
7412 // DAG::getMemcpy needs Alignment to be defined.
7413 Align Alignment = std::min(DstAlign, SrcAlign);
7414
7415 bool isVol = false;
7416 SDLoc sdl = getCurSDLoc();
7417
7418 // In the mempcpy context we need to pass in a false value for isTailCall
7419 // because the return pointer needs to be adjusted by the size of
7420 // the copied memory.
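// Note that, unlike memcpy, mempcpy returns a pointer to the byte just past
// the copied region, i.e. dst + n, which is computed below.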
7421 SDValue Root = isVol ? getRoot() : getMemoryRoot();
7422 SDValue MC = DAG.getMemcpy(Root, sdl, Dst, Src, Size, Alignment, isVol, false,
7423 /*isTailCall=*/false,
7424 MachinePointerInfo(I.getArgOperand(0)),
7425 MachinePointerInfo(I.getArgOperand(1)));
7426 assert(MC.getNode() != nullptr &&
7427 "** memcpy should not be lowered as TailCall in mempcpy context **");
7428 DAG.setRoot(MC);
7429
7430 // Check if Size needs to be truncated or extended.
7431 Size = DAG.getSExtOrTrunc(Size, sdl, Dst.getValueType());
7432
7433 // Adjust return pointer to point just past the last dst byte.
7434 SDValue DstPlusSize = DAG.getNode(ISD::ADD, sdl, Dst.getValueType(),
7435 Dst, Size);
7436 setValue(&I, DstPlusSize);
7437 return true;
7438 }
7439
7440 /// See if we can lower a strcpy call into an optimized form. If so, return
7441 /// true and lower it, otherwise return false and it will be lowered like a
7442 /// normal call.
7443 /// The caller already checked that \p I calls the appropriate LibFunc with a
7444 /// correct prototype.
7445 bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
7446 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
7447
7448 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7449 std::pair<SDValue, SDValue> Res =
7450 TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(),
7451 getValue(Arg0), getValue(Arg1),
7452 MachinePointerInfo(Arg0),
7453 MachinePointerInfo(Arg1), isStpcpy);
7454 if (Res.first.getNode()) {
7455 setValue(&I, Res.first);
7456 DAG.setRoot(Res.second);
7457 return true;
7458 }
7459
7460 return false;
7461 }
7462
7463 /// See if we can lower a strcmp call into an optimized form. If so, return
7464 /// true and lower it, otherwise return false and it will be lowered like a
7465 /// normal call.
7466 /// The caller already checked that \p I calls the appropriate LibFunc with a
7467 /// correct prototype.
7468 bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
7469 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
7470
7471 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7472 std::pair<SDValue, SDValue> Res =
7473 TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(),
7474 getValue(Arg0), getValue(Arg1),
7475 MachinePointerInfo(Arg0),
7476 MachinePointerInfo(Arg1));
7477 if (Res.first.getNode()) {
7478 processIntegerCallValue(I, Res.first, true);
7479 PendingLoads.push_back(Res.second);
7480 return true;
7481 }
7482
7483 return false;
7484 }
7485
7486 /// See if we can lower a strlen call into an optimized form. If so, return
7487 /// true and lower it, otherwise return false and it will be lowered like a
7488 /// normal call.
7489 /// The caller already checked that \p I calls the appropriate LibFunc with a
7490 /// correct prototype.
7491 bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
7492 const Value *Arg0 = I.getArgOperand(0);
7493
7494 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7495 std::pair<SDValue, SDValue> Res =
7496 TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
7497 getValue(Arg0), MachinePointerInfo(Arg0));
7498 if (Res.first.getNode()) {
7499 processIntegerCallValue(I, Res.first, false);
7500 PendingLoads.push_back(Res.second);
7501 return true;
7502 }
7503
7504 return false;
7505 }
7506
7507 /// See if we can lower a strnlen call into an optimized form. If so, return
7508 /// true and lower it, otherwise return false and it will be lowered like a
7509 /// normal call.
7510 /// The caller already checked that \p I calls the appropriate LibFunc with a
7511 /// correct prototype.
7512 bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
7513 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
7514
7515 const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7516 std::pair<SDValue, SDValue> Res =
7517 TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
7518 getValue(Arg0), getValue(Arg1),
7519 MachinePointerInfo(Arg0));
7520 if (Res.first.getNode()) {
7521 processIntegerCallValue(I, Res.first, false);
7522 PendingLoads.push_back(Res.second);
7523 return true;
7524 }
7525
7526 return false;
7527 }
7528
7529 /// See if we can lower a unary floating-point operation into an SDNode with
7530 /// the specified Opcode. If so, return true and lower it, otherwise return
7531 /// false and it will be lowered like a normal call.
7532 /// The caller already checked that \p I calls the appropriate LibFunc with a
7533 /// correct prototype.
7534 bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
7535 unsigned Opcode) {
7536 // We already checked this call's prototype; verify it doesn't modify errno.
7537 if (!I.onlyReadsMemory())
7538 return false;
7539
7540 SDValue Tmp = getValue(I.getArgOperand(0));
7541 setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp));
7542 return true;
7543 }
7544
7545 /// See if we can lower a binary floating-point operation into an SDNode with
7546 /// the specified Opcode. If so, return true and lower it. Otherwise return
7547 /// false, and it will be lowered like a normal call.
7548 /// The caller already checked that \p I calls the appropriate LibFunc with a
7549 /// correct prototype.
7550 bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
7551 unsigned Opcode) {
7552 // We already checked this call's prototype; verify it doesn't modify errno.
7553 if (!I.onlyReadsMemory())
7554 return false;
7555
7556 SDValue Tmp0 = getValue(I.getArgOperand(0));
7557 SDValue Tmp1 = getValue(I.getArgOperand(1));
7558 EVT VT = Tmp0.getValueType();
7559 setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1));
7560 return true;
7561 }
7562
7563 void SelectionDAGBuilder::visitCall(const CallInst &I) {
7564 // Handle inline assembly differently.
7565 if (I.isInlineAsm()) {
7566 visitInlineAsm(I);
7567 return;
7568 }
7569
7570 if (Function *F = I.getCalledFunction()) {
7571 if (F->isDeclaration()) {
7572 // Is this an LLVM intrinsic or a target-specific intrinsic?
7573 unsigned IID = F->getIntrinsicID();
7574 if (!IID)
7575 if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo())
7576 IID = II->getIntrinsicID(F);
7577
7578 if (IID) {
7579 visitIntrinsicCall(I, IID);
7580 return;
7581 }
7582 }
7583
7584 // Check for well-known libc/libm calls. If the function is internal, it
7585 // can't be a library call. Don't do the check if marked as nobuiltin for
7586 // some reason or the call site requires strict floating point semantics.
7587 LibFunc Func;
7588 if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() &&
7589 F->hasName() && LibInfo->getLibFunc(*F, Func) &&
7590 LibInfo->hasOptimizedCodeGen(Func)) {
7591 switch (Func) {
7592 default: break;
7593 case LibFunc_copysign:
7594 case LibFunc_copysignf:
7595 case LibFunc_copysignl:
7596 // We already checked this call's prototype; verify it doesn't modify
7597 // errno.
7598 if (I.onlyReadsMemory()) {
7599 SDValue LHS = getValue(I.getArgOperand(0));
7600 SDValue RHS = getValue(I.getArgOperand(1));
7601 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
7602 LHS.getValueType(), LHS, RHS));
7603 return;
7604 }
7605 break;
7606 case LibFunc_fabs:
7607 case LibFunc_fabsf:
7608 case LibFunc_fabsl:
7609 if (visitUnaryFloatCall(I, ISD::FABS))
7610 return;
7611 break;
7612 case LibFunc_fmin:
7613 case LibFunc_fminf:
7614 case LibFunc_fminl:
7615 if (visitBinaryFloatCall(I, ISD::FMINNUM))
7616 return;
7617 break;
7618 case LibFunc_fmax:
7619 case LibFunc_fmaxf:
7620 case LibFunc_fmaxl:
7621 if (visitBinaryFloatCall(I, ISD::FMAXNUM))
7622 return;
7623 break;
7624 case LibFunc_sin:
7625 case LibFunc_sinf:
7626 case LibFunc_sinl:
7627 if (visitUnaryFloatCall(I, ISD::FSIN))
7628 return;
7629 break;
7630 case LibFunc_cos:
7631 case LibFunc_cosf:
7632 case LibFunc_cosl:
7633 if (visitUnaryFloatCall(I, ISD::FCOS))
7634 return;
7635 break;
7636 case LibFunc_sqrt:
7637 case LibFunc_sqrtf:
7638 case LibFunc_sqrtl:
7639 case LibFunc_sqrt_finite:
7640 case LibFunc_sqrtf_finite:
7641 case LibFunc_sqrtl_finite:
7642 if (visitUnaryFloatCall(I, ISD::FSQRT))
7643 return;
7644 break;
7645 case LibFunc_floor:
7646 case LibFunc_floorf:
7647 case LibFunc_floorl:
7648 if (visitUnaryFloatCall(I, ISD::FFLOOR))
7649 return;
7650 break;
7651 case LibFunc_nearbyint:
7652 case LibFunc_nearbyintf:
7653 case LibFunc_nearbyintl:
7654 if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
7655 return;
7656 break;
7657 case LibFunc_ceil:
7658 case LibFunc_ceilf:
7659 case LibFunc_ceill:
7660 if (visitUnaryFloatCall(I, ISD::FCEIL))
7661 return;
7662 break;
7663 case LibFunc_rint:
7664 case LibFunc_rintf:
7665 case LibFunc_rintl:
7666 if (visitUnaryFloatCall(I, ISD::FRINT))
7667 return;
7668 break;
7669 case LibFunc_round:
7670 case LibFunc_roundf:
7671 case LibFunc_roundl:
7672 if (visitUnaryFloatCall(I, ISD::FROUND))
7673 return;
7674 break;
7675 case LibFunc_trunc:
7676 case LibFunc_truncf:
7677 case LibFunc_truncl:
7678 if (visitUnaryFloatCall(I, ISD::FTRUNC))
7679 return;
7680 break;
7681 case LibFunc_log2:
7682 case LibFunc_log2f:
7683 case LibFunc_log2l:
7684 if (visitUnaryFloatCall(I, ISD::FLOG2))
7685 return;
7686 break;
7687 case LibFunc_exp2:
7688 case LibFunc_exp2f:
7689 case LibFunc_exp2l:
7690 if (visitUnaryFloatCall(I, ISD::FEXP2))
7691 return;
7692 break;
7693 case LibFunc_memcmp:
7694 if (visitMemCmpCall(I))
7695 return;
7696 break;
7697 case LibFunc_mempcpy:
7698 if (visitMemPCpyCall(I))
7699 return;
7700 break;
7701 case LibFunc_memchr:
7702 if (visitMemChrCall(I))
7703 return;
7704 break;
7705 case LibFunc_strcpy:
7706 if (visitStrCpyCall(I, false))
7707 return;
7708 break;
7709 case LibFunc_stpcpy:
7710 if (visitStrCpyCall(I, true))
7711 return;
7712 break;
7713 case LibFunc_strcmp:
7714 if (visitStrCmpCall(I))
7715 return;
7716 break;
7717 case LibFunc_strlen:
7718 if (visitStrLenCall(I))
7719 return;
7720 break;
7721 case LibFunc_strnlen:
7722 if (visitStrNLenCall(I))
7723 return;
7724 break;
7725 }
7726 }
7727 }
7728
7729 // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
7730 // have to do anything here to lower funclet bundles.
7731 // CFGuardTarget bundles are lowered in LowerCallTo.
7732 assert(!I.hasOperandBundlesOtherThan(
7733 {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
7734 LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated}) &&
7735 "Cannot lower calls with arbitrary operand bundles!");
7736
7737 SDValue Callee = getValue(I.getCalledOperand());
7738
7739 if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
7740 LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
7741 else
7742 // Check if we can potentially perform a tail call. More detailed checking
7743 // is done within LowerCallTo, after more information about the call is
7744 // known.
7745 LowerCallTo(I, Callee, I.isTailCall());
7746 }
7747
7748 namespace {
7749
7750 /// AsmOperandInfo - This contains information for each constraint that we are
7751 /// lowering.
7752 class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
7753 public:
7754 /// CallOperand - If this is the result output operand or a clobber
7755 /// this is null, otherwise it is the incoming operand to the CallInst.
7756 /// This gets modified as the asm is processed.
7757 SDValue CallOperand;
7758
7759 /// AssignedRegs - If this is a register or register class operand, this
7760 /// contains the set of registers corresponding to the operand.
7761 RegsForValue AssignedRegs;
7762
7763 explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
7764 : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr, 0) {
7765 }
7766
7767 /// Whether or not this operand accesses memory
7768 bool hasMemory(const TargetLowering &TLI) const {
7769 // Indirect operand accesses access memory.
7770 if (isIndirect)
7771 return true;
7772
7773 for (const auto &Code : Codes)
7774 if (TLI.getConstraintType(Code) == TargetLowering::C_Memory)
7775 return true;
7776
7777 return false;
7778 }
7779
7780 /// getCallOperandValEVT - Return the EVT of the Value* that this operand
7781 /// corresponds to. If there is no Value* for this operand, it returns
7782 /// MVT::Other.
7783 EVT getCallOperandValEVT(LLVMContext &Context, const TargetLowering &TLI,
7784 const DataLayout &DL) const {
7785 if (!CallOperandVal) return MVT::Other;
7786
7787 if (isa<BasicBlock>(CallOperandVal))
7788 return TLI.getProgramPointerTy(DL);
7789
7790 llvm::Type *OpTy = CallOperandVal->getType();
7791
7792 // FIXME: code duplicated from TargetLowering::ParseConstraints().
7793 // If this is an indirect operand, the operand is a pointer to the
7794 // accessed type.
7795 if (isIndirect) {
7796 PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
7797 if (!PtrTy)
7798 report_fatal_error("Indirect operand for inline asm not a pointer!");
7799 OpTy = PtrTy->getElementType();
7800 }
7801
7802 // Look for a vector wrapped in a struct, e.g. { <16 x i8> }.
7803 if (StructType *STy = dyn_cast<StructType>(OpTy))
7804 if (STy->getNumElements() == 1)
7805 OpTy = STy->getElementType(0);
7806
7807 // If OpTy is not a single value, it may be a struct/union that we
7808 // can tile with integers.
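// For example, a struct of two i16 fields (32 bits total) would be treated
// as an i32 here (illustrative).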
7809 if (!OpTy->isSingleValueType() && OpTy->isSized()) {
7810 unsigned BitSize = DL.getTypeSizeInBits(OpTy);
7811 switch (BitSize) {
7812 default: break;
7813 case 1:
7814 case 8:
7815 case 16:
7816 case 32:
7817 case 64:
7818 case 128:
7819 OpTy = IntegerType::get(Context, BitSize);
7820 break;
7821 }
7822 }
7823
7824 return TLI.getValueType(DL, OpTy, true);
7825 }
7826 };
7827
7828
7829 } // end anonymous namespace
7830
7831 /// Make sure that the output operand \p OpInfo and its corresponding input
7832 /// operand \p MatchingOpInfo have compatible constraint types (otherwise error
7833 /// out).
7834 static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo,
7835 SDISelAsmOperandInfo &MatchingOpInfo,
7836 SelectionDAG &DAG) {
7837 if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
7838 return;
7839
7840 const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
7841 const auto &TLI = DAG.getTargetLoweringInfo();
7842
7843 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
7844 TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
7845 OpInfo.ConstraintVT);
7846 std::pair<unsigned, const TargetRegisterClass *> InputRC =
7847 TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode,
7848 MatchingOpInfo.ConstraintVT);
7849 if ((OpInfo.ConstraintVT.isInteger() !=
7850 MatchingOpInfo.ConstraintVT.isInteger()) ||
7851 (MatchRC.second != InputRC.second)) {
7852 // FIXME: error out in a more elegant fashion
7853 report_fatal_error("Unsupported asm: input constraint"
7854 " with a matching output constraint of"
7855 " incompatible type!");
7856 }
7857 MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
7858 }
7859
7860 /// Get a direct memory input to behave well as an indirect operand.
7861 /// This may introduce stores, hence the need for a \p Chain.
7862 /// \return The (possibly updated) chain.
7863 static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location,
7864 SDISelAsmOperandInfo &OpInfo,
7865 SelectionDAG &DAG) {
7866 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7867
7868 // If we don't have an indirect input, put it in the constpool if we can,
7869 // otherwise spill it to a stack slot.
7870 // TODO: This isn't quite right. We need to handle these according to
7871 // the addressing mode that the constraint wants. Also, this may take
7872 // an additional register for the computation and we don't want that
7873 // either.
7874
7875 // If the operand is a float, integer, or vector constant, spill to a
7876 // constant pool entry to get its address.
7877 const Value *OpVal = OpInfo.CallOperandVal;
7878 if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
7879 isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
7880 OpInfo.CallOperand = DAG.getConstantPool(
7881 cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
7882 return Chain;
7883 }
7884
7885 // Otherwise, create a stack slot and emit a store to it before the asm.
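// The asm then receives the address of that slot as its memory operand; the
// caller subsequently marks the operand indirect.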
7886 Type *Ty = OpVal->getType();
7887 auto &DL = DAG.getDataLayout();
7888 uint64_t TySize = DL.getTypeAllocSize(Ty);
7889 MachineFunction &MF = DAG.getMachineFunction();
7890 int SSFI = MF.getFrameInfo().CreateStackObject(
7891 TySize, DL.getPrefTypeAlign(Ty), false);
7892 SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL));
7893 Chain = DAG.getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
7894 MachinePointerInfo::getFixedStack(MF, SSFI),
7895 TLI.getMemValueType(DL, Ty));
7896 OpInfo.CallOperand = StackSlot;
7897
7898 return Chain;
7899 }
7900
7901 /// GetRegistersForValue - Assign registers (virtual or physical) for the
7902 /// specified operand. We prefer to assign virtual registers, to allow the
7903 /// register allocator to handle the assignment process. However, if the asm
7904 /// uses features that we can't model on machineinstrs, we have SDISel do the
7905 /// allocation. This produces generally horrible, but correct, code.
7906 ///
7907 /// OpInfo describes the operand
7908 /// RefOpInfo describes the matching operand if any, the operand otherwise
7909 static void GetRegistersForValue(SelectionDAG &DAG, const SDLoc &DL,
7910 SDISelAsmOperandInfo &OpInfo,
7911 SDISelAsmOperandInfo &RefOpInfo) {
7912 LLVMContext &Context = *DAG.getContext();
7913 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7914
7915 MachineFunction &MF = DAG.getMachineFunction();
7916 SmallVector<unsigned, 4> Regs;
7917 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
7918
7919 // No work to do for memory operations.
7920 if (OpInfo.ConstraintType == TargetLowering::C_Memory)
7921 return;
7922
7923 // If this is a constraint for a single physreg, or a constraint for a
7924 // register class, find it.
7925 unsigned AssignedReg;
7926 const TargetRegisterClass *RC;
7927 std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
7928 &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
7929 // RC is unset only on failure. Return immediately.
7930 if (!RC)
7931 return;
7932
7933 // Get the actual register value type. This is important, because the user
7934 // may have asked for (e.g.) the AX register in i32 type. We need to
7935 // remember that AX is actually i16 to get the right extension.
7936 const MVT RegVT = *TRI.legalclasstypes_begin(*RC);
7937
7938 if (OpInfo.ConstraintVT != MVT::Other) {
7939 // If this is an FP operand in an integer register (or vice versa), or more
7940 // generally if the operand value disagrees with the register class we plan
7941 // to stick it in, fix the operand type.
7942 //
7943 // If this is an input value, the bitcast to the new type is done now.
7944 // Bitcast for output value is done at the end of visitInlineAsm().
7945 if ((OpInfo.Type == InlineAsm::isOutput ||
7946 OpInfo.Type == InlineAsm::isInput) &&
7947 !TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
7948 // Try to convert to the first EVT that the reg class contains. If the
7949 // types are identical size, use a bitcast to convert (e.g. two differing
7950 // vector types). Note: output bitcast is done at the end of
7951 // visitInlineAsm().
7952 if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
7953 // Exclude indirect inputs while they are unsupported because the code
7954 // to perform the load is missing and thus OpInfo.CallOperand still
7955 // refers to the input address rather than the pointed-to value.
7956 if (OpInfo.Type == InlineAsm::isInput && !OpInfo.isIndirect)
7957 OpInfo.CallOperand =
7958 DAG.getNode(ISD::BITCAST, DL, RegVT, OpInfo.CallOperand);
7959 OpInfo.ConstraintVT = RegVT;
7960 // If the operand is an FP value and we want it in integer registers,
7961 // use the corresponding integer type. This turns an f64 value into
7962 // i64, which can be passed with two i32 values on a 32-bit machine.
7963 } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
7964 MVT VT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
7965 if (OpInfo.Type == InlineAsm::isInput)
7966 OpInfo.CallOperand =
7967 DAG.getNode(ISD::BITCAST, DL, VT, OpInfo.CallOperand);
7968 OpInfo.ConstraintVT = VT;
7969 }
7970 }
7971 }
7972
7973 // No need to allocate a matching input constraint since the constraint it's
7974 // matching to has already been allocated.
7975 if (OpInfo.isMatchingInputConstraint())
7976 return;
7977
7978 EVT ValueVT = OpInfo.ConstraintVT;
7979 if (OpInfo.ConstraintVT == MVT::Other)
7980 ValueVT = RegVT;
7981
7982 // Initialize NumRegs.
7983 unsigned NumRegs = 1;
7984 if (OpInfo.ConstraintVT != MVT::Other)
7985 NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
7986
7987 // If this is a constraint for a specific physical register, like {r17},
7988 // assign it now.
7989
7990 // If this is associated with a specific register, initialize the iterator
7991 // to the correct place. If virtual, make sure we have enough registers.
7992
7993 // Initialize iterator if necessary
7994 TargetRegisterClass::iterator I = RC->begin();
7995 MachineRegisterInfo &RegInfo = MF.getRegInfo();
7996
7997 // Do not check for single registers.
7998 if (AssignedReg) {
7999 for (; *I != AssignedReg; ++I)
8000 assert(I != RC->end() && "AssignedReg should be member of RC");
8001 }
8002
8003 for (; NumRegs; --NumRegs, ++I) {
8004 assert(I != RC->end() && "Ran out of registers to allocate!");
8005 Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
8006 Regs.push_back(R);
8007 }
8008
8009 OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
8010 }
8011
8012 static unsigned
8013 findMatchingInlineAsmOperand(unsigned OperandNo,
8014 const std::vector<SDValue> &AsmNodeOperands) {
8015 // Scan until we find the definition we already emitted of this operand.
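// The INLINEASM operand list interleaves one flag word per constraint with
// that constraint's register or memory operands, so we can hop from flag
// word to flag word via getNumOperandRegisters.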
8016 unsigned CurOp = InlineAsm::Op_FirstOperand;
8017 for (; OperandNo; --OperandNo) {
8018 // Advance to the next operand.
8019 unsigned OpFlag =
8020 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
8021 assert((InlineAsm::isRegDefKind(OpFlag) ||
8022 InlineAsm::isRegDefEarlyClobberKind(OpFlag) ||
8023 InlineAsm::isMemKind(OpFlag)) &&
8024 "Skipped past definitions?");
8025 CurOp += InlineAsm::getNumOperandRegisters(OpFlag) + 1;
8026 }
8027 return CurOp;
8028 }
8029
8030 namespace {
8031
8032 class ExtraFlags {
8033 unsigned Flags = 0;
8034
8035 public:
8036 explicit ExtraFlags(const CallBase &Call) {
8037 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
8038 if (IA->hasSideEffects())
8039 Flags |= InlineAsm::Extra_HasSideEffects;
8040 if (IA->isAlignStack())
8041 Flags |= InlineAsm::Extra_IsAlignStack;
8042 if (Call.isConvergent())
8043 Flags |= InlineAsm::Extra_IsConvergent;
8044 Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
8045 }
8046
8047 void update(const TargetLowering::AsmOperandInfo &OpInfo) {
8048 // Ideally, we would only check against memory constraints. However, the
8049 // meaning of an Other constraint can be target-specific and we can't easily
8050 // reason about it. Therefore, be conservative and set MayLoad/MayStore
8051 // for Other constraints as well.
8052 if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
8053 OpInfo.ConstraintType == TargetLowering::C_Other) {
8054 if (OpInfo.Type == InlineAsm::isInput)
8055 Flags |= InlineAsm::Extra_MayLoad;
8056 else if (OpInfo.Type == InlineAsm::isOutput)
8057 Flags |= InlineAsm::Extra_MayStore;
8058 else if (OpInfo.Type == InlineAsm::isClobber)
8059 Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
8060 }
8061 }
8062
8063 unsigned get() const { return Flags; }
8064 };
8065
8066 } // end anonymous namespace
8067
8068 /// visitInlineAsm - Handle a call to an InlineAsm object.
8069 void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call) {
8070 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
8071
8072 /// ConstraintOperands - Information about all of the constraints.
8073 SmallVector<SDISelAsmOperandInfo, 16> ConstraintOperands;
8074
8075 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8076 TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
8077 DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), Call);
8078
8079 // First Pass: Calculate HasSideEffects and ExtraFlags (AlignStack,
8080 // AsmDialect, MayLoad, MayStore).
8081 bool HasSideEffect = IA->hasSideEffects();
8082 ExtraFlags ExtraInfo(Call);
8083
8084 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
8085 unsigned ResNo = 0; // ResNo - The result number of the next output.
8086 unsigned NumMatchingOps = 0;
8087 for (auto &T : TargetConstraints) {
8088 ConstraintOperands.push_back(SDISelAsmOperandInfo(T));
8089 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
8090
8091 // Compute the value type for each operand.
8092 if (OpInfo.Type == InlineAsm::isInput ||
8093 (OpInfo.Type == InlineAsm::isOutput && OpInfo.isIndirect)) {
8094 OpInfo.CallOperandVal = Call.getArgOperand(ArgNo++);
8095
8096 // Process the call argument. BasicBlocks are labels, currently appearing
8097 // only in asms.
8098 if (isa<CallBrInst>(Call) &&
8099 ArgNo - 1 >= (cast<CallBrInst>(&Call)->getNumArgOperands() -
8100 cast<CallBrInst>(&Call)->getNumIndirectDests() -
8101 NumMatchingOps) &&
8102 (NumMatchingOps == 0 ||
8103 ArgNo - 1 < (cast<CallBrInst>(&Call)->getNumArgOperands() -
8104 NumMatchingOps))) {
8105 const auto *BA = cast<BlockAddress>(OpInfo.CallOperandVal);
8106 EVT VT = TLI.getValueType(DAG.getDataLayout(), BA->getType(), true);
8107 OpInfo.CallOperand = DAG.getTargetBlockAddress(BA, VT);
8108 } else if (const auto *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
8109 OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
8110 } else {
8111 OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
8112 }
8113
8114 OpInfo.ConstraintVT =
8115 OpInfo
8116 .getCallOperandValEVT(*DAG.getContext(), TLI, DAG.getDataLayout())
8117 .getSimpleVT();
8118 } else if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
8119 // The return value of the call is this value. As such, there is no
8120 // corresponding argument.
8121 assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
8122 if (StructType *STy = dyn_cast<StructType>(Call.getType())) {
8123 OpInfo.ConstraintVT = TLI.getSimpleValueType(
8124 DAG.getDataLayout(), STy->getElementType(ResNo));
8125 } else {
8126 assert(ResNo == 0 && "Asm only has one result!");
8127 OpInfo.ConstraintVT =
8128 TLI.getSimpleValueType(DAG.getDataLayout(), Call.getType());
8129 }
8130 ++ResNo;
8131 } else {
8132 OpInfo.ConstraintVT = MVT::Other;
8133 }
8134
8135 if (OpInfo.hasMatchingInput())
8136 ++NumMatchingOps;
8137
8138 if (!HasSideEffect)
8139 HasSideEffect = OpInfo.hasMemory(TLI);
8140
8141 // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
8142 // FIXME: Could we compute this on OpInfo rather than T?
8143
8144 // Compute the constraint code and ConstraintType to use.
8145 TLI.ComputeConstraintToUse(T, SDValue());
8146
8147 if (T.ConstraintType == TargetLowering::C_Immediate &&
8148 OpInfo.CallOperand && !isa<ConstantSDNode>(OpInfo.CallOperand))
8149 // We've delayed emitting a diagnostic for constraints like "n" because
8150 // inlining could cause an integer constant to show up.
8151 return emitInlineAsmError(Call, "constraint '" + Twine(T.ConstraintCode) +
8152 "' expects an integer constant "
8153 "expression");
8154
8155 ExtraInfo.update(T);
8156 }
8157
8158
8159 // We won't need to flush pending loads if this asm doesn't touch
8160 // memory and is nonvolatile.
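// DAG.getRoot() (unlike getRoot()) leaves PendingLoads unflushed, so
// independent loads may still be scheduled freely around this asm.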
8161 SDValue Flag, Chain = (HasSideEffect) ? getRoot() : DAG.getRoot();
8162
8163 bool IsCallBr = isa<CallBrInst>(Call);
8164 if (IsCallBr) {
8165 // If this is a callbr we need to flush pending exports since inlineasm_br
8166 // is a terminator. We need to do this before nodes are glued to
8167 // the inlineasm_br node.
8168 Chain = getControlRoot();
8169 }
8170
8171 // Second pass over the constraints: compute which constraint option to use.
8172 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
8173 // If this is an output operand with a matching input operand, look up the
8174 // matching input. If their types mismatch, e.g. one is an integer, the
8175 // other is floating point, or their sizes are different, flag it as an
8176 // error.
8177 if (OpInfo.hasMatchingInput()) {
8178 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
8179 patchMatchingInput(OpInfo, Input, DAG);
8180 }
8181
8182 // Compute the constraint code and ConstraintType to use.
8183 TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
8184
8185 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
8186 OpInfo.Type == InlineAsm::isClobber)
8187 continue;
8188
8189 // If this is a memory input, and if the operand is not indirect, do what we
8190 // need to provide an address for the memory input.
8191 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
8192 !OpInfo.isIndirect) {
8193 assert((OpInfo.isMultipleAlternative ||
8194 (OpInfo.Type == InlineAsm::isInput)) &&
8195 "Can only indirectify direct input operands!");
8196
8197 // Memory operands really want the address of the value.
8198 Chain = getAddressForMemoryInput(Chain, getCurSDLoc(), OpInfo, DAG);
8199
8200 // There is no longer a Value* corresponding to this operand.
8201 OpInfo.CallOperandVal = nullptr;
8202
8203 // It is now an indirect operand.
8204 OpInfo.isIndirect = true;
8205 }
8206
8207 }
8208
8209 // AsmNodeOperands - The operands for the ISD::INLINEASM node.
8210 std::vector<SDValue> AsmNodeOperands;
8211 AsmNodeOperands.push_back(SDValue()); // reserve space for input chain
8212 AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
8213 IA->getAsmString().c_str(), TLI.getProgramPointerTy(DAG.getDataLayout())));
8214
8215 // If we have a !srcloc metadata node associated with it, we want to attach
8216 // this to the ultimately generated inline asm machineinstr. To do this, we
8217 // pass in the third operand as this (potentially null) inline asm MDNode.
8218 const MDNode *SrcLoc = Call.getMetadata("srcloc");
8219 AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
8220
8221 // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
8222 // bits as operand 3.
8223 AsmNodeOperands.push_back(DAG.getTargetConstant(
8224 ExtraInfo.get(), getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
8225
8226 // Third pass: Loop over operands to prepare DAG-level operands. As part of
8227 // this, assign virtual and physical registers for inputs and outputs.
8228 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
8229 // Assign Registers.
8230 SDISelAsmOperandInfo &RefOpInfo =
8231 OpInfo.isMatchingInputConstraint()
8232 ? ConstraintOperands[OpInfo.getMatchedOperand()]
8233 : OpInfo;
8234 GetRegistersForValue(DAG, getCurSDLoc(), OpInfo, RefOpInfo);
8235
8236 auto DetectWriteToReservedRegister = [&]() {
8237 const MachineFunction &MF = DAG.getMachineFunction();
8238 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
8239 for (unsigned Reg : OpInfo.AssignedRegs.Regs) {
8240 if (Register::isPhysicalRegister(Reg) &&
8241 TRI.isInlineAsmReadOnlyReg(MF, Reg)) {
8242 const char *RegName = TRI.getName(Reg);
8243 emitInlineAsmError(Call, "write to reserved register '" +
8244 Twine(RegName) + "'");
8245 return true;
8246 }
8247 }
8248 return false;
8249 };
8250
8251 switch (OpInfo.Type) {
8252 case InlineAsm::isOutput:
8253 if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
8254 unsigned ConstraintID =
8255 TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
8256 assert(ConstraintID != InlineAsm::Constraint_Unknown &&
8257 "Failed to convert memory constraint code to constraint id.");
8258
8259 // Add information to the INLINEASM node to know about this output.
8260 unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
8261 OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
8262 AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(),
8263 MVT::i32));
8264 AsmNodeOperands.push_back(OpInfo.CallOperand);
8265 } else {
8266 // Otherwise, this outputs to a register (directly for C_Register /
8267 // C_RegisterClass, and a target-defined fashion for
8268 // C_Immediate/C_Other). Find a register that we can use.
8269 if (OpInfo.AssignedRegs.Regs.empty()) {
8270 emitInlineAsmError(
8271 Call, "couldn't allocate output register for constraint '" +
8272 Twine(OpInfo.ConstraintCode) + "'");
8273 return;
8274 }
8275
8276 if (DetectWriteToReservedRegister())
8277 return;
8278
8279 // Add information to the INLINEASM node to know that this register is
8280 // set.
8281 OpInfo.AssignedRegs.AddInlineAsmOperands(
8282 OpInfo.isEarlyClobber ? InlineAsm::Kind_RegDefEarlyClobber
8283 : InlineAsm::Kind_RegDef,
8284 false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
8285 }
8286 break;
8287
8288 case InlineAsm::isInput: {
8289 SDValue InOperandVal = OpInfo.CallOperand;
8290
8291 if (OpInfo.isMatchingInputConstraint()) {
8292 // If this is required to match an output register we have already set,
8293 // just use its register.
8294 auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
8295 AsmNodeOperands);
8296 unsigned OpFlag =
8297 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
8298 if (InlineAsm::isRegDefKind(OpFlag) ||
8299 InlineAsm::isRegDefEarlyClobberKind(OpFlag)) {
8300 // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
8301 if (OpInfo.isIndirect) {
8302 // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
8303 emitInlineAsmError(Call, "inline asm not supported yet: "
8304 "don't know how to handle tied "
8305 "indirect register inputs");
8306 return;
8307 }
8308
8309 MVT RegVT = AsmNodeOperands[CurOp+1].getSimpleValueType();
8310 SmallVector<unsigned, 4> Regs;
8311
8312 if (const TargetRegisterClass *RC = TLI.getRegClassFor(RegVT)) {
8313 unsigned NumRegs = InlineAsm::getNumOperandRegisters(OpFlag);
8314 MachineRegisterInfo &RegInfo =
8315 DAG.getMachineFunction().getRegInfo();
8316 for (unsigned i = 0; i != NumRegs; ++i)
8317 Regs.push_back(RegInfo.createVirtualRegister(RC));
8318 } else {
8319 emitInlineAsmError(Call,
8320 "inline asm error: This value type register "
8321 "class is not natively supported!");
8322 return;
8323 }
8324
8325 RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());
8326
8327 SDLoc dl = getCurSDLoc();
8328 // Use the produced MatchedRegs object to copy the input value into the matched registers.
8329 MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Flag, &Call);
8330 MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
8331 true, OpInfo.getMatchedOperand(), dl,
8332 DAG, AsmNodeOperands);
8333 break;
8334 }
8335
8336 assert(InlineAsm::isMemKind(OpFlag) && "Unknown matching constraint!");
8337 assert(InlineAsm::getNumOperandRegisters(OpFlag) == 1 &&
8338 "Unexpected number of operands");
8339 // Add information to the INLINEASM node to know about this input.
8340 // See InlineAsm.h isUseOperandTiedToDef.
8341 OpFlag = InlineAsm::convertMemFlagWordToMatchingFlagWord(OpFlag);
8342 OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag,
8343 OpInfo.getMatchedOperand());
8344 AsmNodeOperands.push_back(DAG.getTargetConstant(
8345 OpFlag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
8346 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
8347 break;
8348 }
8349
8350 // Treat indirect 'X' constraint as memory.
8351 if (OpInfo.ConstraintType == TargetLowering::C_Other &&
8352 OpInfo.isIndirect)
8353 OpInfo.ConstraintType = TargetLowering::C_Memory;
8354
8355 if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
8356 OpInfo.ConstraintType == TargetLowering::C_Other) {
8357 std::vector<SDValue> Ops;
8358 TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
8359 Ops, DAG);
8360 if (Ops.empty()) {
8361 if (OpInfo.ConstraintType == TargetLowering::C_Immediate)
8362 if (isa<ConstantSDNode>(InOperandVal)) {
8363 emitInlineAsmError(Call, "value out of range for constraint '" +
8364 Twine(OpInfo.ConstraintCode) + "'");
8365 return;
8366 }
8367
8368 emitInlineAsmError(Call,
8369 "invalid operand for inline asm constraint '" +
8370 Twine(OpInfo.ConstraintCode) + "'");
8371 return;
8372 }
8373
8374 // Add information to the INLINEASM node to know about this input.
8375 unsigned ResOpType =
8376 InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
8377 AsmNodeOperands.push_back(DAG.getTargetConstant(
8378 ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
8379 AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
8380 break;
8381 }
8382
8383 if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
8384 assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
8385 assert(InOperandVal.getValueType() ==
8386 TLI.getPointerTy(DAG.getDataLayout()) &&
8387 "Memory operands expect pointer values");
8388
8389 unsigned ConstraintID =
8390 TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
8391 assert(ConstraintID != InlineAsm::Constraint_Unknown &&
8392 "Failed to convert memory constraint code to constraint id.");
8393
8394 // Add information to the INLINEASM node to know about this input.
8395 unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
8396 ResOpType = InlineAsm::getFlagWordForMem(ResOpType, ConstraintID);
8397 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
8398 getCurSDLoc(),
8399 MVT::i32));
8400 AsmNodeOperands.push_back(InOperandVal);
8401 break;
8402 }
8403
8404 assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
8405 OpInfo.ConstraintType == TargetLowering::C_Register) &&
8406 "Unknown constraint type!");
8407
8408 // TODO: Support this.
8409 if (OpInfo.isIndirect) {
8410 emitInlineAsmError(
8411 Call, "Don't know how to handle indirect register inputs yet "
8412 "for constraint '" +
8413 Twine(OpInfo.ConstraintCode) + "'");
8414 return;
8415 }
8416
8417 // Copy the input into the appropriate registers.
8418 if (OpInfo.AssignedRegs.Regs.empty()) {
8419 emitInlineAsmError(Call,
8420 "couldn't allocate input reg for constraint '" +
8421 Twine(OpInfo.ConstraintCode) + "'");
8422 return;
8423 }
8424
8425 if (DetectWriteToReservedRegister())
8426 return;
8427
8428 SDLoc dl = getCurSDLoc();
8429
8430 OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Flag,
8431 &Call);
8432
8433 OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0,
8434 dl, DAG, AsmNodeOperands);
8435 break;
8436 }
8437 case InlineAsm::isClobber:
8438 // Add the clobbered value to the operand list, so that the register
8439 // allocator is aware that the physreg got clobbered.
8440 if (!OpInfo.AssignedRegs.Regs.empty())
8441 OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_Clobber,
8442 false, 0, getCurSDLoc(), DAG,
8443 AsmNodeOperands);
8444 break;
8445 }
8446 }
8447
8448 // Finish up input operands. Set the input chain and add the flag last.
8449 AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
8450 if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
8451
8452 unsigned ISDOpc = IsCallBr ? ISD::INLINEASM_BR : ISD::INLINEASM;
8453 Chain = DAG.getNode(ISDOpc, getCurSDLoc(),
8454 DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
8455 Flag = Chain.getValue(1);
8456
8457 // Do additional work to generate outputs.
8458
8459 SmallVector<EVT, 1> ResultVTs;
8460 SmallVector<SDValue, 1> ResultValues;
8461 SmallVector<SDValue, 8> OutChains;
8462
8463 llvm::Type *CallResultType = Call.getType();
8464 ArrayRef<Type *> ResultTypes;
8465 if (StructType *StructResult = dyn_cast<StructType>(CallResultType))
8466 ResultTypes = StructResult->elements();
8467 else if (!CallResultType->isVoidTy())
8468 ResultTypes = makeArrayRef(CallResultType);
8469
8470 auto CurResultType = ResultTypes.begin();
8471 auto handleRegAssign = [&](SDValue V) {
8472 assert(CurResultType != ResultTypes.end() && "Unexpected value");
8473 assert((*CurResultType)->isSized() && "Unexpected unsized type");
8474 EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), *CurResultType);
8475 ++CurResultType;
8476 // If the type of the inline asm call site return value is different but has
8477 // the same size as the type of the asm output, bitcast it. One example of this
8478 // is for vectors with different width / number of elements. This can
8479 // happen for register classes that can contain multiple different value
8480 // types. The preg or vreg allocated may not have the same VT as was
8481 // expected.
8482 //
8483 // This can also happen for a return value that disagrees with the register
8484 // class it is put in, eg. a double in a general-purpose register on a
8485 // 32-bit machine.
8486 if (ResultVT != V.getValueType() &&
8487 ResultVT.getSizeInBits() == V.getValueSizeInBits())
8488 V = DAG.getNode(ISD::BITCAST, getCurSDLoc(), ResultVT, V);
8489 else if (ResultVT != V.getValueType() && ResultVT.isInteger() &&
8490 V.getValueType().isInteger()) {
8491 // If a result value was tied to an input value, the computed result
8492 // may have a wider width than the expected result. Extract the
8493 // relevant portion.
8494 V = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultVT, V);
8495 }
8496 assert(ResultVT == V.getValueType() && "Asm result value mismatch!");
8497 ResultVTs.push_back(ResultVT);
8498 ResultValues.push_back(V);
8499 };
8500
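  // For example (illustrative, not tied to any particular target): if the asm
  // result type is v4i32 but the allocated register produced a same-sized
  // v2i64 value, handleRegAssign above inserts a BITCAST; if an output tied to
  // a wider integer input comes back as i64 for an i32 result, it truncates
  // instead.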
  // Deal with output operands.
  for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
    if (OpInfo.Type == InlineAsm::isOutput) {
      SDValue Val;
      // Skip trivial output operands.
      if (OpInfo.AssignedRegs.Regs.empty())
        continue;

      switch (OpInfo.ConstraintType) {
      case TargetLowering::C_Register:
      case TargetLowering::C_RegisterClass:
        Val = OpInfo.AssignedRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
                                                  Chain, &Flag, &Call);
        break;
      case TargetLowering::C_Immediate:
      case TargetLowering::C_Other:
        Val = TLI.LowerAsmOutputForConstraint(Chain, Flag, getCurSDLoc(),
                                              OpInfo, DAG);
        break;
      case TargetLowering::C_Memory:
        break; // Already handled.
      case TargetLowering::C_Unknown:
        assert(false && "Unexpected unknown constraint");
      }

      // Indirect outputs manifest as stores. Record output chains.
      if (OpInfo.isIndirect) {
        const Value *Ptr = OpInfo.CallOperandVal;
        assert(Ptr && "Expected value CallOperandVal for indirect asm operand");
        SDValue Store = DAG.getStore(Chain, getCurSDLoc(), Val, getValue(Ptr),
                                     MachinePointerInfo(Ptr));
        OutChains.push_back(Store);
      } else {
        // Generate CopyFromRegs to associated registers.
        assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
        if (Val.getOpcode() == ISD::MERGE_VALUES) {
          for (const SDValue &V : Val->op_values())
            handleRegAssign(V);
        } else
          handleRegAssign(Val);
      }
    }
  }

  // Set results.
  if (!ResultValues.empty()) {
    assert(CurResultType == ResultTypes.end() &&
           "Mismatch in number of ResultTypes");
    assert(ResultValues.size() == ResultTypes.size() &&
           "Mismatch in number of output operands in asm result");

    SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
                            DAG.getVTList(ResultVTs), ResultValues);
    setValue(&Call, V);
  }

  // Collect store chains.
  if (!OutChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);

  // Only update the root if the inline assembly has a memory effect.
  if (ResultValues.empty() || HasSideEffect || !OutChains.empty() || IsCallBr)
    DAG.setRoot(Chain);
}

void SelectionDAGBuilder::emitInlineAsmError(const CallBase &Call,
                                             const Twine &Message) {
  LLVMContext &Ctx = *DAG.getContext();
  Ctx.emitError(&Call, Message);

  // Make sure we leave the DAG in a valid state.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SmallVector<EVT, 1> ValueVTs;
  ComputeValueVTs(TLI, DAG.getDataLayout(), Call.getType(), ValueVTs);

  if (ValueVTs.empty())
    return;

  SmallVector<SDValue, 1> Ops;
  for (unsigned i = 0, e = ValueVTs.size(); i != e; ++i)
    Ops.push_back(DAG.getUNDEF(ValueVTs[i]));

  setValue(&Call, DAG.getMergeValues(Ops, getCurSDLoc()));
}

void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
                          MVT::Other, getRoot(),
                          getValue(I.getArgOperand(0)),
                          DAG.getSrcValue(I.getArgOperand(0))));
}

void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const DataLayout &DL = DAG.getDataLayout();
  SDValue V = DAG.getVAArg(
      TLI.getMemValueType(DAG.getDataLayout(), I.getType()), getCurSDLoc(),
      getRoot(), getValue(I.getOperand(0)), DAG.getSrcValue(I.getOperand(0)),
      DL.getABITypeAlign(I.getType()).value());
  DAG.setRoot(V.getValue(1));

  if (I.getType()->isPointerTy())
    V = DAG.getPtrExtOrTrunc(
        V, getCurSDLoc(), TLI.getValueType(DAG.getDataLayout(), I.getType()));
  setValue(&I, V);
}

void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
                          MVT::Other, getRoot(),
                          getValue(I.getArgOperand(0)),
                          DAG.getSrcValue(I.getArgOperand(0))));
}

void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
                          MVT::Other, getRoot(),
                          getValue(I.getArgOperand(0)),
                          getValue(I.getArgOperand(1)),
                          DAG.getSrcValue(I.getArgOperand(0)),
                          DAG.getSrcValue(I.getArgOperand(1))));
}

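/// If \p I carries !range metadata proving the value fits in a narrower
/// integer type, tag \p Op with an AssertZext of that type. For example
/// (illustrative IR, not taken from this file):
///   %x = call i32 @f(), !range !{i32 0, i32 256}
/// guarantees %x < 256, so the node can be marked as zero-extended from i8.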
SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
                                                    const Instruction &I,
                                                    SDValue Op) {
  const MDNode *Range = I.getMetadata(LLVMContext::MD_range);
  if (!Range)
    return Op;

  ConstantRange CR = getConstantRangeFromMetadata(*Range);
  if (CR.isFullSet() || CR.isEmptySet() || CR.isUpperWrapped())
    return Op;

  APInt Lo = CR.getUnsignedMin();
  if (!Lo.isMinValue())
    return Op;

  APInt Hi = CR.getUnsignedMax();
  unsigned Bits = std::max(Hi.getActiveBits(),
                           static_cast<unsigned>(IntegerType::MIN_INT_BITS));

  EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), Bits);

  SDLoc SL = getCurSDLoc();

  SDValue ZExt = DAG.getNode(ISD::AssertZext, SL, Op.getValueType(), Op,
                             DAG.getValueType(SmallVT));
  unsigned NumVals = Op.getNode()->getNumValues();
  if (NumVals == 1)
    return ZExt;

  SmallVector<SDValue, 4> Ops;

  Ops.push_back(ZExt);
  for (unsigned I = 1; I != NumVals; ++I)
    Ops.push_back(Op.getValue(I));

  return DAG.getMergeValues(Ops, SL);
}

/// Populate a CallLoweringInfo (into \p CLI) based on the properties of
/// the call being lowered.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
void SelectionDAGBuilder::populateCallLoweringInfo(
    TargetLowering::CallLoweringInfo &CLI, const CallBase *Call,
    unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy,
    bool IsPatchPoint) {
  TargetLowering::ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
       ArgI != ArgE; ++ArgI) {
    const Value *V = Call->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    TargetLowering::ArgListEntry Entry;
    Entry.Node = getValue(V);
    Entry.Ty = V->getType();
    Entry.setAttributes(Call, ArgI);
    Args.push_back(Entry);
  }

  CLI.setDebugLoc(getCurSDLoc())
      .setChain(getRoot())
      .setCallee(Call->getCallingConv(), ReturnTy, Callee, std::move(Args))
      .setDiscardResult(Call->use_empty())
      .setIsPatchPoint(IsPatchPoint)
      .setIsPreallocated(
          Call->countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0);
}

/// Add a stack map intrinsic call's live variable operands to a stackmap
/// or patchpoint target node's operand list.
///
/// Constants are converted to TargetConstants purely as an optimization to
/// avoid constant materialization and register allocation.
///
/// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
/// generate address computation nodes, and so FinalizeISel can convert the
/// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
/// address materialization and register allocation, but may also be required
/// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
/// alloca in the entry block, then the runtime may assume that the alloca's
/// StackMap location can be read immediately after compilation and that the
/// location is valid at any point during execution (this is similar to the
/// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
/// only available in a register, then the runtime would need to trap when
/// execution reaches the StackMap in order to read the alloca's location.
static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx,
                                const SDLoc &DL, SmallVectorImpl<SDValue> &Ops,
                                SelectionDAGBuilder &Builder) {
  for (unsigned i = StartIdx, e = Call.arg_size(); i != e; ++i) {
    SDValue OpVal = Builder.getValue(Call.getArgOperand(i));
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpVal)) {
      Ops.push_back(
          Builder.DAG.getTargetConstant(StackMaps::ConstantOp, DL, MVT::i64));
      Ops.push_back(
          Builder.DAG.getTargetConstant(C->getSExtValue(), DL, MVT::i64));
    } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(OpVal)) {
      const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
      Ops.push_back(Builder.DAG.getTargetFrameIndex(
          FI->getIndex(), TLI.getFrameIndexTy(Builder.DAG.getDataLayout())));
    } else
      Ops.push_back(OpVal);
  }
}

/// Lower llvm.experimental.stackmap directly to its target opcode.
void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
  // void @llvm.experimental.stackmap(i32 <id>, i32 <numShadowBytes>,
  //                                  [live variables...])

  assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");

  SDValue Chain, InFlag, Callee, NullPtr;
  SmallVector<SDValue, 32> Ops;

  SDLoc DL = getCurSDLoc();
  Callee = getValue(CI.getCalledOperand());
  NullPtr = DAG.getIntPtrConstant(0, DL, true);

  // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPs (if requested). Unlike the patchpoint
  // intrinsic, this won't be lowered to a function call. This means we don't
  // have to worry about calling conventions and target-specific lowering code.
  // Instead we perform the call lowering right here.
  //
  // chain, flag = CALLSEQ_START(chain, 0, 0)
  // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
  // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
  //
  Chain = DAG.getCALLSEQ_START(getRoot(), 0, 0, DL);
  InFlag = Chain.getValue(1);

  // Add the <id> and <numBytes> constants.
  SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos));
  Ops.push_back(DAG.getTargetConstant(
      cast<ConstantSDNode>(IDVal)->getZExtValue(), DL, MVT::i64));
  SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(DAG.getTargetConstant(
      cast<ConstantSDNode>(NBytesVal)->getZExtValue(), DL,
      MVT::i32));

  // Push live variables for the stack map.
  addStackMapLiveVars(CI, 2, DL, Ops, *this);

  // We are not pushing any register mask info here on the operands list,
  // because the stackmap doesn't clobber anything.

  // Push the chain and the glue flag.
  Ops.push_back(Chain);
  Ops.push_back(InFlag);

  // Create the STACKMAP node.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDNode *SM = DAG.getMachineNode(TargetOpcode::STACKMAP, DL, NodeTys, Ops);
  Chain = SDValue(SM, 0);
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, NullPtr, NullPtr, InFlag, DL);

  // Stackmaps don't generate values, so nothing goes into the NodeMap.

  // Set the root to the target-lowered call chain.
  DAG.setRoot(Chain);

  // Inform the Frame Information that we have a stackmap in this function.
  FuncInfo.MF->getFrameInfo().setHasStackMap();
}

/// Lower llvm.experimental.patchpoint directly to its target opcode.
void SelectionDAGBuilder::visitPatchpoint(const CallBase &CB,
                                          const BasicBlock *EHPadBB) {
  // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
  //                                                 i32 <numBytes>,
  //                                                 i8* <target>,
  //                                                 i32 <numArgs>,
  //                                                 [Args...],
  //                                                 [live variables...])

  CallingConv::ID CC = CB.getCallingConv();
  bool IsAnyRegCC = CC == CallingConv::AnyReg;
  bool HasDef = !CB.getType()->isVoidTy();
  SDLoc dl = getCurSDLoc();
  SDValue Callee = getValue(CB.getArgOperand(PatchPointOpers::TargetPos));

  // Handle immediate and symbolic callees.
  if (auto *ConstCallee = dyn_cast<ConstantSDNode>(Callee))
    Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
                                   /*isTarget=*/true);
  else if (auto *SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
                                        SDLoc(SymbolicCallee),
                                        SymbolicCallee->getValueType(0));

  // Get the real number of arguments participating in the call <numArgs>.
  SDValue NArgVal = getValue(CB.getArgOperand(PatchPointOpers::NArgPos));
  unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();

  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
  // Intrinsics include all meta-operands up to but not including CC.
  unsigned NumMetaOpers = PatchPointOpers::CCPos;
  assert(CB.arg_size() >= NumMetaOpers + NumArgs &&
         "Not enough arguments provided to the patchpoint intrinsic");

  // For AnyRegCC the arguments are lowered later on manually.
  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
  Type *ReturnTy =
      IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CB.getType();

  TargetLowering::CallLoweringInfo CLI(DAG);
  populateCallLoweringInfo(CLI, &CB, NumMetaOpers, NumCallArgs, Callee,
                           ReturnTy, true);
  std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);

  SDNode *CallEnd = Result.second.getNode();
  if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
    CallEnd = CallEnd->getOperand(0).getNode();

  /// Get a call instruction from the call sequence chain.
  /// Tail calls are not allowed.
  assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
         "Expected a callseq node.");
  SDNode *Call = CallEnd->getOperand(0).getNode();
  bool HasGlue = Call->getGluedNode();

  // Replace the target specific call node with the patchable intrinsic.
  SmallVector<SDValue, 8> Ops;

  // Add the <id> and <numBytes> constants.
  SDValue IDVal = getValue(CB.getArgOperand(PatchPointOpers::IDPos));
  Ops.push_back(DAG.getTargetConstant(
      cast<ConstantSDNode>(IDVal)->getZExtValue(), dl, MVT::i64));
  SDValue NBytesVal = getValue(CB.getArgOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(DAG.getTargetConstant(
      cast<ConstantSDNode>(NBytesVal)->getZExtValue(), dl,
      MVT::i32));

  // Add the callee.
  Ops.push_back(Callee);

  // Adjust <numArgs> to account for any arguments that have been passed on the
  // stack instead.
  // Call Node: Chain, Target, {Args}, RegMask, [Glue]
  unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
  NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
  Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));

  // Add the calling convention.
  Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32));

  // Add the arguments we omitted previously. The register allocator should
  // place these in any free register.
  if (IsAnyRegCC)
    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
      Ops.push_back(getValue(CB.getArgOperand(i)));

  // Push the arguments from the call instruction up to the register mask.
  SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
  Ops.append(Call->op_begin() + 2, e);

  // Push live variables for the stack map.
  addStackMapLiveVars(CB, NumMetaOpers + NumArgs, dl, Ops, *this);

  // Push the register mask info.
  if (HasGlue)
    Ops.push_back(*(Call->op_end()-2));
  else
    Ops.push_back(*(Call->op_end()-1));

  // Push the chain (this is originally the first operand of the call, but
  // becomes now the last or second to last operand).
  Ops.push_back(*(Call->op_begin()));

  // Push the glue flag (last operand).
  if (HasGlue)
    Ops.push_back(*(Call->op_end()-1));

  SDVTList NodeTys;
  if (IsAnyRegCC && HasDef) {
    // Create the return types based on the intrinsic definition.
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    SmallVector<EVT, 3> ValueVTs;
    ComputeValueVTs(TLI, DAG.getDataLayout(), CB.getType(), ValueVTs);
    assert(ValueVTs.size() == 1 && "Expected only one return value type.");

    // There is always a chain and a glue type at the end.
    ValueVTs.push_back(MVT::Other);
    ValueVTs.push_back(MVT::Glue);
    NodeTys = DAG.getVTList(ValueVTs);
  } else
    NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

  // Replace the target specific call node with a PATCHPOINT node.
  MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHPOINT,
                                         dl, NodeTys, Ops);

  // Update the NodeMap.
  if (HasDef) {
    if (IsAnyRegCC)
      setValue(&CB, SDValue(MN, 0));
    else
      setValue(&CB, Result.first);
  }

  // Fixup the consumers of the intrinsic. The chain and glue may be used in the
  // call sequence. Furthermore the location of the chain and glue can change
  // when the AnyReg calling convention is used and the intrinsic returns a
  // value.
  if (IsAnyRegCC && HasDef) {
    SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
    SDValue To[] = {SDValue(MN, 1), SDValue(MN, 2)};
    DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
  } else
    DAG.ReplaceAllUsesWith(Call, MN);
  DAG.DeleteNode(Call);

  // Inform the Frame Information that we have a patchpoint in this function.
  FuncInfo.MF->getFrameInfo().setHasPatchPoint();
}

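// For example (illustrative IR), a reduction such as
//   %r = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %v)
// is lowered below to a single VECREDUCE_ADD node on %v. The fadd/fmul
// variants additionally choose between a reassociating form (a scalar
// FADD/FMUL of the start value with the reduction) and the strict ordered
// form, based on the fast-math flags on the call.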
void SelectionDAGBuilder::visitVectorReduce(const CallInst &I,
                                            unsigned Intrinsic) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Op1 = getValue(I.getArgOperand(0));
  SDValue Op2;
  if (I.getNumArgOperands() > 1)
    Op2 = getValue(I.getArgOperand(1));
  SDLoc dl = getCurSDLoc();
  EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
  SDValue Res;
  FastMathFlags FMF;
  if (isa<FPMathOperator>(I))
    FMF = I.getFastMathFlags();

  switch (Intrinsic) {
  case Intrinsic::experimental_vector_reduce_v2_fadd:
    if (FMF.allowReassoc())
      Res = DAG.getNode(ISD::FADD, dl, VT, Op1,
                        DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2));
    else
      Res = DAG.getNode(ISD::VECREDUCE_STRICT_FADD, dl, VT, Op1, Op2);
    break;
  case Intrinsic::experimental_vector_reduce_v2_fmul:
    if (FMF.allowReassoc())
      Res = DAG.getNode(ISD::FMUL, dl, VT, Op1,
                        DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2));
    else
      Res = DAG.getNode(ISD::VECREDUCE_STRICT_FMUL, dl, VT, Op1, Op2);
    break;
  case Intrinsic::experimental_vector_reduce_add:
    Res = DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_mul:
    Res = DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_and:
    Res = DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_or:
    Res = DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_xor:
    Res = DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_smax:
    Res = DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_smin:
    Res = DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_umax:
    Res = DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_umin:
    Res = DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_fmax:
    Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_fmin:
    Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1);
    break;
  default:
    llvm_unreachable("Unhandled vector reduce intrinsic");
  }
  setValue(&I, Res);
}

/// Returns an AttributeList representing the attributes applied to the return
/// value of the given call.
static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
  SmallVector<Attribute::AttrKind, 2> Attrs;
  if (CLI.RetSExt)
    Attrs.push_back(Attribute::SExt);
  if (CLI.RetZExt)
    Attrs.push_back(Attribute::ZExt);
  if (CLI.IsInReg)
    Attrs.push_back(Attribute::InReg);

  return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
                            Attrs);
}

/// TargetLowering::LowerCallTo - This is the default LowerCallTo
/// implementation, which just calls LowerCall.
/// FIXME: When all targets are migrated to using LowerCall, this hook should
/// be integrated into SDISel.
std::pair<SDValue, SDValue>
TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
  // Handle the incoming return values from the call.
  CLI.Ins.clear();
  Type *OrigRetTy = CLI.RetTy;
  SmallVector<EVT, 4> RetTys;
  SmallVector<uint64_t, 4> Offsets;
  auto &DL = CLI.DAG.getDataLayout();
  ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets);

  if (CLI.IsPostTypeLegalization) {
    // If we are lowering a libcall after legalization, split the return type.
    SmallVector<EVT, 4> OldRetTys;
    SmallVector<uint64_t, 4> OldOffsets;
    RetTys.swap(OldRetTys);
    Offsets.swap(OldOffsets);

    for (size_t i = 0, e = OldRetTys.size(); i != e; ++i) {
      EVT RetVT = OldRetTys[i];
      uint64_t Offset = OldOffsets[i];
      MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), RetVT);
      unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), RetVT);
      unsigned RegisterVTByteSZ = RegisterVT.getSizeInBits() / 8;
      RetTys.append(NumRegs, RegisterVT);
      for (unsigned j = 0; j != NumRegs; ++j)
        Offsets.push_back(Offset + j * RegisterVTByteSZ);
    }
  }

  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);

  bool CanLowerReturn =
      this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
                           CLI.IsVarArg, Outs, CLI.RetTy->getContext());

  SDValue DemoteStackSlot;
  int DemoteStackIdx = -100;
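  // For example (illustrative): if the target cannot return { i64, i64, i64 }
  // in registers, the call is lowered as if the IR had passed a hidden
  //   { i64, i64, i64 }* sret
  // pointer to a fresh stack slot; the pieces are loaded back out further
  // below once the call has been lowered.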
  if (!CanLowerReturn) {
    // FIXME: equivalent assert?
    // assert(!CS.hasInAllocaArgument() &&
    //        "sret demotion is incompatible with inalloca");
    uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
    Align Alignment = DL.getPrefTypeAlign(CLI.RetTy);
    MachineFunction &MF = CLI.DAG.getMachineFunction();
    DemoteStackIdx =
        MF.getFrameInfo().CreateStackObject(TySize, Alignment, false);
    Type *StackSlotPtrType = PointerType::get(CLI.RetTy,
                                              DL.getAllocaAddrSpace());

    DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL));
    ArgListEntry Entry;
    Entry.Node = DemoteStackSlot;
    Entry.Ty = StackSlotPtrType;
    Entry.IsSExt = false;
    Entry.IsZExt = false;
    Entry.IsInReg = false;
    Entry.IsSRet = true;
    Entry.IsNest = false;
    Entry.IsByVal = false;
    Entry.IsReturned = false;
    Entry.IsSwiftSelf = false;
    Entry.IsSwiftError = false;
    Entry.IsCFGuardTarget = false;
    Entry.Alignment = Alignment;
    CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
    CLI.NumFixedArgs += 1;
    CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());

    // sret demotion isn't compatible with tail-calls, since the sret argument
    // points into the caller's stack frame.
    CLI.IsTailCall = false;
  } else {
    bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
        CLI.RetTy, CLI.CallConv, CLI.IsVarArg);
    for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
      ISD::ArgFlagsTy Flags;
      if (NeedsRegBlock) {
        Flags.setInConsecutiveRegs();
        if (I == RetTys.size() - 1)
          Flags.setInConsecutiveRegsLast();
      }
      EVT VT = RetTys[I];
      MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
                                                     CLI.CallConv, VT);
      unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
                                                       CLI.CallConv, VT);
      for (unsigned i = 0; i != NumRegs; ++i) {
        ISD::InputArg MyFlags;
        MyFlags.Flags = Flags;
        MyFlags.VT = RegisterVT;
        MyFlags.ArgVT = VT;
        MyFlags.Used = CLI.IsReturnValueUsed;
        if (CLI.RetTy->isPointerTy()) {
          MyFlags.Flags.setPointer();
          MyFlags.Flags.setPointerAddrSpace(
              cast<PointerType>(CLI.RetTy)->getAddressSpace());
        }
        if (CLI.RetSExt)
          MyFlags.Flags.setSExt();
        if (CLI.RetZExt)
          MyFlags.Flags.setZExt();
        if (CLI.IsInReg)
          MyFlags.Flags.setInReg();
        CLI.Ins.push_back(MyFlags);
      }
    }
  }

  // We push in swifterror return as the last element of CLI.Ins.
  ArgListTy &Args = CLI.getArgs();
  if (supportSwiftError()) {
    for (unsigned i = 0, e = Args.size(); i != e; ++i) {
      if (Args[i].IsSwiftError) {
        ISD::InputArg MyFlags;
        MyFlags.VT = getPointerTy(DL);
        MyFlags.ArgVT = EVT(getPointerTy(DL));
        MyFlags.Flags.setSwiftError();
        CLI.Ins.push_back(MyFlags);
      }
    }
  }

  // Handle all of the outgoing arguments.
  CLI.Outs.clear();
  CLI.OutVals.clear();
  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
    SmallVector<EVT, 4> ValueVTs;
    ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
    // FIXME: Split arguments if CLI.IsPostTypeLegalization
    Type *FinalType = Args[i].Ty;
    if (Args[i].IsByVal)
      FinalType = cast<PointerType>(Args[i].Ty)->getElementType();
    bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
        FinalType, CLI.CallConv, CLI.IsVarArg);
    for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
         ++Value) {
      EVT VT = ValueVTs[Value];
      Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
      SDValue Op = SDValue(Args[i].Node.getNode(),
                           Args[i].Node.getResNo() + Value);
      ISD::ArgFlagsTy Flags;

      // Certain targets (such as MIPS), may have a different ABI alignment
      // for a type depending on the context. Give the target a chance to
      // specify the alignment it wants.
      const Align OriginalAlignment(getABIAlignmentForCallingConv(ArgTy, DL));

      if (Args[i].Ty->isPointerTy()) {
        Flags.setPointer();
        Flags.setPointerAddrSpace(
            cast<PointerType>(Args[i].Ty)->getAddressSpace());
      }
      if (Args[i].IsZExt)
        Flags.setZExt();
      if (Args[i].IsSExt)
        Flags.setSExt();
      if (Args[i].IsInReg) {
        // If we are using the vectorcall calling convention, a structure that
        // is passed InReg is surely an HVA.
        if (CLI.CallConv == CallingConv::X86_VectorCall &&
            isa<StructType>(FinalType)) {
          // The first value of a structure is marked.
          if (0 == Value)
            Flags.setHvaStart();
          Flags.setHva();
        }
        // Set InReg flag.
        Flags.setInReg();
      }
      if (Args[i].IsSRet)
        Flags.setSRet();
      if (Args[i].IsSwiftSelf)
        Flags.setSwiftSelf();
      if (Args[i].IsSwiftError)
        Flags.setSwiftError();
      if (Args[i].IsCFGuardTarget)
        Flags.setCFGuardTarget();
      if (Args[i].IsByVal)
        Flags.setByVal();
      if (Args[i].IsPreallocated) {
        Flags.setPreallocated();
        // Set the byval flag for CCAssignFn callbacks that don't know about
        // preallocated. This way we can know how many bytes we should've
        // allocated and how many bytes a callee cleanup function will pop. If
        // we port preallocated to more targets, we'll have to add custom
        // preallocated handling in the various CC lowering callbacks.
        Flags.setByVal();
      }
      if (Args[i].IsInAlloca) {
        Flags.setInAlloca();
        // Set the byval flag for CCAssignFn callbacks that don't know about
        // inalloca. This way we can know how many bytes we should've allocated
        // and how many bytes a callee cleanup function will pop. If we port
        // inalloca to more targets, we'll have to add custom inalloca handling
        // in the various CC lowering callbacks.
        Flags.setByVal();
      }
      if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
        PointerType *Ty = cast<PointerType>(Args[i].Ty);
        Type *ElementTy = Ty->getElementType();

        unsigned FrameSize = DL.getTypeAllocSize(
            Args[i].ByValType ? Args[i].ByValType : ElementTy);
        Flags.setByValSize(FrameSize);

        // For ByVal, size and alignment should be passed from the FE. The BE
        // will guess if this info is not there, but there are cases it cannot
        // get right.
        Align FrameAlign;
        if (auto MA = Args[i].Alignment)
          FrameAlign = *MA;
        else
          FrameAlign = Align(getByValTypeAlignment(ElementTy, DL));
        Flags.setByValAlign(FrameAlign);
      }
      if (Args[i].IsNest)
        Flags.setNest();
      if (NeedsRegBlock)
        Flags.setInConsecutiveRegs();
      Flags.setOrigAlign(OriginalAlignment);

      MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
                                                 CLI.CallConv, VT);
      unsigned NumParts = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
                                                        CLI.CallConv, VT);
      SmallVector<SDValue, 4> Parts(NumParts);
      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

      if (Args[i].IsSExt)
        ExtendKind = ISD::SIGN_EXTEND;
      else if (Args[i].IsZExt)
        ExtendKind = ISD::ZERO_EXTEND;

      // Conservatively only handle 'returned' on non-vectors that can be
      // lowered, for now.
      if (Args[i].IsReturned && !Op.getValueType().isVector() &&
          CanLowerReturn) {
        assert((CLI.RetTy == Args[i].Ty ||
                (CLI.RetTy->isPointerTy() && Args[i].Ty->isPointerTy() &&
                 CLI.RetTy->getPointerAddressSpace() ==
                     Args[i].Ty->getPointerAddressSpace())) &&
               RetTys.size() == NumValues && "unexpected use of 'returned'");
        // Before passing 'returned' to the target lowering code, ensure that
        // either the register MVT and the actual EVT are the same size or that
        // the return value and argument are extended in the same way; in these
        // cases it's safe to pass the argument register value unchanged as the
        // return register value (although it's at the target's option whether
        // to do so).
        // TODO: allow code generation to take advantage of partially preserved
        // registers rather than clobbering the entire register when the
        // parameter extension method is not compatible with the return
        // extension method.
        if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
            (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt &&
             CLI.RetZExt == Args[i].IsZExt))
          Flags.setReturned();
      }

      getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT, CLI.CB,
                     CLI.CallConv, ExtendKind);

      for (unsigned j = 0; j != NumParts; ++j) {
        // If it isn't the first piece, the alignment must be 1.
        // For scalable vectors the scalable part is currently handled
        // by individual targets, so we just use the known minimum size here.
        ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(), VT,
                    i < CLI.NumFixedArgs, i,
                    j*Parts[j].getValueType().getStoreSize().getKnownMinSize());
        if (NumParts > 1 && j == 0)
          MyFlags.Flags.setSplit();
        else if (j != 0) {
          MyFlags.Flags.setOrigAlign(Align(1));
          if (j == NumParts - 1)
            MyFlags.Flags.setSplitEnd();
        }

        CLI.Outs.push_back(MyFlags);
        CLI.OutVals.push_back(Parts[j]);
      }

      if (NeedsRegBlock && Value == NumValues - 1)
        CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
    }
  }

  SmallVector<SDValue, 4> InVals;
  CLI.Chain = LowerCall(CLI, InVals);

  // Update CLI.InVals to use outside of this function.
  CLI.InVals = InVals;

  // Verify that the target's LowerCall behaved as expected.
  assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
         "LowerCall didn't return a valid chain!");
  assert((!CLI.IsTailCall || InVals.empty()) &&
         "LowerCall emitted a return value for a tail call!");
  assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
         "LowerCall didn't emit the correct number of values!");

  // For a tail call, the return value is merely live-out and there aren't
  // any nodes in the DAG representing it. Return a special value to
  // indicate that a tail call has been emitted and no more Instructions
  // should be processed in the current block.
  if (CLI.IsTailCall) {
    CLI.DAG.setRoot(CLI.Chain);
    return std::make_pair(SDValue(), SDValue());
  }

#ifndef NDEBUG
  for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
    assert(InVals[i].getNode() && "LowerCall emitted a null value!");
    assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
           "LowerCall emitted a value with the wrong type!");
  }
#endif

  SmallVector<SDValue, 4> ReturnValues;
  if (!CanLowerReturn) {
    // The instruction result is the result of loading from the
    // hidden sret parameter.
    SmallVector<EVT, 1> PVTs;
    Type *PtrRetTy = OrigRetTy->getPointerTo(DL.getAllocaAddrSpace());

    ComputeValueVTs(*this, DL, PtrRetTy, PVTs);
    assert(PVTs.size() == 1 && "Pointers should fit in one register");
    EVT PtrVT = PVTs[0];

    unsigned NumValues = RetTys.size();
    ReturnValues.resize(NumValues);
    SmallVector<SDValue, 4> Chains(NumValues);

    // An aggregate return value cannot wrap around the address space, so
    // offsets to its parts don't wrap either.
    SDNodeFlags Flags;
    Flags.setNoUnsignedWrap(true);

    MachineFunction &MF = CLI.DAG.getMachineFunction();
    Align HiddenSRetAlign = MF.getFrameInfo().getObjectAlign(DemoteStackIdx);
    for (unsigned i = 0; i < NumValues; ++i) {
      SDValue Add = CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot,
                                    CLI.DAG.getConstant(Offsets[i], CLI.DL,
                                                        PtrVT), Flags);
      SDValue L = CLI.DAG.getLoad(
          RetTys[i], CLI.DL, CLI.Chain, Add,
          MachinePointerInfo::getFixedStack(CLI.DAG.getMachineFunction(),
                                            DemoteStackIdx, Offsets[i]),
          HiddenSRetAlign);
      ReturnValues[i] = L;
      Chains[i] = L.getValue(1);
    }

    CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
  } else {
    // Collect the legal value parts into potentially illegal values
    // that correspond to the original function's return values.
    Optional<ISD::NodeType> AssertOp;
    if (CLI.RetSExt)
      AssertOp = ISD::AssertSext;
    else if (CLI.RetZExt)
      AssertOp = ISD::AssertZext;
    unsigned CurReg = 0;
    for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
      EVT VT = RetTys[I];
      MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
                                                     CLI.CallConv, VT);
      unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
                                                       CLI.CallConv, VT);

      ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
                                              NumRegs, RegisterVT, VT, nullptr,
                                              CLI.CallConv, AssertOp));
      CurReg += NumRegs;
    }

    // For a function returning void, there is no return value. We can't create
    // such a node, so we just return a null return value in that case. In
    // that case, nothing will actually look at the value.
    if (ReturnValues.empty())
      return std::make_pair(SDValue(), CLI.Chain);
  }

  SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
                                CLI.DAG.getVTList(RetTys), ReturnValues);
  return std::make_pair(Res, CLI.Chain);
}

void TargetLowering::LowerOperationWrapper(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
  if (SDValue Res = LowerOperation(SDValue(N, 0), DAG))
    Results.push_back(Res);
}

SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  llvm_unreachable("LowerOperation not implemented for this target!");
}

void
SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
  SDValue Op = getNonRegisterValue(V);
  assert((Op.getOpcode() != ISD::CopyFromReg ||
          cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
         "Copy from a reg to the same reg!");
  assert(!Register::isPhysicalRegister(Reg) && "Is a physreg");

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  // If this is an InlineAsm we have to match the registers required, not the
  // notional registers required by the type.

  RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, V->getType(),
                   None); // This is not an ABI copy.
  SDValue Chain = DAG.getEntryNode();

  ISD::NodeType ExtendType = (FuncInfo.PreferredExtendType.find(V) ==
                              FuncInfo.PreferredExtendType.end())
                                 ? ISD::ANY_EXTEND
                                 : FuncInfo.PreferredExtendType[V];
  RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType);
  PendingExports.push_back(Chain);
}

#include "llvm/CodeGen/SelectionDAGISel.h"

/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
/// entry block, return true. This includes arguments used by switches, since
/// the switch may expand into multiple basic blocks.
static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
  // With FastISel active, we may be splitting blocks, so force creation
  // of virtual registers for all non-dead arguments.
  if (FastISel)
    return A->use_empty();

  const BasicBlock &Entry = A->getParent()->front();
  for (const User *U : A->users())
    if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U))
      return false; // Use not in entry block.

  return true;
}

using ArgCopyElisionMapTy =
    DenseMap<const Argument *,
             std::pair<const AllocaInst *, const StoreInst *>>;

/// Scan the entry block of the function in FuncInfo for arguments that look
/// like copies into a local alloca. Record any copied arguments in
/// ArgCopyElisionCandidates.
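///
/// For example (illustrative IR), a typical candidate looks like:
///   %x.addr = alloca i64
///   store i64 %x, i64* %x.addr
/// where %x is an argument and the store fully initializes %x.addr.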
static void
findArgumentCopyElisionCandidates(const DataLayout &DL,
                                  FunctionLoweringInfo *FuncInfo,
                                  ArgCopyElisionMapTy &ArgCopyElisionCandidates) {
  // Record the state of every static alloca used in the entry block. Argument
  // allocas are all used in the entry block, so we need approximately as many
  // entries as we have arguments.
  enum StaticAllocaInfo { Unknown, Clobbered, Elidable };
  SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas;
  unsigned NumArgs = FuncInfo->Fn->arg_size();
  StaticAllocas.reserve(NumArgs * 2);

  auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * {
    if (!V)
      return nullptr;
    V = V->stripPointerCasts();
    const auto *AI = dyn_cast<AllocaInst>(V);
    if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI))
      return nullptr;
    auto Iter = StaticAllocas.insert({AI, Unknown});
    return &Iter.first->second;
  };

  // Look for stores of arguments to static allocas. Look through bitcasts and
  // GEPs to handle type coercions, as long as the alloca is fully initialized
  // by the store. Any non-store use of an alloca escapes it and any subsequent
  // unanalyzed store might write it.
  // FIXME: Handle structs initialized with multiple stores.
  for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) {
    // Look for stores, and handle non-store uses conservatively.
    const auto *SI = dyn_cast<StoreInst>(&I);
    if (!SI) {
      // We will look through cast uses, so ignore them completely.
      if (I.isCast())
        continue;
      // Ignore debug info intrinsics, they don't escape or store to allocas.
      if (isa<DbgInfoIntrinsic>(I))
        continue;
      // This is an unknown instruction. Assume it escapes or writes to all
      // static alloca operands.
      for (const Use &U : I.operands()) {
        if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U))
          *Info = StaticAllocaInfo::Clobbered;
      }
      continue;
    }

    // If the stored value is a static alloca, mark it as escaped.
    if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
      *Info = StaticAllocaInfo::Clobbered;

    // Check if the destination is a static alloca.
    const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
    StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst);
    if (!Info)
      continue;
    const AllocaInst *AI = cast<AllocaInst>(Dst);

    // Skip allocas that have been initialized or clobbered.
    if (*Info != StaticAllocaInfo::Unknown)
      continue;

    // Check if the stored value is an argument, and that this store fully
    // initializes the alloca. Don't elide copies from the same argument twice.
    const Value *Val = SI->getValueOperand()->stripPointerCasts();
    const auto *Arg = dyn_cast<Argument>(Val);
    if (!Arg || Arg->hasPassPointeeByValueAttr() ||
        Arg->getType()->isEmptyTy() ||
        DL.getTypeStoreSize(Arg->getType()) !=
            DL.getTypeAllocSize(AI->getAllocatedType()) ||
        ArgCopyElisionCandidates.count(Arg)) {
      *Info = StaticAllocaInfo::Clobbered;
      continue;
    }

    LLVM_DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI
                      << '\n');

    // Mark this alloca and store for argument copy elision.
    *Info = StaticAllocaInfo::Elidable;
    ArgCopyElisionCandidates.insert({Arg, {AI, SI}});

    // Stop scanning if we've seen all arguments. This will happen early in -O0
    // builds, which is useful, because -O0 builds have large entry blocks and
    // many allocas.
    if (ArgCopyElisionCandidates.size() == NumArgs)
      break;
  }
}

/// Try to elide argument copies from memory into a local alloca. Succeeds if
/// ArgVal is a load from a suitable fixed stack object.
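///
/// On success the argument's alloca is redirected to the fixed stack object
/// the argument already occupies, so e.g. an i64 passed on the stack and then
/// copied into a local alloca needs no copy at all.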
static void tryToElideArgumentCopy(
    FunctionLoweringInfo &FuncInfo, SmallVectorImpl<SDValue> &Chains,
    DenseMap<int, int> &ArgCopyElisionFrameIndexMap,
    SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs,
    ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg,
    SDValue ArgVal, bool &ArgHasUses) {
  // Check if this is a load from a fixed stack object.
  auto *LNode = dyn_cast<LoadSDNode>(ArgVal);
  if (!LNode)
    return;
  auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
  if (!FINode)
    return;

  // Check that the fixed stack object is the right size and alignment.
  // Look at the alignment that the user wrote on the alloca instead of looking
  // at the stack object.
  auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
  assert(ArgCopyIter != ArgCopyElisionCandidates.end());
  const AllocaInst *AI = ArgCopyIter->second.first;
  int FixedIndex = FINode->getIndex();
  int &AllocaIndex = FuncInfo.StaticAllocaMap[AI];
  int OldIndex = AllocaIndex;
  MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
  if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) {
    LLVM_DEBUG(
        dbgs() << "  argument copy elision failed due to bad fixed stack "
                  "object size\n");
    return;
  }
  Align RequiredAlignment = AI->getAlign();
  if (MFI.getObjectAlign(FixedIndex) < RequiredAlignment) {
    LLVM_DEBUG(dbgs() << "  argument copy elision failed: alignment of alloca "
                         "greater than stack argument alignment ("
                      << DebugStr(RequiredAlignment) << " vs "
                      << DebugStr(MFI.getObjectAlign(FixedIndex)) << ")\n");
    return;
  }

  // Perform the elision. Delete the old stack object and replace its only use
  // in the variable info map. Mark the stack object as mutable.
  LLVM_DEBUG({
    dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n'
           << "  Replacing frame index " << OldIndex << " with " << FixedIndex
           << '\n';
  });
  MFI.RemoveStackObject(OldIndex);
  MFI.setIsImmutableObjectIndex(FixedIndex, false);
  AllocaIndex = FixedIndex;
  ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
  Chains.push_back(ArgVal.getValue(1));

  // Avoid emitting code for the store implementing the copy.
  const StoreInst *SI = ArgCopyIter->second.second;
  ElidedArgCopyInstrs.insert(SI);

  // Check for uses of the argument again so that we can avoid exporting ArgVal
  // if it isn't used by anything other than the store.
  for (const Value *U : Arg.users()) {
    if (U != SI) {
      ArgHasUses = true;
      break;
    }
  }
}

void SelectionDAGISel::LowerArguments(const Function &F) {
  SelectionDAG &DAG = SDB->DAG;
  SDLoc dl = SDB->getCurSDLoc();
  const DataLayout &DL = DAG.getDataLayout();
  SmallVector<ISD::InputArg, 16> Ins;

  // In Naked functions we aren't going to save any registers.
  if (F.hasFnAttribute(Attribute::Naked))
    return;

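  // For example (illustrative): for `define { i64, i64, i64 } @f()` on a
  // target that cannot return three values in registers, an sret pointer
  // parameter is prepended below, mirroring the sret demotion performed on
  // the caller side in TargetLowering::LowerCallTo.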
9650 if (!FuncInfo->CanLowerReturn) {
9651 // Put in an sret pointer parameter before all the other parameters.
9652 SmallVector<EVT, 1> ValueVTs;
9653 ComputeValueVTs(*TLI, DAG.getDataLayout(),
9654 F.getReturnType()->getPointerTo(
9655 DAG.getDataLayout().getAllocaAddrSpace()),
9656 ValueVTs);
9657
9658 // NOTE: Assuming that a pointer will never break down to more than one VT
9659 // or one register.
9660 ISD::ArgFlagsTy Flags;
9661 Flags.setSRet();
9662 MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
9663 ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
9664 ISD::InputArg::NoArgIndex, 0);
9665 Ins.push_back(RetArg);
9666 }
9667
9668 // Look for stores of arguments to static allocas. Mark such arguments with a
9669 // flag to ask the target to give us the memory location of that argument if
9670 // available.
9671 ArgCopyElisionMapTy ArgCopyElisionCandidates;
9672 findArgumentCopyElisionCandidates(DL, FuncInfo.get(),
9673 ArgCopyElisionCandidates);
9674
9675 // Set up the incoming argument description vector.
9676 for (const Argument &Arg : F.args()) {
9677 unsigned ArgNo = Arg.getArgNo();
9678 SmallVector<EVT, 4> ValueVTs;
9679 ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
9680 bool isArgValueUsed = !Arg.use_empty();
9681 unsigned PartBase = 0;
9682 Type *FinalType = Arg.getType();
9683 if (Arg.hasAttribute(Attribute::ByVal))
9684 FinalType = Arg.getParamByValType();
9685 bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
9686 FinalType, F.getCallingConv(), F.isVarArg());
9687 for (unsigned Value = 0, NumValues = ValueVTs.size();
9688 Value != NumValues; ++Value) {
9689 EVT VT = ValueVTs[Value];
9690 Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
9691 ISD::ArgFlagsTy Flags;
9692
9693 // Certain targets (such as MIPS), may have a different ABI alignment
9694 // for a type depending on the context. Give the target a chance to
9695 // specify the alignment it wants.
9696 const Align OriginalAlignment(
9697 TLI->getABIAlignmentForCallingConv(ArgTy, DL));
9698
9699 if (Arg.getType()->isPointerTy()) {
9700 Flags.setPointer();
9701 Flags.setPointerAddrSpace(
9702 cast<PointerType>(Arg.getType())->getAddressSpace());
9703 }
9704 if (Arg.hasAttribute(Attribute::ZExt))
9705 Flags.setZExt();
9706 if (Arg.hasAttribute(Attribute::SExt))
9707 Flags.setSExt();
9708 if (Arg.hasAttribute(Attribute::InReg)) {
9709 // If we are using vectorcall calling convention, a structure that is
9710 // passed InReg - is surely an HVA
9711 if (F.getCallingConv() == CallingConv::X86_VectorCall &&
9712 isa<StructType>(Arg.getType())) {
9713 // The first value of a structure is marked
9714 if (0 == Value)
9715 Flags.setHvaStart();
9716 Flags.setHva();
9717 }
9718 // Set InReg Flag
9719 Flags.setInReg();
9720 }
9721 if (Arg.hasAttribute(Attribute::StructRet))
9722 Flags.setSRet();
9723 if (Arg.hasAttribute(Attribute::SwiftSelf))
9724 Flags.setSwiftSelf();
9725 if (Arg.hasAttribute(Attribute::SwiftError))
9726 Flags.setSwiftError();
9727 if (Arg.hasAttribute(Attribute::ByVal))
9728 Flags.setByVal();
9729 if (Arg.hasAttribute(Attribute::InAlloca)) {
9730 Flags.setInAlloca();
9731 // Set the byval flag for CCAssignFn callbacks that don't know about
9732 // inalloca. This way we can know how many bytes we should've allocated
9733 // and how many bytes a callee cleanup function will pop. If we port
9734 // inalloca to more targets, we'll have to add custom inalloca handling
9735 // in the various CC lowering callbacks.
9736 Flags.setByVal();
9737 }
9738 if (Arg.hasAttribute(Attribute::Preallocated)) {
9739 Flags.setPreallocated();
9740 // Set the byval flag for CCAssignFn callbacks that don't know about
9741 // preallocated. This way we can know how many bytes we should've
9742 // allocated and how many bytes a callee cleanup function will pop. If
9743 // we port preallocated to more targets, we'll have to add custom
9744 // preallocated handling in the various CC lowering callbacks.
9745 Flags.setByVal();
9746 }
9747 if (F.getCallingConv() == CallingConv::X86_INTR) {
9748 // IA Interrupt passes frame (1st parameter) by value in the stack.
9749 if (ArgNo == 0)
9750 Flags.setByVal();
9751 }
9752 if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated()) {
9753 Type *ElementTy = Arg.getParamByValType();
9754
9755 // For ByVal, size and alignment should be passed from FE. BE will
9756 // guess if this info is not there but there are cases it cannot get
9757 // right.
9758 unsigned FrameSize = DL.getTypeAllocSize(Arg.getParamByValType());
9759 Flags.setByValSize(FrameSize);
9760
9761 unsigned FrameAlign;
9762 if (Arg.getParamAlignment())
9763 FrameAlign = Arg.getParamAlignment();
9764 else
9765 FrameAlign = TLI->getByValTypeAlignment(ElementTy, DL);
9766 Flags.setByValAlign(Align(FrameAlign));
9767 }
9768 if (Arg.hasAttribute(Attribute::Nest))
9769 Flags.setNest();
9770 if (NeedsRegBlock)
9771 Flags.setInConsecutiveRegs();
9772 Flags.setOrigAlign(OriginalAlignment);
9773 if (ArgCopyElisionCandidates.count(&Arg))
9774 Flags.setCopyElisionCandidate();
9775 if (Arg.hasAttribute(Attribute::Returned))
9776 Flags.setReturned();
9777
9778 MVT RegisterVT = TLI->getRegisterTypeForCallingConv(
9779 *CurDAG->getContext(), F.getCallingConv(), VT);
9780 unsigned NumRegs = TLI->getNumRegistersForCallingConv(
9781 *CurDAG->getContext(), F.getCallingConv(), VT);
9782 for (unsigned i = 0; i != NumRegs; ++i) {
9783 // For scalable vectors, use the minimum size; individual targets
9784 // are responsible for handling scalable vector arguments and
9785 // return values.
9786 ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed,
9787 ArgNo, PartBase+i*RegisterVT.getStoreSize().getKnownMinSize());
9788 if (NumRegs > 1 && i == 0)
9789 MyFlags.Flags.setSplit();
9790 // if it isn't first piece, alignment must be 1
9791 else if (i > 0) {
9792 MyFlags.Flags.setOrigAlign(Align(1));
9793 if (i == NumRegs - 1)
9794 MyFlags.Flags.setSplitEnd();
9795 }
9796 Ins.push_back(MyFlags);
9797 }
9798 if (NeedsRegBlock && Value == NumValues - 1)
9799 Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
9800 PartBase += VT.getStoreSize().getKnownMinSize();
9801 }
9802 }
9803
9804 // Call the target to set up the argument values.
9805 SmallVector<SDValue, 8> InVals;
9806 SDValue NewRoot = TLI->LowerFormalArguments(
9807 DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals);
9808
9809 // Verify that the target's LowerFormalArguments behaved as expected.
9810 assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
9811 "LowerFormalArguments didn't return a valid chain!");
9812 assert(InVals.size() == Ins.size() &&
9813 "LowerFormalArguments didn't emit the correct number of values!");
9814 LLVM_DEBUG({
9815 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
9816 assert(InVals[i].getNode() &&
9817 "LowerFormalArguments emitted a null value!");
9818 assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
9819 "LowerFormalArguments emitted a value with the wrong type!");
9820 }
9821 });
9822
9823 // Update the DAG with the new chain value resulting from argument lowering.
9824 DAG.setRoot(NewRoot);
9825
9826 // Set up the argument values.
9827 unsigned i = 0;
9828 if (!FuncInfo->CanLowerReturn) {
9829 // Create a virtual register for the sret pointer, and put in a copy
9830 // from the sret argument into it.
9831 SmallVector<EVT, 1> ValueVTs;
9832 ComputeValueVTs(*TLI, DAG.getDataLayout(),
9833 F.getReturnType()->getPointerTo(
9834 DAG.getDataLayout().getAllocaAddrSpace()),
9835 ValueVTs);
9836 MVT VT = ValueVTs[0].getSimpleVT();
9837 MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
9838 Optional<ISD::NodeType> AssertOp = None;
9839 SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT, VT,
9840 nullptr, F.getCallingConv(), AssertOp);
9841
9842 MachineFunction& MF = SDB->DAG.getMachineFunction();
9843 MachineRegisterInfo& RegInfo = MF.getRegInfo();
9844 Register SRetReg =
9845 RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
9846 FuncInfo->DemoteRegister = SRetReg;
9847 NewRoot =
9848 SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
9849 DAG.setRoot(NewRoot);
9850
9851 // i indexes lowered arguments. Bump it past the hidden sret argument.
9852 ++i;
9853 }
9854
9855 SmallVector<SDValue, 4> Chains;
9856 DenseMap<int, int> ArgCopyElisionFrameIndexMap;
9857 for (const Argument &Arg : F.args()) {
9858 SmallVector<SDValue, 4> ArgValues;
9859 SmallVector<EVT, 4> ValueVTs;
9860 ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
9861 unsigned NumValues = ValueVTs.size();
9862 if (NumValues == 0)
9863 continue;
9864
9865 bool ArgHasUses = !Arg.use_empty();
9866
9867 // Elide the copying store if the target loaded this argument from a
9868 // suitable fixed stack object.
9869 if (Ins[i].Flags.isCopyElisionCandidate()) {
9870 tryToElideArgumentCopy(*FuncInfo, Chains, ArgCopyElisionFrameIndexMap,
9871 ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg,
9872 InVals[i], ArgHasUses);
9873 }
9874
9875 // If this argument is unused, remember its value so it can be used to
9876 // generate debugging information.
9877 bool isSwiftErrorArg =
9878 TLI->supportSwiftError() &&
9879 Arg.hasAttribute(Attribute::SwiftError);
9880 if (!ArgHasUses && !isSwiftErrorArg) {
9881 SDB->setUnusedArgValue(&Arg, InVals[i]);
9882
9883 // Also remember any frame index for use in FastISel.
9884 if (FrameIndexSDNode *FI =
9885 dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
9886 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
9887 }
9888
9889 for (unsigned Val = 0; Val != NumValues; ++Val) {
9890 EVT VT = ValueVTs[Val];
9891 MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(),
9892 F.getCallingConv(), VT);
9893 unsigned NumParts = TLI->getNumRegistersForCallingConv(
9894 *CurDAG->getContext(), F.getCallingConv(), VT);
9895
9896 // Even an apparently unused swifterror argument needs to be returned, so
9897 // we generate a copy for it that can be used on return from the
9898 // function.
9899 if (ArgHasUses || isSwiftErrorArg) {
9900 Optional<ISD::NodeType> AssertOp;
9901 if (Arg.hasAttribute(Attribute::SExt))
9902 AssertOp = ISD::AssertSext;
9903 else if (Arg.hasAttribute(Attribute::ZExt))
9904 AssertOp = ISD::AssertZext;
9905
9906 ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
9907 PartVT, VT, nullptr,
9908 F.getCallingConv(), AssertOp));
9909 }
9910
9911 i += NumParts;
9912 }
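// Illustrative example: an argument of type {i64, i32} on a typical 32-bit
// target has NumValues == 2 and is lowered into 2 + 1 == 3 register parts,
// so i advances past three InVals entries whether or not the argument is
// used.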
9913
9914 // We don't need to do anything else for unused arguments.
9915 if (ArgValues.empty())
9916 continue;
9917
9918 // Note down frame index.
9919 if (FrameIndexSDNode *FI =
9920 dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
9921 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
9922
9923 SDValue Res = DAG.getMergeValues(makeArrayRef(ArgValues.data(), NumValues),
9924 SDB->getCurSDLoc());
9925
9926 SDB->setValue(&Arg, Res);
9927 if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
9928 // We want to associate the argument with the frame index, among
9929 // involved operands, that correspond to the lowest address. The
9930 // getCopyFromParts function, called earlier, is swapping the order of
9931 // the operands to BUILD_PAIR depending on endianness. The result of
9932 // that swapping is that the least significant bits of the argument will
9933 // be in the first operand of the BUILD_PAIR node, and the most
9934 // significant bits will be in the second operand.
9935 unsigned LowAddressOp = DAG.getDataLayout().isBigEndian() ? 1 : 0;
9936 if (LoadSDNode *LNode =
9937 dyn_cast<LoadSDNode>(Res.getOperand(LowAddressOp).getNode()))
9938 if (FrameIndexSDNode *FI =
9939 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
9940 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
9941 }
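// Illustrative example: a 64-bit argument split across two 32-bit stack
// loads is merged with BUILD_PAIR(lo, hi), where operand 0 holds the least
// significant half. On a big-endian target the most significant half lives
// at the lower address, so LowAddressOp selects operand 1 there and
// operand 0 on little-endian targets.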
9942
9943 // Analyses past this point are naive and don't expect an AssertZext node.
9944 if (Res.getOpcode() == ISD::AssertZext)
9945 Res = Res.getOperand(0);
9946
9947 // Update the SwiftErrorVRegDefMap.
9948 if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) {
9949 unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
9950 if (Register::isVirtualRegister(Reg))
9951 SwiftError->setCurrentVReg(FuncInfo->MBB, SwiftError->getFunctionArg(),
9952 Reg);
9953 }
9954
9955 // If this argument is live outside of the entry block, insert a copy from
9956 // wherever we got it to the vreg that other BBs will reference it by.
9957 if (Res.getOpcode() == ISD::CopyFromReg) {
9958 // If we can, though, try to skip creating an unnecessary vreg.
9959 // FIXME: This isn't very clean... it would be nice to make this more
9960 // general.
9961 unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
9962 if (Register::isVirtualRegister(Reg)) {
9963 FuncInfo->ValueMap[&Arg] = Reg;
9964 continue;
9965 }
9966 }
9967 if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) {
9968 FuncInfo->InitializeRegForValue(&Arg);
9969 SDB->CopyToExportRegsIfNeeded(&Arg);
9970 }
9971 }
9972
9973 if (!Chains.empty()) {
9974 Chains.push_back(NewRoot);
9975 NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
9976 }
9977
9978 DAG.setRoot(NewRoot);
9979
9980 assert(i == InVals.size() && "Argument register count mismatch!");
9981
9982 // If any argument copy elisions occurred and we have debug info, update the
9983 // stale frame indices used in the dbg.declare variable info table.
9984 MachineFunction::VariableDbgInfoMapTy &DbgDeclareInfo = MF->getVariableDbgInfo();
9985 if (!DbgDeclareInfo.empty() && !ArgCopyElisionFrameIndexMap.empty()) {
9986 for (MachineFunction::VariableDbgInfo &VI : DbgDeclareInfo) {
9987 auto I = ArgCopyElisionFrameIndexMap.find(VI.Slot);
9988 if (I != ArgCopyElisionFrameIndexMap.end())
9989 VI.Slot = I->second;
9990 }
9991 }
9992
9993 // Finally, if the target has anything special to do, allow it to do so.
9994 emitFunctionEntryCode();
9995 }
9996
9997 /// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
9998 /// ensure constants are generated when needed. Remember the virtual registers
9999 /// that need to be added to the Machine PHI nodes as input. We cannot just
10000 /// directly add them, because expansion might result in multiple MBBs for one
10001 /// BB. As such, the start of the BB might correspond to a different MBB than
10002 /// the end.
10003 void
10004 SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
10005 const Instruction *TI = LLVMBB->getTerminator();
10006
10007 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
10008
10009 // Check PHI nodes in successors that expect a value to be available from this
10010 // block.
10011 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
10012 const BasicBlock *SuccBB = TI->getSuccessor(succ);
10013 if (!isa<PHINode>(SuccBB->begin())) continue;
10014 MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
10015
10016 // If this terminator has multiple identical successors (common for
10017 // switches), only handle each succ once.
10018 if (!SuccsHandled.insert(SuccMBB).second)
10019 continue;
10020
10021 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
10022
10023 // At this point we know that there is a 1-1 correspondence between LLVM PHI
10024 // nodes and Machine PHI nodes, but the incoming operands have not been
10025 // emitted yet.
10026 for (const PHINode &PN : SuccBB->phis()) {
10027 // Ignore dead PHIs.
10028 if (PN.use_empty())
10029 continue;
10030
10031 // Skip empty types
10032 if (PN.getType()->isEmptyTy())
10033 continue;
10034
10035 unsigned Reg;
10036 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
10037
10038 if (const Constant *C = dyn_cast<Constant>(PHIOp)) {
10039 unsigned &RegOut = ConstantsOut[C];
10040 if (RegOut == 0) {
10041 RegOut = FuncInfo.CreateRegs(C);
10042 CopyValueToVirtualRegister(C, RegOut);
10043 }
10044 Reg = RegOut;
10045 } else {
10046 DenseMap<const Value *, Register>::iterator I =
10047 FuncInfo.ValueMap.find(PHIOp);
10048 if (I != FuncInfo.ValueMap.end())
10049 Reg = I->second;
10050 else {
10051 assert(isa<AllocaInst>(PHIOp) &&
10052 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
10053 "Didn't codegen value into a register!??");
10054 Reg = FuncInfo.CreateRegs(PHIOp);
10055 CopyValueToVirtualRegister(PHIOp, Reg);
10056 }
10057 }
10058
10059 // Remember that this register needs to be added to the machine PHI node as
10060 // the input for this MBB.
10061 SmallVector<EVT, 4> ValueVTs;
10062 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10063 ComputeValueVTs(TLI, DAG.getDataLayout(), PN.getType(), ValueVTs);
10064 for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
10065 EVT VT = ValueVTs[vti];
10066 unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
10067 for (unsigned i = 0, e = NumRegisters; i != e; ++i)
10068 FuncInfo.PHINodesToUpdate.push_back(
10069 std::make_pair(&*MBBI++, Reg + i));
10070 Reg += NumRegisters;
10071 }
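// Illustrative example: an i128 PHI on a typical 64-bit target expands to
// two i64 registers and two machine PHI nodes; one (PHI, vreg) pair is
// queued above for each part.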
10072 }
10073 }
10074
10075 ConstantsOut.clear();
10076 }
10077
10078 /// Add a successor MBB to ParentMBB, creating a new MachineBasicBlock for BB
10079 /// if SuccMBB is null.
10080 MachineBasicBlock *
10081 SelectionDAGBuilder::StackProtectorDescriptor::
10082 AddSuccessorMBB(const BasicBlock *BB,
10083 MachineBasicBlock *ParentMBB,
10084 bool IsLikely,
10085 MachineBasicBlock *SuccMBB) {
10086 // If SuccBB has not been created yet, create it.
10087 if (!SuccMBB) {
10088 MachineFunction *MF = ParentMBB->getParent();
10089 MachineFunction::iterator BBI(ParentMBB);
10090 SuccMBB = MF->CreateMachineBasicBlock(BB);
10091 MF->insert(++BBI, SuccMBB);
10092 }
10093 // Add it as a successor of ParentMBB.
10094 ParentMBB->addSuccessor(
10095 SuccMBB, BranchProbabilityInfo::getBranchProbStackProtector(IsLikely));
10096 return SuccMBB;
10097 }
10098
10099 MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
10100 MachineFunction::iterator I(MBB);
10101 if (++I == FuncInfo.MF->end())
10102 return nullptr;
10103 return &*I;
10104 }
10105
10106 /// During lowering new call nodes can be created (such as memset, etc.).
10107 /// Those will become new roots of the current DAG, but complications arise
10108 /// when they are tail calls. In such cases, the call lowering will update
10109 /// the root, but the builder still needs to know that a tail call has been
10110 /// lowered in order to avoid generating an additional return.
10111 void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
10112 // If the node is null, we do have a tail call.
10113 if (MaybeTC.getNode() != nullptr)
10114 DAG.setRoot(MaybeTC);
10115 else
10116 HasTailCall = true;
10117 }
10118
10119 void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
10120 MachineBasicBlock *SwitchMBB,
10121 MachineBasicBlock *DefaultMBB) {
10122 MachineFunction *CurMF = FuncInfo.MF;
10123 MachineBasicBlock *NextMBB = nullptr;
10124 MachineFunction::iterator BBI(W.MBB);
10125 if (++BBI != FuncInfo.MF->end())
10126 NextMBB = &*BBI;
10127
10128 unsigned Size = W.LastCluster - W.FirstCluster + 1;
10129
10130 BranchProbabilityInfo *BPI = FuncInfo.BPI;
10131
10132 if (Size == 2 && W.MBB == SwitchMBB) {
10133 // If any two of the cases have the same destination, and if one value
10134 // is the same as the other, but has one bit unset that the other has set,
10135 // use bit manipulation to do two compares at once. For example:
10136 // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
10137 // TODO: This could be extended to merge any 2 cases in switches with 3
10138 // cases.
10139 // TODO: Handle cases where W.CaseBB != SwitchBB.
10140 CaseCluster &Small = *W.FirstCluster;
10141 CaseCluster &Big = *W.LastCluster;
10142
10143 if (Small.Low == Small.High && Big.Low == Big.High &&
10144 Small.MBB == Big.MBB) {
10145 const APInt &SmallValue = Small.Low->getValue();
10146 const APInt &BigValue = Big.Low->getValue();
10147
10148 // Check that there is only one bit different.
10149 APInt CommonBit = BigValue ^ SmallValue;
10150 if (CommonBit.isPowerOf2()) {
10151 SDValue CondLHS = getValue(Cond);
10152 EVT VT = CondLHS.getValueType();
10153 SDLoc DL = getCurSDLoc();
10154
10155 SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
10156 DAG.getConstant(CommonBit, DL, VT));
10157 SDValue Cond = DAG.getSetCC(
10158 DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
10159 ISD::SETEQ);
10160
10161 // Update successor info.
10162 // Both Small and Big will jump to Small.BB, so we sum up the
10163 // probabilities.
10164 addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
10165 if (BPI)
10166 addSuccessorWithProb(
10167 SwitchMBB, DefaultMBB,
10168 // The default destination is the first successor in IR.
10169 BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
10170 else
10171 addSuccessorWithProb(SwitchMBB, DefaultMBB);
10172
10173 // Insert the true branch.
10174 SDValue BrCond =
10175 DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
10176 DAG.getBasicBlock(Small.MBB));
10177 // Insert the false branch.
10178 BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
10179 DAG.getBasicBlock(DefaultMBB));
10180
10181 DAG.setRoot(BrCond);
10182 return;
10183 }
10184 }
10185 }
10186
10187 if (TM.getOptLevel() != CodeGenOpt::None) {
10188 // Here, we order cases by probability so the most likely case will be
10189 // checked first. However, two clusters can have the same probability in
10190 // which case their relative ordering is non-deterministic. So we use Low
10191 // as a tie-breaker as clusters are guaranteed to never overlap.
10192 llvm::sort(W.FirstCluster, W.LastCluster + 1,
10193 [](const CaseCluster &a, const CaseCluster &b) {
10194 return a.Prob != b.Prob ?
10195 a.Prob > b.Prob :
10196 a.Low->getValue().slt(b.Low->getValue());
10197 });
10198
10199 // Rearrange the case blocks so that the last one falls through if possible
10200 // without changing the order of probabilities.
10201 for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) {
10202 --I;
10203 if (I->Prob > W.LastCluster->Prob)
10204 break;
10205 if (I->Kind == CC_Range && I->MBB == NextMBB) {
10206 std::swap(*I, *W.LastCluster);
10207 break;
10208 }
10209 }
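// Illustrative example: given clusters ordered as {A:0.5, B:0.2, C:0.2}
// where B is a range cluster targeting NextMBB, B and C are swapped above so
// the final comparison falls through to B's block; since B and C have equal
// probability, the most-likely-first order is preserved.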
10210 }
10211
10212 // Compute total probability.
10213 BranchProbability DefaultProb = W.DefaultProb;
10214 BranchProbability UnhandledProbs = DefaultProb;
10215 for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
10216 UnhandledProbs += I->Prob;
10217
10218 MachineBasicBlock *CurMBB = W.MBB;
10219 for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
10220 bool FallthroughUnreachable = false;
10221 MachineBasicBlock *Fallthrough;
10222 if (I == W.LastCluster) {
10223 // For the last cluster, fall through to the default destination.
10224 Fallthrough = DefaultMBB;
10225 FallthroughUnreachable = isa<UnreachableInst>(
10226 DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
10227 } else {
10228 Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
10229 CurMF->insert(BBI, Fallthrough);
10230 // Put Cond in a virtual register to make it available from the new blocks.
10231 ExportFromCurrentBlock(Cond);
10232 }
10233 UnhandledProbs -= I->Prob;
10234
10235 switch (I->Kind) {
10236 case CC_JumpTable: {
10237 // FIXME: Optimize away range check based on pivot comparisons.
10238 JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
10239 SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
10240
10241 // The jump block hasn't been inserted yet; insert it here.
10242 MachineBasicBlock *JumpMBB = JT->MBB;
10243 CurMF->insert(BBI, JumpMBB);
10244
10245 auto JumpProb = I->Prob;
10246 auto FallthroughProb = UnhandledProbs;
10247
10248 // If the default statement is a target of the jump table, we evenly
10249 // distribute the default probability to successors of CurMBB. Also
10250 // update the probability on the edge from JumpMBB to Fallthrough.
10251 for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
10252 SE = JumpMBB->succ_end();
10253 SI != SE; ++SI) {
10254 if (*SI == DefaultMBB) {
10255 JumpProb += DefaultProb / 2;
10256 FallthroughProb -= DefaultProb / 2;
10257 JumpMBB->setSuccProbability(SI, DefaultProb / 2);
10258 JumpMBB->normalizeSuccProbs();
10259 break;
10260 }
10261 }
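// Illustrative example: with DefaultProb == 1/4, the edge from JumpMBB to
// DefaultMBB is set to 1/8, the CurMBB->JumpMBB edge gains 1/8, and the
// fallthrough edge gives up the same 1/8, keeping the totals consistent.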
10262
10263 if (FallthroughUnreachable) {
10264 // Skip the range check if the fallthrough block is unreachable.
10265 JTH->OmitRangeCheck = true;
10266 }
10267
10268 if (!JTH->OmitRangeCheck)
10269 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
10270 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
10271 CurMBB->normalizeSuccProbs();
10272
10273 // The jump table header will be inserted in our current block, do the
10274 // range check, and fall through to our fallthrough block.
10275 JTH->HeaderBB = CurMBB;
10276 JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
10277
10278 // If we're in the right place, emit the jump table header right now.
10279 if (CurMBB == SwitchMBB) {
10280 visitJumpTableHeader(*JT, *JTH, SwitchMBB);
10281 JTH->Emitted = true;
10282 }
10283 break;
10284 }
10285 case CC_BitTests: {
10286 // FIXME: Optimize away range check based on pivot comparisons.
10287 BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
10288
10289 // The bit test blocks haven't been inserted yet; insert them here.
10290 for (BitTestCase &BTC : BTB->Cases)
10291 CurMF->insert(BBI, BTC.ThisBB);
10292
10293 // Fill in fields of the BitTestBlock.
10294 BTB->Parent = CurMBB;
10295 BTB->Default = Fallthrough;
10296
10297 BTB->DefaultProb = UnhandledProbs;
10298 // If the cases in the bit test don't form a contiguous range, we evenly
10299 // distribute the probability on the edge to Fallthrough to two
10300 // successors of CurMBB.
10301 if (!BTB->ContiguousRange) {
10302 BTB->Prob += DefaultProb / 2;
10303 BTB->DefaultProb -= DefaultProb / 2;
10304 }
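// Illustrative example: if the tested cases are {1, 3, 9}, the covered
// range [1, 9] also contains untested values, so ContiguousRange is false
// and half of DefaultProb is shifted from BTB->DefaultProb onto BTB->Prob.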
10305
10306 if (FallthroughUnreachable) {
10307 // Skip the range check if the fallthrough block is unreachable.
10308 BTB->OmitRangeCheck = true;
10309 }
10310
10311 // If we're in the right place, emit the bit test header right now.
10312 if (CurMBB == SwitchMBB) {
10313 visitBitTestHeader(*BTB, SwitchMBB);
10314 BTB->Emitted = true;
10315 }
10316 break;
10317 }
10318 case CC_Range: {
10319 const Value *RHS, *LHS, *MHS;
10320 ISD::CondCode CC;
10321 if (I->Low == I->High) {
10322 // Check Cond == I->Low.
10323 CC = ISD::SETEQ;
10324 LHS = Cond;
10325 RHS = I->Low;
10326 MHS = nullptr;
10327 } else {
10328 // Check I->Low <= Cond <= I->High.
10329 CC = ISD::SETLE;
10330 LHS = I->Low;
10331 MHS = Cond;
10332 RHS = I->High;
10333 }
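// Illustrative example: a singleton cluster [5, 5] is lowered as the test
// "Cond == 5" (SETEQ with no middle operand), while a range cluster [3, 7]
// becomes "3 <= Cond && Cond <= 7" (SETLE with Cond as the middle operand).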
10334
10335 // If Fallthrough is unreachable, fold away the comparison.
10336 if (FallthroughUnreachable)
10337 CC = ISD::SETTRUE;
10338
10339 // The false probability is the sum of all unhandled cases.
10340 CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB,
10341 getCurSDLoc(), I->Prob, UnhandledProbs);
10342
10343 if (CurMBB == SwitchMBB)
10344 visitSwitchCase(CB, SwitchMBB);
10345 else
10346 SL->SwitchCases.push_back(CB);
10347
10348 break;
10349 }
10350 }
10351 CurMBB = Fallthrough;
10352 }
10353 }
10354
10355 unsigned SelectionDAGBuilder::caseClusterRank(const CaseCluster &CC,
10356 CaseClusterIt First,
10357 CaseClusterIt Last) {
10358 return std::count_if(First, Last + 1, [&](const CaseCluster &X) {
10359 if (X.Prob != CC.Prob)
10360 return X.Prob > CC.Prob;
10361
10362 // Ties are broken by comparing the case value.
10363 return X.Low->getValue().slt(CC.Low->getValue());
10364 });
10365 }
10366
10367 void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
10368 const SwitchWorkListItem &W,
10369 Value *Cond,
10370 MachineBasicBlock *SwitchMBB) {
10371 assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
10372 "Clusters not sorted?");
10373
10374 assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
10375
10376 // Balance the tree based on branch probabilities to create a near-optimal (in
10377 // terms of search time given key frequency) binary search tree. See e.g. Kurt
10378 // Mehlhorn "Nearly Optimal Binary Search Trees" (1975).
10379 CaseClusterIt LastLeft = W.FirstCluster;
10380 CaseClusterIt FirstRight = W.LastCluster;
10381 auto LeftProb = LastLeft->Prob + W.DefaultProb / 2;
10382 auto RightProb = FirstRight->Prob + W.DefaultProb / 2;
10383
10384 // Move LastLeft and FirstRight towards each other from opposite directions to
10385 // find a partitioning of the clusters which balances the probability on both
10386 // sides. If LeftProb and RightProb are equal, alternate which side is
10387 // taken to ensure 0-probability nodes are distributed evenly.
10388 unsigned I = 0;
10389 while (LastLeft + 1 < FirstRight) {
10390 if (LeftProb < RightProb || (LeftProb == RightProb && (I & 1)))
10391 LeftProb += (++LastLeft)->Prob;
10392 else
10393 RightProb += (--FirstRight)->Prob;
10394 I++;
10395 }
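// Illustrative example: for cluster probabilities {0.1, 0.2, 0.3, 0.4} and
// a zero default probability, the scan above stops with the left side
// holding {0.1, 0.2, 0.3} (total 0.6) and the right side {0.4}, the closest
// split this walk can reach.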
10396
10397 while (true) {
10398 // Our binary search tree differs from a typical BST in that ours can have up
10399 // to three values in each leaf. The pivot selection above doesn't take that
10400 // into account, which means the tree might require more nodes and be less
10401 // efficient. We compensate for this here.
10402
10403 unsigned NumLeft = LastLeft - W.FirstCluster + 1;
10404 unsigned NumRight = W.LastCluster - FirstRight + 1;
10405
10406 if (std::min(NumLeft, NumRight) < 3 && std::max(NumLeft, NumRight) > 3) {
10407 // If one side has fewer than 3 clusters, and the other has more than 3,
10408 // consider taking a cluster from the other side.
10409
10410 if (NumLeft < NumRight) {
10411 // Consider moving the first cluster on the right to the left side.
10412 CaseCluster &CC = *FirstRight;
10413 unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
10414 unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
10415 if (LeftSideRank <= RightSideRank) {
10416 // Moving the cluster to the left does not demote it.
10417 ++LastLeft;
10418 ++FirstRight;
10419 continue;
10420 }
10421 } else {
10422 assert(NumRight < NumLeft);
10423 // Consider moving the last element on the left to the right side.
10424 CaseCluster &CC = *LastLeft;
10425 unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
10426 unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
10427 if (RightSideRank <= LeftSideRank) {
10428 // Moving the cluster to the right does not demote it.
10429 --LastLeft;
10430 --FirstRight;
10431 continue;
10432 }
10433 }
10434 }
10435 break;
10436 }
10437
10438 assert(LastLeft + 1 == FirstRight);
10439 assert(LastLeft >= W.FirstCluster);
10440 assert(FirstRight <= W.LastCluster);
10441
10442 // Use the first element on the right as pivot since we will make less-than
10443 // comparisons against it.
10444 CaseClusterIt PivotCluster = FirstRight;
10445 assert(PivotCluster > W.FirstCluster);
10446 assert(PivotCluster <= W.LastCluster);
10447
10448 CaseClusterIt FirstLeft = W.FirstCluster;
10449 CaseClusterIt LastRight = W.LastCluster;
10450
10451 const ConstantInt *Pivot = PivotCluster->Low;
10452
10453 // New blocks will be inserted immediately after the current one.
10454 MachineFunction::iterator BBI(W.MBB);
10455 ++BBI;
10456
10457 // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
10458 // we can branch to its destination directly if it's squeezed exactly in
10459 // between the known lower bound and Pivot - 1.
10460 MachineBasicBlock *LeftMBB;
10461 if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
10462 FirstLeft->Low == W.GE &&
10463 (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
10464 LeftMBB = FirstLeft->MBB;
10465 } else {
10466 LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
10467 FuncInfo.MF->insert(BBI, LeftMBB);
10468 WorkList.push_back(
10469 {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
10470 // Put Cond in a virtual register to make it available from the new blocks.
10471 ExportFromCurrentBlock(Cond);
10472 }
10473
10474 // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
10475 // single cluster, RHS.Low == Pivot, and we can branch to its destination
10476 // directly if RHS.High equals the current upper bound.
10477 MachineBasicBlock *RightMBB;
10478 if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
10479 W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
10480 RightMBB = FirstRight->MBB;
10481 } else {
10482 RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
10483 FuncInfo.MF->insert(BBI, RightMBB);
10484 WorkList.push_back(
10485 {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
10486 // Put Cond in a virtual register to make it available from the new blocks.
10487 ExportFromCurrentBlock(Cond);
10488 }
10489
10490 // Create the CaseBlock record that will be used to lower the branch.
10491 CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
10492 getCurSDLoc(), LeftProb, RightProb);
10493
10494 if (W.MBB == SwitchMBB)
10495 visitSwitchCase(CB, SwitchMBB);
10496 else
10497 SL->SwitchCases.push_back(CB);
10498 }
10499
10500 // Scale CaseProb after peeling a case with the probability of PeeledCaseProb
10501 // from the switch statement.
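// Illustrative example: peeling a case with probability 3/4 leaves the
// remaining switch with probability 1/4, so a surviving case that had
// probability 1/8 is rescaled to (1/8) / (1/4) == 1/2.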
10502 static BranchProbability scaleCaseProbability(BranchProbability CaseProb,
10503 BranchProbability PeeledCaseProb) {
10504 if (PeeledCaseProb == BranchProbability::getOne())
10505 return BranchProbability::getZero();
10506 BranchProbability SwitchProb = PeeledCaseProb.getCompl();
10507
10508 uint32_t Numerator = CaseProb.getNumerator();
10509 uint32_t Denominator = SwitchProb.scale(CaseProb.getDenominator());
10510 return BranchProbability(Numerator, std::max(Numerator, Denominator));
10511 }
10512
10513 // Try to peel the top probability case if it exceeds the threshold.
10514 // Return current MachineBasicBlock for the switch statement if the peeling
10515 // does not occur.
10516 // If the peeling is performed, return the newly created MachineBasicBlock
10517 // for the peeled switch statement. Also update Clusters to remove the peeled
10518 // case. PeeledCaseProb is the BranchProbability for the peeled case.
10519 MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster(
10520 const SwitchInst &SI, CaseClusterVector &Clusters,
10521 BranchProbability &PeeledCaseProb) {
10522 MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
10523 // Don't perform peeling if there is only one cluster or when optimizing for size.
10524 if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 ||
10525 TM.getOptLevel() == CodeGenOpt::None ||
10526 SwitchMBB->getParent()->getFunction().hasMinSize())
10527 return SwitchMBB;
10528
10529 BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100);
10530 unsigned PeeledCaseIndex = 0;
10531 bool SwitchPeeled = false;
10532 for (unsigned Index = 0; Index < Clusters.size(); ++Index) {
10533 CaseCluster &CC = Clusters[Index];
10534 if (CC.Prob < TopCaseProb)
10535 continue;
10536 TopCaseProb = CC.Prob;
10537 PeeledCaseIndex = Index;
10538 SwitchPeeled = true;
10539 }
10540 if (!SwitchPeeled)
10541 return SwitchMBB;
10542
10543 LLVM_DEBUG(dbgs() << "Peeled one top case in switch stmt, prob: "
10544 << TopCaseProb << "\n");
10545
10546 // Record the MBB for the peeled switch statement.
10547 MachineFunction::iterator BBI(SwitchMBB);
10548 ++BBI;
10549 MachineBasicBlock *PeeledSwitchMBB =
10550 FuncInfo.MF->CreateMachineBasicBlock(SwitchMBB->getBasicBlock());
10551 FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
10552
10553 ExportFromCurrentBlock(SI.getCondition());
10554 auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
10555 SwitchWorkListItem W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
10556 nullptr, nullptr, TopCaseProb.getCompl()};
10557 lowerWorkItem(W, SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
10558
10559 Clusters.erase(PeeledCaseIt);
10560 for (CaseCluster &CC : Clusters) {
10561 LLVM_DEBUG(
10562 dbgs() << "Scale the probablity for one cluster, before scaling: "
10563 << CC.Prob << "\n");
10564 CC.Prob = scaleCaseProbability(CC.Prob, TopCaseProb);
10565 LLVM_DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n");
10566 }
10567 PeeledCaseProb = TopCaseProb;
10568 return PeeledSwitchMBB;
10569 }
10570
10571 void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
10572 // Extract cases from the switch.
10573 BranchProbabilityInfo *BPI = FuncInfo.BPI;
10574 CaseClusterVector Clusters;
10575 Clusters.reserve(SI.getNumCases());
10576 for (auto I : SI.cases()) {
10577 MachineBasicBlock *Succ = FuncInfo.MBBMap[I.getCaseSuccessor()];
10578 const ConstantInt *CaseVal = I.getCaseValue();
10579 BranchProbability Prob =
10580 BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
10581 : BranchProbability(1, SI.getNumCases() + 1);
10582 Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
10583 }
10584
10585 MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[SI.getDefaultDest()];
10586
10587 // Cluster adjacent cases with the same destination. We do this at all
10588 // optimization levels because it's cheap to do and will make codegen faster
10589 // if there are many clusters.
10590 sortAndRangeify(Clusters);
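// Illustrative example: cases 1, 2 and 3 that all branch to the same block
// are merged by sortAndRangeify into a single CC_Range cluster [1, 3].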
10591
10592 // The branch probability of the peeled case.
10593 BranchProbability PeeledCaseProb = BranchProbability::getZero();
10594 MachineBasicBlock *PeeledSwitchMBB =
10595 peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
10596
10597 // If there is only the default destination, jump there directly.
10598 MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
10599 if (Clusters.empty()) {
10600 assert(PeeledSwitchMBB == SwitchMBB);
10601 SwitchMBB->addSuccessor(DefaultMBB);
10602 if (DefaultMBB != NextBlock(SwitchMBB)) {
10603 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
10604 getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
10605 }
10606 return;
10607 }
10608
10609 SL->findJumpTables(Clusters, &SI, DefaultMBB, DAG.getPSI(), DAG.getBFI());
10610 SL->findBitTestClusters(Clusters, &SI);
10611
10612 LLVM_DEBUG({
10613 dbgs() << "Case clusters: ";
10614 for (const CaseCluster &C : Clusters) {
10615 if (C.Kind == CC_JumpTable)
10616 dbgs() << "JT:";
10617 if (C.Kind == CC_BitTests)
10618 dbgs() << "BT:";
10619
10620 C.Low->getValue().print(dbgs(), true);
10621 if (C.Low != C.High) {
10622 dbgs() << '-';
10623 C.High->getValue().print(dbgs(), true);
10624 }
10625 dbgs() << ' ';
10626 }
10627 dbgs() << '\n';
10628 });
10629
10630 assert(!Clusters.empty());
10631 SwitchWorkList WorkList;
10632 CaseClusterIt First = Clusters.begin();
10633 CaseClusterIt Last = Clusters.end() - 1;
10634 auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
10635 // Scale the branch probability for DefaultMBB if peeling occurred and
10636 // DefaultMBB was not replaced.
10637 if (PeeledCaseProb != BranchProbability::getZero() &&
10638 DefaultMBB == FuncInfo.MBBMap[SI.getDefaultDest()])
10639 DefaultProb = scaleCaseProbability(DefaultProb, PeeledCaseProb);
10640 WorkList.push_back(
10641 {PeeledSwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
10642
10643 while (!WorkList.empty()) {
10644 SwitchWorkListItem W = WorkList.back();
10645 WorkList.pop_back();
10646 unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
10647
10648 if (NumClusters > 3 && TM.getOptLevel() != CodeGenOpt::None &&
10649 !DefaultMBB->getParent()->getFunction().hasMinSize()) {
10650 // For optimized builds, lower large range as a balanced binary tree.
10651 splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
10652 continue;
10653 }
10654
10655 lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB);
10656 }
10657 }
10658
10659 void SelectionDAGBuilder::visitFreeze(const FreezeInst &I) {
10660 SmallVector<EVT, 4> ValueVTs;
10661 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
10662 ValueVTs);
10663 unsigned NumValues = ValueVTs.size();
10664 if (NumValues == 0) return;
10665
10666 SmallVector<SDValue, 4> Values(NumValues);
10667 SDValue Op = getValue(I.getOperand(0));
10668
10669 for (unsigned i = 0; i != NumValues; ++i)
10670 Values[i] = DAG.getNode(ISD::FREEZE, getCurSDLoc(), ValueVTs[i],
10671 SDValue(Op.getNode(), Op.getResNo() + i));
10672
10673 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
10674 DAG.getVTList(ValueVTs), Values));
10675 }
10676