1 //===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This implements the SelectionDAG class.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "llvm/CodeGen/SelectionDAG.h"
14 #include "SDNodeDbgValue.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/APSInt.h"
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/BitVector.h"
20 #include "llvm/ADT/FoldingSet.h"
21 #include "llvm/ADT/None.h"
22 #include "llvm/ADT/STLExtras.h"
23 #include "llvm/ADT/SmallPtrSet.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/ADT/Triple.h"
26 #include "llvm/ADT/Twine.h"
27 #include "llvm/Analysis/BlockFrequencyInfo.h"
28 #include "llvm/Analysis/MemoryLocation.h"
29 #include "llvm/Analysis/ProfileSummaryInfo.h"
30 #include "llvm/Analysis/ValueTracking.h"
31 #include "llvm/CodeGen/FunctionLoweringInfo.h"
32 #include "llvm/CodeGen/ISDOpcodes.h"
33 #include "llvm/CodeGen/MachineBasicBlock.h"
34 #include "llvm/CodeGen/MachineConstantPool.h"
35 #include "llvm/CodeGen/MachineFrameInfo.h"
36 #include "llvm/CodeGen/MachineFunction.h"
37 #include "llvm/CodeGen/MachineMemOperand.h"
38 #include "llvm/CodeGen/RuntimeLibcalls.h"
39 #include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
40 #include "llvm/CodeGen/SelectionDAGNodes.h"
41 #include "llvm/CodeGen/SelectionDAGTargetInfo.h"
42 #include "llvm/CodeGen/TargetFrameLowering.h"
43 #include "llvm/CodeGen/TargetLowering.h"
44 #include "llvm/CodeGen/TargetRegisterInfo.h"
45 #include "llvm/CodeGen/TargetSubtargetInfo.h"
46 #include "llvm/CodeGen/ValueTypes.h"
47 #include "llvm/IR/Constant.h"
48 #include "llvm/IR/Constants.h"
49 #include "llvm/IR/DataLayout.h"
50 #include "llvm/IR/DebugInfoMetadata.h"
51 #include "llvm/IR/DebugLoc.h"
52 #include "llvm/IR/DerivedTypes.h"
53 #include "llvm/IR/Function.h"
54 #include "llvm/IR/GlobalValue.h"
55 #include "llvm/IR/Metadata.h"
56 #include "llvm/IR/Type.h"
57 #include "llvm/IR/Value.h"
58 #include "llvm/Support/Casting.h"
59 #include "llvm/Support/CodeGen.h"
60 #include "llvm/Support/Compiler.h"
61 #include "llvm/Support/Debug.h"
62 #include "llvm/Support/ErrorHandling.h"
63 #include "llvm/Support/KnownBits.h"
64 #include "llvm/Support/MachineValueType.h"
65 #include "llvm/Support/ManagedStatic.h"
66 #include "llvm/Support/MathExtras.h"
67 #include "llvm/Support/Mutex.h"
68 #include "llvm/Support/raw_ostream.h"
69 #include "llvm/Target/TargetMachine.h"
70 #include "llvm/Target/TargetOptions.h"
71 #include "llvm/Transforms/Utils/SizeOpts.h"
72 #include <algorithm>
73 #include <cassert>
74 #include <cstdint>
75 #include <cstdlib>
76 #include <limits>
77 #include <set>
78 #include <string>
79 #include <utility>
80 #include <vector>
81
82 using namespace llvm;
83
84 /// makeVTList - Return an instance of the SDVTList struct initialized with the
85 /// specified members.
86 static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
87 SDVTList Res = {VTs, NumVTs};
88 return Res;
89 }
90
91 // Default null implementations of the callbacks.
92 void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
93 void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
94 void SelectionDAG::DAGUpdateListener::NodeInserted(SDNode *) {}
95
96 void SelectionDAG::DAGNodeDeletedListener::anchor() {}
97
98 #define DEBUG_TYPE "selectiondag"
99
100 static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt",
101 cl::Hidden, cl::init(true),
102 cl::desc("Gang up loads and stores generated by inlining of memcpy"));
103
104 static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max",
105 cl::desc("Number limit for gluing ld/st of memcpy."),
106 cl::Hidden, cl::init(0));
107
108 static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) {
109 LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G););
110 }
111
112 //===----------------------------------------------------------------------===//
113 // ConstantFPSDNode Class
114 //===----------------------------------------------------------------------===//
115
116 /// isExactlyValue - We don't rely on operator== working on double values, as
117 /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
118 /// As such, this method can be used to do an exact bit-for-bit comparison of
119 /// two floating point values.
120 bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
121 return getValueAPF().bitwiseIsEqual(V);
122 }
123
124 bool ConstantFPSDNode::isValueValidForType(EVT VT,
125 const APFloat& Val) {
126 assert(VT.isFloatingPoint() && "Can only convert between FP types");
127
128 // convert modifies in place, so make a copy.
129 APFloat Val2 = APFloat(Val);
130 bool losesInfo;
131 (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
132 APFloat::rmNearestTiesToEven,
133 &losesInfo);
134 return !losesInfo;
135 }
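// Illustrative example (editor addition, not part of the original source):
// isValueValidForType(MVT::f32, APFloat(0.5)) is true because 0.5 converts to
// single precision without losing bits, while passing APFloat(0.1) returns
// false because the double 0.1 has no exact single-precision representation,
// so the conversion above reports losesInfo.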
136
137 //===----------------------------------------------------------------------===//
138 // ISD Namespace
139 //===----------------------------------------------------------------------===//
140
141 bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
142 if (N->getOpcode() == ISD::SPLAT_VECTOR) {
143 unsigned EltSize =
144 N->getValueType(0).getVectorElementType().getSizeInBits();
145 if (auto *Op0 = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
146 SplatVal = Op0->getAPIntValue().truncOrSelf(EltSize);
147 return true;
148 }
149 }
150
151 auto *BV = dyn_cast<BuildVectorSDNode>(N);
152 if (!BV)
153 return false;
154
155 APInt SplatUndef;
156 unsigned SplatBitSize;
157 bool HasUndefs;
158 unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
159 return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
160 EltSize) &&
161 EltSize == SplatBitSize;
162 }
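// Illustrative example (editor addition): for a BUILD_VECTOR of four i32
// operands that are all the constant 42, SplatVal is set to APInt(32, 42) and
// the function returns true. For a SPLAT_VECTOR whose scalar operand is a
// ConstantSDNode, the constant is truncated to the element width first.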
163
164 // FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
165 // specializations of the more general isConstantSplatVector()?
166
167 bool ISD::isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly) {
168 // Look through a bit convert.
169 while (N->getOpcode() == ISD::BITCAST)
170 N = N->getOperand(0).getNode();
171
172 if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
173 APInt SplatVal;
174 return isConstantSplatVector(N, SplatVal) && SplatVal.isAllOnesValue();
175 }
176
177 if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
178
179 unsigned i = 0, e = N->getNumOperands();
180
181 // Skip over all of the undef values.
182 while (i != e && N->getOperand(i).isUndef())
183 ++i;
184
185 // Do not accept an all-undef vector.
186 if (i == e) return false;
187
188 // Do not accept build_vectors that aren't all constants or which have non-~0
189 // elements. We have to be a bit careful here, as the type of the constant
190 // may not be the same as the type of the vector elements due to type
191 // legalization (the elements are promoted to a legal type for the target and
192 // a vector of a type may be legal when the base element type is not).
193 // We only want to check enough bits to cover the vector elements, because
194 // we care if the resultant vector is all ones, not whether the individual
195 // constants are.
196 SDValue NotZero = N->getOperand(i);
197 unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
198 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
199 if (CN->getAPIntValue().countTrailingOnes() < EltSize)
200 return false;
201 } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
202 if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
203 return false;
204 } else
205 return false;
206
207 // Okay, we have at least one ~0 value, check to see if the rest match or are
208 // undefs. Even with the above element type twiddling, this should be OK, as
209 // the same type legalization should have applied to all the elements.
210 for (++i; i != e; ++i)
211 if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
212 return false;
213 return true;
214 }
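// Worked example (editor addition): if a v8i8 build_vector had its elements
// promoted to i32 during type legalization, an element constant of 0x000000FF
// is still accepted as "all ones" because it has at least EltSize (8) trailing
// one bits, even though the promoted i32 value itself is not ~0.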
215
216 bool ISD::isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly) {
217 // Look through a bit convert.
218 while (N->getOpcode() == ISD::BITCAST)
219 N = N->getOperand(0).getNode();
220
221 if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
222 APInt SplatVal;
223 return isConstantSplatVector(N, SplatVal) && SplatVal.isNullValue();
224 }
225
226 if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
227
228 bool IsAllUndef = true;
229 for (const SDValue &Op : N->op_values()) {
230 if (Op.isUndef())
231 continue;
232 IsAllUndef = false;
233 // Do not accept build_vectors that aren't all constants or which have non-0
234 // elements. We have to be a bit careful here, as the type of the constant
235 // may not be the same as the type of the vector elements due to type
236 // legalization (the elements are promoted to a legal type for the target
237 // and a vector of a type may be legal when the base element type is not).
238 // We only want to check enough bits to cover the vector elements, because
239 // we care if the resultant vector is all zeros, not whether the individual
240 // constants are.
241 unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
242 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
243 if (CN->getAPIntValue().countTrailingZeros() < EltSize)
244 return false;
245 } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
246 if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
247 return false;
248 } else
249 return false;
250 }
251
252 // Do not accept an all-undef vector.
253 if (IsAllUndef)
254 return false;
255 return true;
256 }
257
258 bool ISD::isBuildVectorAllOnes(const SDNode *N) {
259 return isConstantSplatVectorAllOnes(N, /*BuildVectorOnly*/ true);
260 }
261
262 bool ISD::isBuildVectorAllZeros(const SDNode *N) {
263 return isConstantSplatVectorAllZeros(N, /*BuildVectorOnly*/ true);
264 }
265
266 bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
267 if (N->getOpcode() != ISD::BUILD_VECTOR)
268 return false;
269
270 for (const SDValue &Op : N->op_values()) {
271 if (Op.isUndef())
272 continue;
273 if (!isa<ConstantSDNode>(Op))
274 return false;
275 }
276 return true;
277 }
278
279 bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
280 if (N->getOpcode() != ISD::BUILD_VECTOR)
281 return false;
282
283 for (const SDValue &Op : N->op_values()) {
284 if (Op.isUndef())
285 continue;
286 if (!isa<ConstantFPSDNode>(Op))
287 return false;
288 }
289 return true;
290 }
291
292 bool ISD::allOperandsUndef(const SDNode *N) {
293 // Return false if the node has no operands.
294 // This is "logically inconsistent" with the definition of "all" but
295 // is probably the desired behavior.
296 if (N->getNumOperands() == 0)
297 return false;
298 return all_of(N->op_values(), [](SDValue Op) { return Op.isUndef(); });
299 }
300
301 bool ISD::matchUnaryPredicate(SDValue Op,
302 std::function<bool(ConstantSDNode *)> Match,
303 bool AllowUndefs) {
304 // FIXME: Add support for scalar UNDEF cases?
305 if (auto *Cst = dyn_cast<ConstantSDNode>(Op))
306 return Match(Cst);
307
308 // FIXME: Add support for vector UNDEF cases?
309 if (ISD::BUILD_VECTOR != Op.getOpcode() &&
310 ISD::SPLAT_VECTOR != Op.getOpcode())
311 return false;
312
313 EVT SVT = Op.getValueType().getScalarType();
314 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
315 if (AllowUndefs && Op.getOperand(i).isUndef()) {
316 if (!Match(nullptr))
317 return false;
318 continue;
319 }
320
321 auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(i));
322 if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst))
323 return false;
324 }
325 return true;
326 }
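// Sketch of a typical call site (editor addition, hypothetical usage): check a
// property of a uniform constant, whether scalar or per-element:
//   bool IsPow2Splat = ISD::matchUnaryPredicate(Op, [](ConstantSDNode *C) {
//     return C && C->getAPIntValue().isPowerOf2();
//   });
// When AllowUndefs is true the predicate may be called with a null pointer for
// undef elements, so predicates should tolerate that case.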
327
328 bool ISD::matchBinaryPredicate(
329 SDValue LHS, SDValue RHS,
330 std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
331 bool AllowUndefs, bool AllowTypeMismatch) {
332 if (!AllowTypeMismatch && LHS.getValueType() != RHS.getValueType())
333 return false;
334
335 // TODO: Add support for scalar UNDEF cases?
336 if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
337 if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
338 return Match(LHSCst, RHSCst);
339
340 // TODO: Add support for vector UNDEF cases?
341 if (ISD::BUILD_VECTOR != LHS.getOpcode() ||
342 ISD::BUILD_VECTOR != RHS.getOpcode())
343 return false;
344
345 EVT SVT = LHS.getValueType().getScalarType();
346 for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
347 SDValue LHSOp = LHS.getOperand(i);
348 SDValue RHSOp = RHS.getOperand(i);
349 bool LHSUndef = AllowUndefs && LHSOp.isUndef();
350 bool RHSUndef = AllowUndefs && RHSOp.isUndef();
351 auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
352 auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
353 if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
354 return false;
355 if (!AllowTypeMismatch && (LHSOp.getValueType() != SVT ||
356 LHSOp.getValueType() != RHSOp.getValueType()))
357 return false;
358 if (!Match(LHSCst, RHSCst))
359 return false;
360 }
361 return true;
362 }
363
364 ISD::NodeType ISD::getVecReduceBaseOpcode(unsigned VecReduceOpcode) {
365 switch (VecReduceOpcode) {
366 default:
367 llvm_unreachable("Expected VECREDUCE opcode");
368 case ISD::VECREDUCE_FADD:
369 case ISD::VECREDUCE_SEQ_FADD:
370 return ISD::FADD;
371 case ISD::VECREDUCE_FMUL:
372 case ISD::VECREDUCE_SEQ_FMUL:
373 return ISD::FMUL;
374 case ISD::VECREDUCE_ADD:
375 return ISD::ADD;
376 case ISD::VECREDUCE_MUL:
377 return ISD::MUL;
378 case ISD::VECREDUCE_AND:
379 return ISD::AND;
380 case ISD::VECREDUCE_OR:
381 return ISD::OR;
382 case ISD::VECREDUCE_XOR:
383 return ISD::XOR;
384 case ISD::VECREDUCE_SMAX:
385 return ISD::SMAX;
386 case ISD::VECREDUCE_SMIN:
387 return ISD::SMIN;
388 case ISD::VECREDUCE_UMAX:
389 return ISD::UMAX;
390 case ISD::VECREDUCE_UMIN:
391 return ISD::UMIN;
392 case ISD::VECREDUCE_FMAX:
393 return ISD::FMAXNUM;
394 case ISD::VECREDUCE_FMIN:
395 return ISD::FMINNUM;
396 }
397 }
398
399 bool ISD::isVPOpcode(unsigned Opcode) {
400 switch (Opcode) {
401 default:
402 return false;
403 #define BEGIN_REGISTER_VP_SDNODE(SDOPC, ...) \
404 case ISD::SDOPC: \
405 return true;
406 #include "llvm/IR/VPIntrinsics.def"
407 }
408 }
409
410 /// The operand position of the vector mask.
411 Optional<unsigned> ISD::getVPMaskIdx(unsigned Opcode) {
412 switch (Opcode) {
413 default:
414 return None;
415 #define BEGIN_REGISTER_VP_SDNODE(SDOPC, LEGALPOS, TDNAME, MASKPOS, ...) \
416 case ISD::SDOPC: \
417 return MASKPOS;
418 #include "llvm/IR/VPIntrinsics.def"
419 }
420 }
421
422 /// The operand position of the explicit vector length parameter.
423 Optional<unsigned> ISD::getVPExplicitVectorLengthIdx(unsigned Opcode) {
424 switch (Opcode) {
425 default:
426 return None;
427 #define BEGIN_REGISTER_VP_SDNODE(SDOPC, LEGALPOS, TDNAME, MASKPOS, EVLPOS) \
428 case ISD::SDOPC: \
429 return EVLPOS;
430 #include "llvm/IR/VPIntrinsics.def"
431 }
432 }
433
434 ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
435 switch (ExtType) {
436 case ISD::EXTLOAD:
437 return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
438 case ISD::SEXTLOAD:
439 return ISD::SIGN_EXTEND;
440 case ISD::ZEXTLOAD:
441 return ISD::ZERO_EXTEND;
442 default:
443 break;
444 }
445
446 llvm_unreachable("Invalid LoadExtType");
447 }
448
449 ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
450 // To perform this operation, we just need to swap the L and G bits of the
451 // operation.
452 unsigned OldL = (Operation >> 2) & 1;
453 unsigned OldG = (Operation >> 1) & 1;
454 return ISD::CondCode((Operation & ~6) | // Keep the N, U, E bits
455 (OldL << 1) | // New G bit
456 (OldG << 2)); // New L bit.
457 }
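// Examples (editor addition), using the bit layout implied above
// (E = bit 0, G = bit 1, L = bit 2):
//   getSetCCSwappedOperands(ISD::SETULT) == ISD::SETUGT
//   getSetCCSwappedOperands(ISD::SETOGE) == ISD::SETOLE
//   getSetCCSwappedOperands(ISD::SETEQ)  == ISD::SETEQ   // E is unaffected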
458
459 static ISD::CondCode getSetCCInverseImpl(ISD::CondCode Op, bool isIntegerLike) {
460 unsigned Operation = Op;
461 if (isIntegerLike)
462 Operation ^= 7; // Flip L, G, E bits, but not U.
463 else
464 Operation ^= 15; // Flip all of the condition bits.
465
466 if (Operation > ISD::SETTRUE2)
467 Operation &= ~8; // Don't let N and U bits get set.
468
469 return ISD::CondCode(Operation);
470 }
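// Examples (editor addition): for an integer comparison only the L, G and E
// bits flip, so SETLT becomes SETGE; for a floating-point comparison the
// unordered bit flips as well, so SETOLT becomes SETUGE.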
471
472 ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, EVT Type) {
473 return getSetCCInverseImpl(Op, Type.isInteger());
474 }
475
476 ISD::CondCode ISD::GlobalISel::getSetCCInverse(ISD::CondCode Op,
477 bool isIntegerLike) {
478 return getSetCCInverseImpl(Op, isIntegerLike);
479 }
480
481 /// For an integer comparison, return 1 if the comparison is a signed operation
482 /// and 2 if it is an unsigned comparison. Return zero if the operation
483 /// does not depend on the sign of the input (setne and seteq).
484 static int isSignedOp(ISD::CondCode Opcode) {
485 switch (Opcode) {
486 default: llvm_unreachable("Illegal integer setcc operation!");
487 case ISD::SETEQ:
488 case ISD::SETNE: return 0;
489 case ISD::SETLT:
490 case ISD::SETLE:
491 case ISD::SETGT:
492 case ISD::SETGE: return 1;
493 case ISD::SETULT:
494 case ISD::SETULE:
495 case ISD::SETUGT:
496 case ISD::SETUGE: return 2;
497 }
498 }
499
500 ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
501 EVT Type) {
502 bool IsInteger = Type.isInteger();
503 if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
504 // Cannot fold a signed integer setcc with an unsigned integer setcc.
505 return ISD::SETCC_INVALID;
506
507 unsigned Op = Op1 | Op2; // Combine all of the condition bits.
508
509 // If the N and U bits get set, then the resultant comparison DOES suddenly
510 // care about orderedness, and it is true when ordered.
511 if (Op > ISD::SETTRUE2)
512 Op &= ~16; // Clear the U bit if the N bit is set.
513
514 // Canonicalize illegal integer setcc's.
515 if (IsInteger && Op == ISD::SETUNE) // e.g. SETUGT | SETULT
516 Op = ISD::SETNE;
517
518 return ISD::CondCode(Op);
519 }
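// Examples (editor addition):
//   getSetCCOrOperation(ISD::SETGT,  ISD::SETEQ,  MVT::i32) == ISD::SETGE
//   getSetCCOrOperation(ISD::SETUGT, ISD::SETULT, MVT::i32) == ISD::SETNE
// Combining a signed with an unsigned integer comparison (e.g. SETLT and
// SETUGT) returns ISD::SETCC_INVALID, as checked above.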
520
521 ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
522 EVT Type) {
523 bool IsInteger = Type.isInteger();
524 if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
525 // Cannot fold a signed setcc with an unsigned setcc.
526 return ISD::SETCC_INVALID;
527
528 // Combine all of the condition bits.
529 ISD::CondCode Result = ISD::CondCode(Op1 & Op2);
530
531 // Canonicalize illegal integer setcc's.
532 if (IsInteger) {
533 switch (Result) {
534 default: break;
535 case ISD::SETUO : Result = ISD::SETFALSE; break; // SETUGT & SETULT
536 case ISD::SETOEQ: // SETEQ & SETU[LG]E
537 case ISD::SETUEQ: Result = ISD::SETEQ ; break; // SETUGE & SETULE
538 case ISD::SETOLT: Result = ISD::SETULT ; break; // SETULT & SETNE
539 case ISD::SETOGT: Result = ISD::SETUGT ; break; // SETUGT & SETNE
540 }
541 }
542
543 return Result;
544 }
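// Examples (editor addition):
//   getSetCCAndOperation(ISD::SETLE,  ISD::SETGE,  MVT::i32) == ISD::SETEQ
//   getSetCCAndOperation(ISD::SETUGT, ISD::SETULT, MVT::i32) == ISD::SETFALSE
// The second case first produces SETUO, which the integer canonicalization
// above turns into SETFALSE.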
545
546 //===----------------------------------------------------------------------===//
547 // SDNode Profile Support
548 //===----------------------------------------------------------------------===//
549
550 /// AddNodeIDOpcode - Add the node opcode to the NodeID data.
551 static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
552 ID.AddInteger(OpC);
553 }
554
555 /// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
556 /// solely with their pointer.
557 static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
558 ID.AddPointer(VTList.VTs);
559 }
560
561 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
562 static void AddNodeIDOperands(FoldingSetNodeID &ID,
563 ArrayRef<SDValue> Ops) {
564 for (auto& Op : Ops) {
565 ID.AddPointer(Op.getNode());
566 ID.AddInteger(Op.getResNo());
567 }
568 }
569
570 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
571 static void AddNodeIDOperands(FoldingSetNodeID &ID,
572 ArrayRef<SDUse> Ops) {
573 for (auto& Op : Ops) {
574 ID.AddPointer(Op.getNode());
575 ID.AddInteger(Op.getResNo());
576 }
577 }
578
579 static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
580 SDVTList VTList, ArrayRef<SDValue> OpList) {
581 AddNodeIDOpcode(ID, OpC);
582 AddNodeIDValueTypes(ID, VTList);
583 AddNodeIDOperands(ID, OpList);
584 }
585
586 /// If this is an SDNode with special info, add this info to the NodeID data.
587 static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
588 switch (N->getOpcode()) {
589 case ISD::TargetExternalSymbol:
590 case ISD::ExternalSymbol:
591 case ISD::MCSymbol:
592 llvm_unreachable("Should only be used on nodes with operands");
593 default: break; // Normal nodes don't need extra info.
594 case ISD::TargetConstant:
595 case ISD::Constant: {
596 const ConstantSDNode *C = cast<ConstantSDNode>(N);
597 ID.AddPointer(C->getConstantIntValue());
598 ID.AddBoolean(C->isOpaque());
599 break;
600 }
601 case ISD::TargetConstantFP:
602 case ISD::ConstantFP:
603 ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
604 break;
605 case ISD::TargetGlobalAddress:
606 case ISD::GlobalAddress:
607 case ISD::TargetGlobalTLSAddress:
608 case ISD::GlobalTLSAddress: {
609 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
610 ID.AddPointer(GA->getGlobal());
611 ID.AddInteger(GA->getOffset());
612 ID.AddInteger(GA->getTargetFlags());
613 break;
614 }
615 case ISD::BasicBlock:
616 ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
617 break;
618 case ISD::Register:
619 ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
620 break;
621 case ISD::RegisterMask:
622 ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
623 break;
624 case ISD::SRCVALUE:
625 ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
626 break;
627 case ISD::FrameIndex:
628 case ISD::TargetFrameIndex:
629 ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
630 break;
631 case ISD::LIFETIME_START:
632 case ISD::LIFETIME_END:
633 if (cast<LifetimeSDNode>(N)->hasOffset()) {
634 ID.AddInteger(cast<LifetimeSDNode>(N)->getSize());
635 ID.AddInteger(cast<LifetimeSDNode>(N)->getOffset());
636 }
637 break;
638 case ISD::PSEUDO_PROBE:
639 ID.AddInteger(cast<PseudoProbeSDNode>(N)->getGuid());
640 ID.AddInteger(cast<PseudoProbeSDNode>(N)->getIndex());
641 ID.AddInteger(cast<PseudoProbeSDNode>(N)->getAttributes());
642 break;
643 case ISD::JumpTable:
644 case ISD::TargetJumpTable:
645 ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
646 ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
647 break;
648 case ISD::ConstantPool:
649 case ISD::TargetConstantPool: {
650 const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
651 ID.AddInteger(CP->getAlign().value());
652 ID.AddInteger(CP->getOffset());
653 if (CP->isMachineConstantPoolEntry())
654 CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
655 else
656 ID.AddPointer(CP->getConstVal());
657 ID.AddInteger(CP->getTargetFlags());
658 break;
659 }
660 case ISD::TargetIndex: {
661 const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
662 ID.AddInteger(TI->getIndex());
663 ID.AddInteger(TI->getOffset());
664 ID.AddInteger(TI->getTargetFlags());
665 break;
666 }
667 case ISD::LOAD: {
668 const LoadSDNode *LD = cast<LoadSDNode>(N);
669 ID.AddInteger(LD->getMemoryVT().getRawBits());
670 ID.AddInteger(LD->getRawSubclassData());
671 ID.AddInteger(LD->getPointerInfo().getAddrSpace());
672 break;
673 }
674 case ISD::STORE: {
675 const StoreSDNode *ST = cast<StoreSDNode>(N);
676 ID.AddInteger(ST->getMemoryVT().getRawBits());
677 ID.AddInteger(ST->getRawSubclassData());
678 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
679 break;
680 }
681 case ISD::MLOAD: {
682 const MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N);
683 ID.AddInteger(MLD->getMemoryVT().getRawBits());
684 ID.AddInteger(MLD->getRawSubclassData());
685 ID.AddInteger(MLD->getPointerInfo().getAddrSpace());
686 break;
687 }
688 case ISD::MSTORE: {
689 const MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
690 ID.AddInteger(MST->getMemoryVT().getRawBits());
691 ID.AddInteger(MST->getRawSubclassData());
692 ID.AddInteger(MST->getPointerInfo().getAddrSpace());
693 break;
694 }
695 case ISD::MGATHER: {
696 const MaskedGatherSDNode *MG = cast<MaskedGatherSDNode>(N);
697 ID.AddInteger(MG->getMemoryVT().getRawBits());
698 ID.AddInteger(MG->getRawSubclassData());
699 ID.AddInteger(MG->getPointerInfo().getAddrSpace());
700 break;
701 }
702 case ISD::MSCATTER: {
703 const MaskedScatterSDNode *MS = cast<MaskedScatterSDNode>(N);
704 ID.AddInteger(MS->getMemoryVT().getRawBits());
705 ID.AddInteger(MS->getRawSubclassData());
706 ID.AddInteger(MS->getPointerInfo().getAddrSpace());
707 break;
708 }
709 case ISD::ATOMIC_CMP_SWAP:
710 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
711 case ISD::ATOMIC_SWAP:
712 case ISD::ATOMIC_LOAD_ADD:
713 case ISD::ATOMIC_LOAD_SUB:
714 case ISD::ATOMIC_LOAD_AND:
715 case ISD::ATOMIC_LOAD_CLR:
716 case ISD::ATOMIC_LOAD_OR:
717 case ISD::ATOMIC_LOAD_XOR:
718 case ISD::ATOMIC_LOAD_NAND:
719 case ISD::ATOMIC_LOAD_MIN:
720 case ISD::ATOMIC_LOAD_MAX:
721 case ISD::ATOMIC_LOAD_UMIN:
722 case ISD::ATOMIC_LOAD_UMAX:
723 case ISD::ATOMIC_LOAD:
724 case ISD::ATOMIC_STORE: {
725 const AtomicSDNode *AT = cast<AtomicSDNode>(N);
726 ID.AddInteger(AT->getMemoryVT().getRawBits());
727 ID.AddInteger(AT->getRawSubclassData());
728 ID.AddInteger(AT->getPointerInfo().getAddrSpace());
729 break;
730 }
731 case ISD::PREFETCH: {
732 const MemSDNode *PF = cast<MemSDNode>(N);
733 ID.AddInteger(PF->getPointerInfo().getAddrSpace());
734 break;
735 }
736 case ISD::VECTOR_SHUFFLE: {
737 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
738 for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
739 i != e; ++i)
740 ID.AddInteger(SVN->getMaskElt(i));
741 break;
742 }
743 case ISD::TargetBlockAddress:
744 case ISD::BlockAddress: {
745 const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
746 ID.AddPointer(BA->getBlockAddress());
747 ID.AddInteger(BA->getOffset());
748 ID.AddInteger(BA->getTargetFlags());
749 break;
750 }
751 } // end switch (N->getOpcode())
752
753 // Target specific memory nodes could also have address spaces to check.
754 if (N->isTargetMemoryOpcode())
755 ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
756 }
757
758 /// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
759 /// data.
760 static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
761 AddNodeIDOpcode(ID, N->getOpcode());
762 // Add the return value info.
763 AddNodeIDValueTypes(ID, N->getVTList());
764 // Add the operand info.
765 AddNodeIDOperands(ID, N->ops());
766
767   // Handle SDNode leaves with special info.
768 AddNodeIDCustom(ID, N);
769 }
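// Editor note: the resulting FoldingSet key is effectively
//   [opcode, VT-list pointer, (operand SDNode*, result number) pairs,
//    any opcode-specific data added by AddNodeIDCustom]
// and two nodes that agree on all of these fields are treated as structurally
// identical by the CSE maps below.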
770
771 //===----------------------------------------------------------------------===//
772 // SelectionDAG Class
773 //===----------------------------------------------------------------------===//
774
775 /// doNotCSE - Return true if CSE should not be performed for this node.
776 static bool doNotCSE(SDNode *N) {
777 if (N->getValueType(0) == MVT::Glue)
778 return true; // Never CSE anything that produces a flag.
779
780 switch (N->getOpcode()) {
781 default: break;
782 case ISD::HANDLENODE:
783 case ISD::EH_LABEL:
784 return true; // Never CSE these nodes.
785 }
786
787 // Check that remaining values produced are not flags.
788 for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
789 if (N->getValueType(i) == MVT::Glue)
790 return true; // Never CSE anything that produces a flag.
791
792 return false;
793 }
794
795 /// RemoveDeadNodes - This method deletes all unreachable nodes in the
796 /// SelectionDAG.
797 void SelectionDAG::RemoveDeadNodes() {
798   // Create a dummy node (which is not added to allnodes) that adds a reference
799 // to the root node, preventing it from being deleted.
800 HandleSDNode Dummy(getRoot());
801
802 SmallVector<SDNode*, 128> DeadNodes;
803
804 // Add all obviously-dead nodes to the DeadNodes worklist.
805 for (SDNode &Node : allnodes())
806 if (Node.use_empty())
807 DeadNodes.push_back(&Node);
808
809 RemoveDeadNodes(DeadNodes);
810
811   // If the root changed (e.g. it was a dead load), update the root.
812 setRoot(Dummy.getValue());
813 }
814
815 /// RemoveDeadNodes - This method deletes the unreachable nodes in the
816 /// given list, and any nodes that become unreachable as a result.
817 void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {
818
819 // Process the worklist, deleting the nodes and adding their uses to the
820 // worklist.
821 while (!DeadNodes.empty()) {
822 SDNode *N = DeadNodes.pop_back_val();
823     // Skip to the next node if we've already managed to delete the node. This
824     // could happen if replacing a node causes a node previously added to the
825     // worklist to be deleted.
826 if (N->getOpcode() == ISD::DELETED_NODE)
827 continue;
828
829 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
830 DUL->NodeDeleted(N, nullptr);
831
832 // Take the node out of the appropriate CSE map.
833 RemoveNodeFromCSEMaps(N);
834
835 // Next, brutally remove the operand list. This is safe to do, as there are
836 // no cycles in the graph.
837 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
838 SDUse &Use = *I++;
839 SDNode *Operand = Use.getNode();
840 Use.set(SDValue());
841
842 // Now that we removed this operand, see if there are no uses of it left.
843 if (Operand->use_empty())
844 DeadNodes.push_back(Operand);
845 }
846
847 DeallocateNode(N);
848 }
849 }
850
851 void SelectionDAG::RemoveDeadNode(SDNode *N) {
852 SmallVector<SDNode*, 16> DeadNodes(1, N);
853
854 // Create a dummy node that adds a reference to the root node, preventing
855 // it from being deleted. (This matters if the root is an operand of the
856 // dead node.)
857 HandleSDNode Dummy(getRoot());
858
859 RemoveDeadNodes(DeadNodes);
860 }
861
862 void SelectionDAG::DeleteNode(SDNode *N) {
863 // First take this out of the appropriate CSE map.
864 RemoveNodeFromCSEMaps(N);
865
866 // Finally, remove uses due to operands of this node, remove from the
867 // AllNodes list, and delete the node.
868 DeleteNodeNotInCSEMaps(N);
869 }
870
871 void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
872 assert(N->getIterator() != AllNodes.begin() &&
873 "Cannot delete the entry node!");
874 assert(N->use_empty() && "Cannot delete a node that is not dead!");
875
876 // Drop all of the operands and decrement used node's use counts.
877 N->DropOperands();
878
879 DeallocateNode(N);
880 }
881
882 void SDDbgInfo::erase(const SDNode *Node) {
883 DbgValMapType::iterator I = DbgValMap.find(Node);
884 if (I == DbgValMap.end())
885 return;
886 for (auto &Val: I->second)
887 Val->setIsInvalidated();
888 DbgValMap.erase(I);
889 }
890
891 void SelectionDAG::DeallocateNode(SDNode *N) {
892 // If we have operands, deallocate them.
893 removeOperands(N);
894
895 NodeAllocator.Deallocate(AllNodes.remove(N));
896
897 // Set the opcode to DELETED_NODE to help catch bugs when node
898 // memory is reallocated.
899 // FIXME: There are places in SDag that have grown a dependency on the opcode
900 // value in the released node.
901 __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
902 N->NodeType = ISD::DELETED_NODE;
903
904 // If any of the SDDbgValue nodes refer to this SDNode, invalidate
905 // them and forget about that node.
906 DbgInfo->erase(N);
907 }
908
909 #ifndef NDEBUG
910 /// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
911 static void VerifySDNode(SDNode *N) {
912 switch (N->getOpcode()) {
913 default:
914 break;
915 case ISD::BUILD_PAIR: {
916 EVT VT = N->getValueType(0);
917 assert(N->getNumValues() == 1 && "Too many results!");
918 assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
919 "Wrong return type!");
920 assert(N->getNumOperands() == 2 && "Wrong number of operands!");
921 assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
922 "Mismatched operand types!");
923 assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
924 "Wrong operand type!");
925 assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
926 "Wrong return type size");
927 break;
928 }
929 case ISD::BUILD_VECTOR: {
930 assert(N->getNumValues() == 1 && "Too many results!");
931 assert(N->getValueType(0).isVector() && "Wrong return type!");
932 assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
933 "Wrong number of operands!");
934 EVT EltVT = N->getValueType(0).getVectorElementType();
935 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
936 assert((I->getValueType() == EltVT ||
937 (EltVT.isInteger() && I->getValueType().isInteger() &&
938 EltVT.bitsLE(I->getValueType()))) &&
939 "Wrong operand type!");
940 assert(I->getValueType() == N->getOperand(0).getValueType() &&
941 "Operands must all have the same type");
942 }
943 break;
944 }
945 }
946 }
947 #endif // NDEBUG
948
949 /// Insert a newly allocated node into the DAG.
950 ///
951 /// Handles insertion into the all nodes list and CSE map, as well as
952 /// verification and other common operations when a new node is allocated.
953 void SelectionDAG::InsertNode(SDNode *N) {
954 AllNodes.push_back(N);
955 #ifndef NDEBUG
956 N->PersistentId = NextPersistentId++;
957 VerifySDNode(N);
958 #endif
959 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
960 DUL->NodeInserted(N);
961 }
962
963 /// RemoveNodeFromCSEMaps - Take the specified node out of the CSE maps that
964 /// correspond to it. This is useful when we're about to delete or repurpose
965 /// the node. We don't want future requests for structurally identical nodes
966 /// to return N anymore.
967 bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
968 bool Erased = false;
969 switch (N->getOpcode()) {
970 case ISD::HANDLENODE: return false; // noop.
971 case ISD::CONDCODE:
972 assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
973 "Cond code doesn't exist!");
974 Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
975 CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
976 break;
977 case ISD::ExternalSymbol:
978 Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
979 break;
980 case ISD::TargetExternalSymbol: {
981 ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
982 Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>(
983 ESN->getSymbol(), ESN->getTargetFlags()));
984 break;
985 }
986 case ISD::MCSymbol: {
987 auto *MCSN = cast<MCSymbolSDNode>(N);
988 Erased = MCSymbols.erase(MCSN->getMCSymbol());
989 break;
990 }
991 case ISD::VALUETYPE: {
992 EVT VT = cast<VTSDNode>(N)->getVT();
993 if (VT.isExtended()) {
994 Erased = ExtendedValueTypeNodes.erase(VT);
995 } else {
996 Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
997 ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
998 }
999 break;
1000 }
1001 default:
1002 // Remove it from the CSE Map.
1003 assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
1004 assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
1005 Erased = CSEMap.RemoveNode(N);
1006 break;
1007 }
1008 #ifndef NDEBUG
1009 // Verify that the node was actually in one of the CSE maps, unless it has a
1010 // flag result (which cannot be CSE'd) or is one of the special cases that are
1011 // not subject to CSE.
1012 if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
1013 !N->isMachineOpcode() && !doNotCSE(N)) {
1014 N->dump(this);
1015 dbgs() << "\n";
1016 llvm_unreachable("Node is not in map!");
1017 }
1018 #endif
1019 return Erased;
1020 }
1021
1022 /// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
1023 /// maps and modified in place. Add it back to the CSE maps, unless an identical
1024 /// node already exists, in which case transfer all its users to the existing
1025 /// node. This transfer can potentially trigger recursive merging.
1026 void
1027 SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
1028 // For node types that aren't CSE'd, just act as if no identical node
1029 // already exists.
1030 if (!doNotCSE(N)) {
1031 SDNode *Existing = CSEMap.GetOrInsertNode(N);
1032 if (Existing != N) {
1033 // If there was already an existing matching node, use ReplaceAllUsesWith
1034 // to replace the dead one with the existing one. This can cause
1035 // recursive merging of other unrelated nodes down the line.
1036 ReplaceAllUsesWith(N, Existing);
1037
1038 // N is now dead. Inform the listeners and delete it.
1039 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1040 DUL->NodeDeleted(N, Existing);
1041 DeleteNodeNotInCSEMaps(N);
1042 return;
1043 }
1044 }
1045
1046 // If the node doesn't already exist, we updated it. Inform listeners.
1047 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1048 DUL->NodeUpdated(N);
1049 }
1050
1051 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
1052 /// were replaced with those specified. If this node is never memoized,
1053 /// return null, otherwise return a pointer to the slot it would take. If a
1054 /// node already exists with these operands, the slot will be non-null.
1055 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
1056 void *&InsertPos) {
1057 if (doNotCSE(N))
1058 return nullptr;
1059
1060 SDValue Ops[] = { Op };
1061 FoldingSetNodeID ID;
1062 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
1063 AddNodeIDCustom(ID, N);
1064 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
1065 if (Node)
1066 Node->intersectFlagsWith(N->getFlags());
1067 return Node;
1068 }
1069
1070 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
1071 /// were replaced with those specified. If this node is never memoized,
1072 /// return null, otherwise return a pointer to the slot it would take. If a
1073 /// node already exists with these operands, the slot will be non-null.
1074 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
1075 SDValue Op1, SDValue Op2,
1076 void *&InsertPos) {
1077 if (doNotCSE(N))
1078 return nullptr;
1079
1080 SDValue Ops[] = { Op1, Op2 };
1081 FoldingSetNodeID ID;
1082 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
1083 AddNodeIDCustom(ID, N);
1084 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
1085 if (Node)
1086 Node->intersectFlagsWith(N->getFlags());
1087 return Node;
1088 }
1089
1090 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
1091 /// were replaced with those specified. If this node is never memoized,
1092 /// return null, otherwise return a pointer to the slot it would take. If a
1093 /// node already exists with these operands, the slot will be non-null.
1094 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
1095 void *&InsertPos) {
1096 if (doNotCSE(N))
1097 return nullptr;
1098
1099 FoldingSetNodeID ID;
1100 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
1101 AddNodeIDCustom(ID, N);
1102 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
1103 if (Node)
1104 Node->intersectFlagsWith(N->getFlags());
1105 return Node;
1106 }
1107
1108 Align SelectionDAG::getEVTAlign(EVT VT) const {
1109 Type *Ty = VT == MVT::iPTR ?
1110 PointerType::get(Type::getInt8Ty(*getContext()), 0) :
1111 VT.getTypeForEVT(*getContext());
1112
1113 return getDataLayout().getABITypeAlign(Ty);
1114 }
1115
1116 // EntryNode could meaningfully have debug info if we can find it...
1117 SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
1118 : TM(tm), OptLevel(OL),
1119 EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
1120 Root(getEntryNode()) {
1121 InsertNode(&EntryNode);
1122 DbgInfo = new SDDbgInfo();
1123 }
1124
1125 void SelectionDAG::init(MachineFunction &NewMF,
1126 OptimizationRemarkEmitter &NewORE,
1127 Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
1128 LegacyDivergenceAnalysis * Divergence,
1129 ProfileSummaryInfo *PSIin,
1130 BlockFrequencyInfo *BFIin) {
1131 MF = &NewMF;
1132 SDAGISelPass = PassPtr;
1133 ORE = &NewORE;
1134 TLI = getSubtarget().getTargetLowering();
1135 TSI = getSubtarget().getSelectionDAGInfo();
1136 LibInfo = LibraryInfo;
1137 Context = &MF->getFunction().getContext();
1138 DA = Divergence;
1139 PSI = PSIin;
1140 BFI = BFIin;
1141 }
1142
1143 SelectionDAG::~SelectionDAG() {
1144 assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
1145 allnodes_clear();
1146 OperandRecycler.clear(OperandAllocator);
1147 delete DbgInfo;
1148 }
1149
1150 bool SelectionDAG::shouldOptForSize() const {
1151 return MF->getFunction().hasOptSize() ||
1152 llvm::shouldOptimizeForSize(FLI->MBB->getBasicBlock(), PSI, BFI);
1153 }
1154
1155 void SelectionDAG::allnodes_clear() {
1156 assert(&*AllNodes.begin() == &EntryNode);
1157 AllNodes.remove(AllNodes.begin());
1158 while (!AllNodes.empty())
1159 DeallocateNode(&AllNodes.front());
1160 #ifndef NDEBUG
1161 NextPersistentId = 0;
1162 #endif
1163 }
1164
1165 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
1166 void *&InsertPos) {
1167 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
1168 if (N) {
1169 switch (N->getOpcode()) {
1170 default: break;
1171 case ISD::Constant:
1172 case ISD::ConstantFP:
1173 llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
1174 "debug location. Use another overload.");
1175 }
1176 }
1177 return N;
1178 }
1179
1180 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
1181 const SDLoc &DL, void *&InsertPos) {
1182 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
1183 if (N) {
1184 switch (N->getOpcode()) {
1185 case ISD::Constant:
1186 case ISD::ConstantFP:
1187 // Erase debug location from the node if the node is used at several
1188 // different places. Do not propagate one location to all uses as it
1189 // will cause a worse single stepping debugging experience.
1190 if (N->getDebugLoc() != DL.getDebugLoc())
1191 N->setDebugLoc(DebugLoc());
1192 break;
1193 default:
1194 // When the node's point of use is located earlier in the instruction
1195 // sequence than its prior point of use, update its debug info to the
1196 // earlier location.
1197 if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
1198 N->setDebugLoc(DL.getDebugLoc());
1199 break;
1200 }
1201 }
1202 return N;
1203 }
1204
1205 void SelectionDAG::clear() {
1206 allnodes_clear();
1207 OperandRecycler.clear(OperandAllocator);
1208 OperandAllocator.Reset();
1209 CSEMap.clear();
1210
1211 ExtendedValueTypeNodes.clear();
1212 ExternalSymbols.clear();
1213 TargetExternalSymbols.clear();
1214 MCSymbols.clear();
1215 SDCallSiteDbgInfo.clear();
1216 std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
1217 static_cast<CondCodeSDNode*>(nullptr));
1218 std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
1219 static_cast<SDNode*>(nullptr));
1220
1221 EntryNode.UseList = nullptr;
1222 InsertNode(&EntryNode);
1223 Root = getEntryNode();
1224 DbgInfo->clear();
1225 }
1226
1227 SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) {
1228 return VT.bitsGT(Op.getValueType())
1229 ? getNode(ISD::FP_EXTEND, DL, VT, Op)
1230 : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL));
1231 }
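// Editor note: widening (e.g. f32 -> f64) yields FP_EXTEND and narrowing
// (e.g. f64 -> f32) yields FP_ROUND. The extra constant-0 operand on FP_ROUND
// is its truncation flag; 0 conservatively says the rounding may change the
// value, as opposed to 1 which would assert the conversion is exact.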
1232
1233 std::pair<SDValue, SDValue>
1234 SelectionDAG::getStrictFPExtendOrRound(SDValue Op, SDValue Chain,
1235 const SDLoc &DL, EVT VT) {
1236 assert(!VT.bitsEq(Op.getValueType()) &&
1237 "Strict no-op FP extend/round not allowed.");
1238 SDValue Res =
1239 VT.bitsGT(Op.getValueType())
1240 ? getNode(ISD::STRICT_FP_EXTEND, DL, {VT, MVT::Other}, {Chain, Op})
1241 : getNode(ISD::STRICT_FP_ROUND, DL, {VT, MVT::Other},
1242 {Chain, Op, getIntPtrConstant(0, DL)});
1243
1244 return std::pair<SDValue, SDValue>(Res, SDValue(Res.getNode(), 1));
1245 }
1246
1247 SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1248 return VT.bitsGT(Op.getValueType()) ?
1249 getNode(ISD::ANY_EXTEND, DL, VT, Op) :
1250 getNode(ISD::TRUNCATE, DL, VT, Op);
1251 }
1252
1253 SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1254 return VT.bitsGT(Op.getValueType()) ?
1255 getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
1256 getNode(ISD::TRUNCATE, DL, VT, Op);
1257 }
1258
1259 SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1260 return VT.bitsGT(Op.getValueType()) ?
1261 getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
1262 getNode(ISD::TRUNCATE, DL, VT, Op);
1263 }
1264
1265 SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
1266 EVT OpVT) {
1267 if (VT.bitsLE(Op.getValueType()))
1268 return getNode(ISD::TRUNCATE, SL, VT, Op);
1269
1270 TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
1271 return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
1272 }
1273
1274 SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
1275 EVT OpVT = Op.getValueType();
1276 assert(VT.isInteger() && OpVT.isInteger() &&
1277 "Cannot getZeroExtendInReg FP types");
1278 assert(VT.isVector() == OpVT.isVector() &&
1279 "getZeroExtendInReg type should be vector iff the operand "
1280 "type is vector!");
1281 assert((!VT.isVector() ||
1282 VT.getVectorElementCount() == OpVT.getVectorElementCount()) &&
1283 "Vector element counts must match in getZeroExtendInReg");
1284 assert(VT.bitsLE(OpVT) && "Not extending!");
1285 if (OpVT == VT)
1286 return Op;
1287 APInt Imm = APInt::getLowBitsSet(OpVT.getScalarSizeInBits(),
1288 VT.getScalarSizeInBits());
1289 return getNode(ISD::AND, DL, OpVT, Op, getConstant(Imm, DL, OpVT));
1290 }
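// Example (editor addition): zero-extending the low 8 bits of an i32 value in
// place is emitted as (and Op, 0x000000FF); the mask comes from
// APInt::getLowBitsSet(32, 8) above, and vector operands are masked
// element-wise the same way.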
1291
1292 SDValue SelectionDAG::getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1293 // Only unsigned pointer semantics are supported right now. In the future this
1294 // might delegate to TLI to check pointer signedness.
1295 return getZExtOrTrunc(Op, DL, VT);
1296 }
1297
1298 SDValue SelectionDAG::getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
1299 // Only unsigned pointer semantics are supported right now. In the future this
1300 // might delegate to TLI to check pointer signedness.
1301 return getZeroExtendInReg(Op, DL, VT);
1302 }
1303
1304 /// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
1305 SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
1306 EVT EltVT = VT.getScalarType();
1307 SDValue NegOne =
1308 getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
1309 return getNode(ISD::XOR, DL, VT, Val, NegOne);
1310 }
1311
1312 SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
1313 SDValue TrueValue = getBoolConstant(true, DL, VT, VT);
1314 return getNode(ISD::XOR, DL, VT, Val, TrueValue);
1315 }
1316
1317 SDValue SelectionDAG::getBoolConstant(bool V, const SDLoc &DL, EVT VT,
1318 EVT OpVT) {
1319 if (!V)
1320 return getConstant(0, DL, VT);
1321
1322 switch (TLI->getBooleanContents(OpVT)) {
1323 case TargetLowering::ZeroOrOneBooleanContent:
1324 case TargetLowering::UndefinedBooleanContent:
1325 return getConstant(1, DL, VT);
1326 case TargetLowering::ZeroOrNegativeOneBooleanContent:
1327 return getAllOnesConstant(DL, VT);
1328 }
1329 llvm_unreachable("Unexpected boolean content enum!");
1330 }
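// Examples (editor addition): with ZeroOrOneBooleanContent a true boolean in
// MVT::i32 is the constant 1, with ZeroOrNegativeOneBooleanContent it is the
// all-ones constant -1, and false is always the constant 0 regardless of the
// target's boolean content.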
1331
1332 SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
1333 bool isT, bool isO) {
1334 EVT EltVT = VT.getScalarType();
1335 assert((EltVT.getSizeInBits() >= 64 ||
1336 (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
1337 "getConstant with a uint64_t value that doesn't fit in the type!");
1338 return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
1339 }
1340
1341 SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
1342 bool isT, bool isO) {
1343 return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
1344 }
1345
1346 SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
1347 EVT VT, bool isT, bool isO) {
1348 assert(VT.isInteger() && "Cannot create FP integer constant!");
1349
1350 EVT EltVT = VT.getScalarType();
1351 const ConstantInt *Elt = &Val;
1352
1353 // In some cases the vector type is legal but the element type is illegal and
1354 // needs to be promoted, for example v8i8 on ARM. In this case, promote the
1355 // inserted value (the type does not need to match the vector element type).
1356 // Any extra bits introduced will be truncated away.
1357 if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
1358 TargetLowering::TypePromoteInteger) {
1359 EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1360 APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
1361 Elt = ConstantInt::get(*getContext(), NewVal);
1362 }
1363 // In other cases the element type is illegal and needs to be expanded, for
1364 // example v2i64 on MIPS32. In this case, find the nearest legal type, split
1365 // the value into n parts and use a vector type with n-times the elements.
1366 // Then bitcast to the type requested.
1367 // Legalizing constants too early makes the DAGCombiner's job harder so we
1368 // only legalize if the DAG tells us we must produce legal types.
1369 else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
1370 TLI->getTypeAction(*getContext(), EltVT) ==
1371 TargetLowering::TypeExpandInteger) {
1372 const APInt &NewVal = Elt->getValue();
1373 EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1374 unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
1375 unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
1376 EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);
1377
1378 // Check the temporary vector is the correct size. If this fails then
1379 // getTypeToTransformTo() probably returned a type whose size (in bits)
1380 // isn't a power-of-2 factor of the requested type size.
1381 assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());
1382
1383 SmallVector<SDValue, 2> EltParts;
1384 for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
1385 EltParts.push_back(getConstant(
1386 NewVal.lshr(i * ViaEltSizeInBits).zextOrTrunc(ViaEltSizeInBits), DL,
1387 ViaEltVT, isT, isO));
1388 }
1389
1390 // EltParts is currently in little endian order. If we actually want
1391 // big-endian order then reverse it now.
1392 if (getDataLayout().isBigEndian())
1393 std::reverse(EltParts.begin(), EltParts.end());
1394
1395 // The elements must be reversed when the element order is different
1396 // to the endianness of the elements (because the BITCAST is itself a
1397 // vector shuffle in this situation). However, we do not need any code to
1398 // perform this reversal because getConstant() is producing a vector
1399 // splat.
1400 // This situation occurs in MIPS MSA.
1401
1402 SmallVector<SDValue, 8> Ops;
1403 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
1404 llvm::append_range(Ops, EltParts);
1405
1406 SDValue V =
1407 getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
1408 return V;
1409 }
1410
1411 assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
1412 "APInt size does not match type size!");
1413 unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
1414 FoldingSetNodeID ID;
1415 AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1416 ID.AddPointer(Elt);
1417 ID.AddBoolean(isO);
1418 void *IP = nullptr;
1419 SDNode *N = nullptr;
1420 if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1421 if (!VT.isVector())
1422 return SDValue(N, 0);
1423
1424 if (!N) {
1425 N = newSDNode<ConstantSDNode>(isT, isO, Elt, EltVT);
1426 CSEMap.InsertNode(N, IP);
1427 InsertNode(N);
1428 NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this);
1429 }
1430
1431 SDValue Result(N, 0);
1432 if (VT.isScalableVector())
1433 Result = getSplatVector(VT, DL, Result);
1434 else if (VT.isVector())
1435 Result = getSplatBuildVector(VT, DL, Result);
1436
1437 return Result;
1438 }
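// Editor note: as a concrete instance of the expansion path above, a v2i64
// splat constant on a target that must expand i64 (the MIPS32/MSA case
// mentioned in the comments) is built as a v4i32 BUILD_VECTOR holding the two
// 32-bit halves of each element and then BITCAST back to v2i64.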
1439
1440 SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
1441 bool isTarget) {
1442 return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
1443 }
1444
1445 SDValue SelectionDAG::getShiftAmountConstant(uint64_t Val, EVT VT,
1446 const SDLoc &DL, bool LegalTypes) {
1447 assert(VT.isInteger() && "Shift amount is not an integer type!");
1448 EVT ShiftVT = TLI->getShiftAmountTy(VT, getDataLayout(), LegalTypes);
1449 return getConstant(Val, DL, ShiftVT);
1450 }
1451
1452 SDValue SelectionDAG::getVectorIdxConstant(uint64_t Val, const SDLoc &DL,
1453 bool isTarget) {
1454 return getConstant(Val, DL, TLI->getVectorIdxTy(getDataLayout()), isTarget);
1455 }
1456
1457 SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
1458 bool isTarget) {
1459 return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
1460 }
1461
1462 SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
1463 EVT VT, bool isTarget) {
1464 assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");
1465
1466 EVT EltVT = VT.getScalarType();
1467
1468 // Do the map lookup using the actual bit pattern for the floating point
1469 // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
1470 // we don't have issues with SNANs.
1471 unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
1472 FoldingSetNodeID ID;
1473 AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1474 ID.AddPointer(&V);
1475 void *IP = nullptr;
1476 SDNode *N = nullptr;
1477 if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1478 if (!VT.isVector())
1479 return SDValue(N, 0);
1480
1481 if (!N) {
1482 N = newSDNode<ConstantFPSDNode>(isTarget, &V, EltVT);
1483 CSEMap.InsertNode(N, IP);
1484 InsertNode(N);
1485 }
1486
1487 SDValue Result(N, 0);
1488 if (VT.isScalableVector())
1489 Result = getSplatVector(VT, DL, Result);
1490 else if (VT.isVector())
1491 Result = getSplatBuildVector(VT, DL, Result);
1492 NewSDValueDbgMsg(Result, "Creating fp constant: ", this);
1493 return Result;
1494 }
1495
1496 SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
1497 bool isTarget) {
1498 EVT EltVT = VT.getScalarType();
1499 if (EltVT == MVT::f32)
1500 return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
1501 else if (EltVT == MVT::f64)
1502 return getConstantFP(APFloat(Val), DL, VT, isTarget);
1503 else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
1504 EltVT == MVT::f16 || EltVT == MVT::bf16) {
1505 bool Ignored;
1506 APFloat APF = APFloat(Val);
1507 APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
1508 &Ignored);
1509 return getConstantFP(APF, DL, VT, isTarget);
1510 } else
1511 llvm_unreachable("Unsupported type in getConstantFP");
1512 }
1513
1514 SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
1515 EVT VT, int64_t Offset, bool isTargetGA,
1516 unsigned TargetFlags) {
1517 assert((TargetFlags == 0 || isTargetGA) &&
1518 "Cannot set target flags on target-independent globals");
1519
1520 // Truncate (with sign-extension) the offset value to the pointer size.
1521 unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
1522 if (BitWidth < 64)
1523 Offset = SignExtend64(Offset, BitWidth);
1524
1525 unsigned Opc;
1526 if (GV->isThreadLocal())
1527 Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
1528 else
1529 Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;
1530
1531 FoldingSetNodeID ID;
1532 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1533 ID.AddPointer(GV);
1534 ID.AddInteger(Offset);
1535 ID.AddInteger(TargetFlags);
1536 void *IP = nullptr;
1537 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
1538 return SDValue(E, 0);
1539
1540 auto *N = newSDNode<GlobalAddressSDNode>(
1541 Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
1542 CSEMap.InsertNode(N, IP);
1543 InsertNode(N);
1544 return SDValue(N, 0);
1545 }
1546
1547 SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
1548 unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
1549 FoldingSetNodeID ID;
1550 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1551 ID.AddInteger(FI);
1552 void *IP = nullptr;
1553 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1554 return SDValue(E, 0);
1555
1556 auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
1557 CSEMap.InsertNode(N, IP);
1558 InsertNode(N);
1559 return SDValue(N, 0);
1560 }
1561
1562 SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
1563 unsigned TargetFlags) {
1564 assert((TargetFlags == 0 || isTarget) &&
1565 "Cannot set target flags on target-independent jump tables");
1566 unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
1567 FoldingSetNodeID ID;
1568 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1569 ID.AddInteger(JTI);
1570 ID.AddInteger(TargetFlags);
1571 void *IP = nullptr;
1572 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1573 return SDValue(E, 0);
1574
1575 auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
1576 CSEMap.InsertNode(N, IP);
1577 InsertNode(N);
1578 return SDValue(N, 0);
1579 }
1580
1581 SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
1582 MaybeAlign Alignment, int Offset,
1583 bool isTarget, unsigned TargetFlags) {
1584 assert((TargetFlags == 0 || isTarget) &&
1585 "Cannot set target flags on target-independent globals");
1586 if (!Alignment)
1587 Alignment = shouldOptForSize()
1588 ? getDataLayout().getABITypeAlign(C->getType())
1589 : getDataLayout().getPrefTypeAlign(C->getType());
1590 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1591 FoldingSetNodeID ID;
1592 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1593 ID.AddInteger(Alignment->value());
1594 ID.AddInteger(Offset);
1595 ID.AddPointer(C);
1596 ID.AddInteger(TargetFlags);
1597 void *IP = nullptr;
1598 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1599 return SDValue(E, 0);
1600
1601 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment,
1602 TargetFlags);
1603 CSEMap.InsertNode(N, IP);
1604 InsertNode(N);
1605 SDValue V = SDValue(N, 0);
1606 NewSDValueDbgMsg(V, "Creating new constant pool: ", this);
1607 return V;
1608 }
1609
1610 SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
1611 MaybeAlign Alignment, int Offset,
1612 bool isTarget, unsigned TargetFlags) {
1613 assert((TargetFlags == 0 || isTarget) &&
1614 "Cannot set target flags on target-independent globals");
1615 if (!Alignment)
1616 Alignment = getDataLayout().getPrefTypeAlign(C->getType());
1617 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1618 FoldingSetNodeID ID;
1619 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1620 ID.AddInteger(Alignment->value());
1621 ID.AddInteger(Offset);
1622 C->addSelectionDAGCSEId(ID);
1623 ID.AddInteger(TargetFlags);
1624 void *IP = nullptr;
1625 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1626 return SDValue(E, 0);
1627
1628 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment,
1629 TargetFlags);
1630 CSEMap.InsertNode(N, IP);
1631 InsertNode(N);
1632 return SDValue(N, 0);
1633 }
1634
1635 SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
1636 unsigned TargetFlags) {
1637 FoldingSetNodeID ID;
1638 AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
1639 ID.AddInteger(Index);
1640 ID.AddInteger(Offset);
1641 ID.AddInteger(TargetFlags);
1642 void *IP = nullptr;
1643 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1644 return SDValue(E, 0);
1645
1646 auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
1647 CSEMap.InsertNode(N, IP);
1648 InsertNode(N);
1649 return SDValue(N, 0);
1650 }
1651
1652 SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
1653 FoldingSetNodeID ID;
1654 AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
1655 ID.AddPointer(MBB);
1656 void *IP = nullptr;
1657 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1658 return SDValue(E, 0);
1659
1660 auto *N = newSDNode<BasicBlockSDNode>(MBB);
1661 CSEMap.InsertNode(N, IP);
1662 InsertNode(N);
1663 return SDValue(N, 0);
1664 }
1665
1666 SDValue SelectionDAG::getValueType(EVT VT) {
1667 if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
1668 ValueTypeNodes.size())
1669 ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);
1670
1671 SDNode *&N = VT.isExtended() ?
1672 ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];
1673
1674 if (N) return SDValue(N, 0);
1675 N = newSDNode<VTSDNode>(VT);
1676 InsertNode(N);
1677 return SDValue(N, 0);
1678 }
1679
1680 SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
1681 SDNode *&N = ExternalSymbols[Sym];
1682 if (N) return SDValue(N, 0);
1683 N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
1684 InsertNode(N);
1685 return SDValue(N, 0);
1686 }
1687
1688 SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
1689 SDNode *&N = MCSymbols[Sym];
1690 if (N)
1691 return SDValue(N, 0);
1692 N = newSDNode<MCSymbolSDNode>(Sym, VT);
1693 InsertNode(N);
1694 return SDValue(N, 0);
1695 }
1696
1697 SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
1698 unsigned TargetFlags) {
1699 SDNode *&N =
1700 TargetExternalSymbols[std::pair<std::string, unsigned>(Sym, TargetFlags)];
1701 if (N) return SDValue(N, 0);
1702 N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
1703 InsertNode(N);
1704 return SDValue(N, 0);
1705 }
1706
1707 SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
1708 if ((unsigned)Cond >= CondCodeNodes.size())
1709 CondCodeNodes.resize(Cond+1);
1710
1711 if (!CondCodeNodes[Cond]) {
1712 auto *N = newSDNode<CondCodeSDNode>(Cond);
1713 CondCodeNodes[Cond] = N;
1714 InsertNode(N);
1715 }
1716
1717 return SDValue(CondCodeNodes[Cond], 0);
1718 }
1719
1720 /// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
1721 /// point at N1 to point at N2 and indices that point at N2 to point at N1.
1722 static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
1723 std::swap(N1, N2);
1724 ShuffleVectorSDNode::commuteMask(M);
1725 }
1726
1727 SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
1728 SDValue N2, ArrayRef<int> Mask) {
1729 assert(VT.getVectorNumElements() == Mask.size() &&
1730 "Must have the same number of vector elements as mask elements!");
1731 assert(VT == N1.getValueType() && VT == N2.getValueType() &&
1732 "Invalid VECTOR_SHUFFLE");
1733
1734 // Canonicalize shuffle undef, undef -> undef
1735 if (N1.isUndef() && N2.isUndef())
1736 return getUNDEF(VT);
1737
1738 // Validate that all indices in Mask are within the range of the elements
1739 // input to the shuffle.
1740 int NElts = Mask.size();
1741 assert(llvm::all_of(Mask,
1742 [&](int M) { return M < (NElts * 2) && M >= -1; }) &&
1743 "Index out of range");
1744
1745 // Copy the mask so we can do any needed cleanup.
1746 SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end());
1747
1748 // Canonicalize shuffle v, v -> v, undef
1749 if (N1 == N2) {
1750 N2 = getUNDEF(VT);
1751 for (int i = 0; i != NElts; ++i)
1752 if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
1753 }
1754
1755 // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
1756 if (N1.isUndef())
1757 commuteShuffle(N1, N2, MaskVec);
1758
1759 if (TLI->hasVectorBlend()) {
1760 // If shuffling a splat, try to blend the splat instead. We do this here so
1761 // that even when this arises during lowering we don't have to re-handle it.
1762 auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
1763 BitVector UndefElements;
1764 SDValue Splat = BV->getSplatValue(&UndefElements);
1765 if (!Splat)
1766 return;
1767
1768 for (int i = 0; i < NElts; ++i) {
1769 if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
1770 continue;
1771
1772 // If this input comes from undef, mark it as such.
1773 if (UndefElements[MaskVec[i] - Offset]) {
1774 MaskVec[i] = -1;
1775 continue;
1776 }
1777
1778 // If we can blend a non-undef lane, use that instead.
1779 if (!UndefElements[i])
1780 MaskVec[i] = i + Offset;
1781 }
1782 };
1783 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
1784 BlendSplat(N1BV, 0);
1785 if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
1786 BlendSplat(N2BV, NElts);
1787 }
1788
1789 // Canonicalize all indices into lhs -> shuffle lhs, undef
1790 // Canonicalize all indices into rhs -> shuffle rhs, undef
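// AllLHS / AllRHS track whether every defined mask index selects only from
// N1 or only from N2; undef (-1) entries do not affect either flag.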
1791 bool AllLHS = true, AllRHS = true;
1792 bool N2Undef = N2.isUndef();
1793 for (int i = 0; i != NElts; ++i) {
1794 if (MaskVec[i] >= NElts) {
1795 if (N2Undef)
1796 MaskVec[i] = -1;
1797 else
1798 AllLHS = false;
1799 } else if (MaskVec[i] >= 0) {
1800 AllRHS = false;
1801 }
1802 }
1803 if (AllLHS && AllRHS)
1804 return getUNDEF(VT);
1805 if (AllLHS && !N2Undef)
1806 N2 = getUNDEF(VT);
1807 if (AllRHS) {
1808 N1 = getUNDEF(VT);
1809 commuteShuffle(N1, N2, MaskVec);
1810 }
1811 // Reset our undef status after accounting for the mask.
1812 N2Undef = N2.isUndef();
1813 // Re-check whether both sides ended up undef.
1814 if (N1.isUndef() && N2Undef)
1815 return getUNDEF(VT);
1816
1817 // If this is an identity shuffle, return the LHS operand directly.
1818 bool Identity = true, AllSame = true;
1819 for (int i = 0; i != NElts; ++i) {
1820 if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
1821 if (MaskVec[i] != MaskVec[0]) AllSame = false;
1822 }
1823 if (Identity && NElts)
1824 return N1;
1825
1826 // Shuffling a constant splat doesn't change the result.
1827 if (N2Undef) {
1828 SDValue V = N1;
1829
1830 // Look through any bitcasts. We check that these don't change the number
1831 // (and size) of elements and just change their types.
1832 while (V.getOpcode() == ISD::BITCAST)
1833 V = V->getOperand(0);
1834
1835 // A splat should always show up as a build vector node.
1836 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
1837 BitVector UndefElements;
1838 SDValue Splat = BV->getSplatValue(&UndefElements);
1839 // If this is a splat of an undef, shuffling it is also undef.
1840 if (Splat && Splat.isUndef())
1841 return getUNDEF(VT);
1842
1843 bool SameNumElts =
1844 V.getValueType().getVectorNumElements() == VT.getVectorNumElements();
1845
1846 // We only have a splat which can skip shuffles if there is a splatted
1847 // value and no undef lanes rearranged by the shuffle.
1848 if (Splat && UndefElements.none()) {
1849 // Splat of <x, x, ..., x>: return <x, x, ..., x>, provided that the
1850 // number of elements matches or the splatted value is a zero constant.
1851 if (SameNumElts)
1852 return N1;
1853 if (auto *C = dyn_cast<ConstantSDNode>(Splat))
1854 if (C->isNullValue())
1855 return N1;
1856 }
1857
1858 // If the shuffle itself creates a splat, build the vector directly.
1859 if (AllSame && SameNumElts) {
1860 EVT BuildVT = BV->getValueType(0);
1861 const SDValue &Splatted = BV->getOperand(MaskVec[0]);
1862 SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);
1863
1864 // We may have jumped through bitcasts, so the type of the
1865 // BUILD_VECTOR may not match the type of the shuffle.
1866 if (BuildVT != VT)
1867 NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
1868 return NewBV;
1869 }
1870 }
1871 }
1872
1873 FoldingSetNodeID ID;
1874 SDValue Ops[2] = { N1, N2 };
1875 AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
1876 for (int i = 0; i != NElts; ++i)
1877 ID.AddInteger(MaskVec[i]);
1878
1879 void* IP = nullptr;
1880 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1881 return SDValue(E, 0);
1882
1883 // Allocate the mask array for the node out of the BumpPtrAllocator, since
1884 // SDNode doesn't have access to it. This memory will be "leaked" when
1885 // the node is deallocated, but recovered when the NodeAllocator is released.
1886 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
1887 llvm::copy(MaskVec, MaskAlloc);
1888
1889 auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(),
1890 dl.getDebugLoc(), MaskAlloc);
1891 createOperands(N, Ops);
1892
1893 CSEMap.InsertNode(N, IP);
1894 InsertNode(N);
1895 SDValue V = SDValue(N, 0);
1896 NewSDValueDbgMsg(V, "Creating new node: ", this);
1897 return V;
1898 }
1899
1900 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) {
1901 EVT VT = SV.getValueType(0);
1902 SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end());
1903 ShuffleVectorSDNode::commuteMask(MaskVec);
1904
1905 SDValue Op0 = SV.getOperand(0);
1906 SDValue Op1 = SV.getOperand(1);
1907 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec);
1908 }
1909
1910 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
1911 FoldingSetNodeID ID;
1912 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None);
1913 ID.AddInteger(RegNo);
1914 void *IP = nullptr;
1915 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1916 return SDValue(E, 0);
1917
1918 auto *N = newSDNode<RegisterSDNode>(RegNo, VT);
1919 N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA);
1920 CSEMap.InsertNode(N, IP);
1921 InsertNode(N);
1922 return SDValue(N, 0);
1923 }
1924
1925 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
1926 FoldingSetNodeID ID;
1927 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None);
1928 ID.AddPointer(RegMask);
1929 void *IP = nullptr;
1930 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1931 return SDValue(E, 0);
1932
1933 auto *N = newSDNode<RegisterMaskSDNode>(RegMask);
1934 CSEMap.InsertNode(N, IP);
1935 InsertNode(N);
1936 return SDValue(N, 0);
1937 }
1938
1939 SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root,
1940 MCSymbol *Label) {
1941 return getLabelNode(ISD::EH_LABEL, dl, Root, Label);
1942 }
1943
1944 SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl,
1945 SDValue Root, MCSymbol *Label) {
1946 FoldingSetNodeID ID;
1947 SDValue Ops[] = { Root };
1948 AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops);
1949 ID.AddPointer(Label);
1950 void *IP = nullptr;
1951 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1952 return SDValue(E, 0);
1953
1954 auto *N =
1955 newSDNode<LabelSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), Label);
1956 createOperands(N, Ops);
1957
1958 CSEMap.InsertNode(N, IP);
1959 InsertNode(N);
1960 return SDValue(N, 0);
1961 }
1962
1963 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
1964 int64_t Offset, bool isTarget,
1965 unsigned TargetFlags) {
1966 unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
1967
1968 FoldingSetNodeID ID;
1969 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1970 ID.AddPointer(BA);
1971 ID.AddInteger(Offset);
1972 ID.AddInteger(TargetFlags);
1973 void *IP = nullptr;
1974 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1975 return SDValue(E, 0);
1976
1977 auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags);
1978 CSEMap.InsertNode(N, IP);
1979 InsertNode(N);
1980 return SDValue(N, 0);
1981 }
1982
1983 SDValue SelectionDAG::getSrcValue(const Value *V) {
1984 FoldingSetNodeID ID;
1985 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None);
1986 ID.AddPointer(V);
1987
1988 void *IP = nullptr;
1989 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1990 return SDValue(E, 0);
1991
1992 auto *N = newSDNode<SrcValueSDNode>(V);
1993 CSEMap.InsertNode(N, IP);
1994 InsertNode(N);
1995 return SDValue(N, 0);
1996 }
1997
1998 SDValue SelectionDAG::getMDNode(const MDNode *MD) {
1999 FoldingSetNodeID ID;
2000 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None);
2001 ID.AddPointer(MD);
2002
2003 void *IP = nullptr;
2004 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2005 return SDValue(E, 0);
2006
2007 auto *N = newSDNode<MDNodeSDNode>(MD);
2008 CSEMap.InsertNode(N, IP);
2009 InsertNode(N);
2010 return SDValue(N, 0);
2011 }
2012
2013 SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) {
2014 if (VT == V.getValueType())
2015 return V;
2016
2017 return getNode(ISD::BITCAST, SDLoc(V), VT, V);
2018 }
2019
2020 SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr,
2021 unsigned SrcAS, unsigned DestAS) {
2022 SDValue Ops[] = {Ptr};
2023 FoldingSetNodeID ID;
2024 AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops);
2025 ID.AddInteger(SrcAS);
2026 ID.AddInteger(DestAS);
2027
2028 void *IP = nullptr;
2029 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
2030 return SDValue(E, 0);
2031
2032 auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(),
2033 VT, SrcAS, DestAS);
2034 createOperands(N, Ops);
2035
2036 CSEMap.InsertNode(N, IP);
2037 InsertNode(N);
2038 return SDValue(N, 0);
2039 }
2040
2041 SDValue SelectionDAG::getFreeze(SDValue V) {
2042 return getNode(ISD::FREEZE, SDLoc(V), V.getValueType(), V);
2043 }
2044
2045 /// getShiftAmountOperand - Return the specified value cast to
2046 /// the target's desired shift amount type.
2047 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
2048 EVT OpTy = Op.getValueType();
2049 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout());
2050 if (OpTy == ShTy || OpTy.isVector()) return Op;
2051
2052 return getZExtOrTrunc(Op, SDLoc(Op), ShTy);
2053 }
2054
2055 SDValue SelectionDAG::expandVAArg(SDNode *Node) {
2056 SDLoc dl(Node);
2057 const TargetLowering &TLI = getTargetLoweringInfo();
2058 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2059 EVT VT = Node->getValueType(0);
2060 SDValue Tmp1 = Node->getOperand(0);
2061 SDValue Tmp2 = Node->getOperand(1);
2062 const MaybeAlign MA(Node->getConstantOperandVal(3));
2063
2064 SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1,
2065 Tmp2, MachinePointerInfo(V));
2066 SDValue VAList = VAListLoad;
2067
2068 if (MA && *MA > TLI.getMinStackArgumentAlignment()) {
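// Round VAList up to the requested alignment: the ADD/AND below compute
// VAList = (VAList + MA - 1) & ~(MA - 1).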
2069 VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
2070 getConstant(MA->value() - 1, dl, VAList.getValueType()));
2071
2072 VAList =
2073 getNode(ISD::AND, dl, VAList.getValueType(), VAList,
2074 getConstant(-(int64_t)MA->value(), dl, VAList.getValueType()));
2075 }
2076
2077 // Increment the pointer, VAList, to the next vaarg
2078 Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
2079 getConstant(getDataLayout().getTypeAllocSize(
2080 VT.getTypeForEVT(*getContext())),
2081 dl, VAList.getValueType()));
2082 // Store the incremented VAList to the legalized pointer
2083 Tmp1 =
2084 getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V));
2085 // Load the actual argument out of the pointer VAList
2086 return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo());
2087 }
2088
2089 SDValue SelectionDAG::expandVACopy(SDNode *Node) {
2090 SDLoc dl(Node);
2091 const TargetLowering &TLI = getTargetLoweringInfo();
2092 // This defaults to loading a pointer from the input and storing it to the
2093 // output, returning the chain.
2094 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
2095 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
2096 SDValue Tmp1 =
2097 getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0),
2098 Node->getOperand(2), MachinePointerInfo(VS));
2099 return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
2100 MachinePointerInfo(VD));
2101 }
2102
2103 Align SelectionDAG::getReducedAlign(EVT VT, bool UseABI) {
2104 const DataLayout &DL = getDataLayout();
2105 Type *Ty = VT.getTypeForEVT(*getContext());
2106 Align RedAlign = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);
2107
2108 if (TLI->isTypeLegal(VT) || !VT.isVector())
2109 return RedAlign;
2110
2111 const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
2112 const Align StackAlign = TFI->getStackAlign();
2113
2114 // See if we can choose a smaller ABI alignment in cases where it's an
2115 // illegal vector type that will get broken down.
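// For example (assuming a target whose widest legal vector is 128 bits), a
// v32i8 value gets broken down into two v16i8 registers, so a stack slot for
// it only needs the alignment of the v16i8 intermediate type.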
2116 if (RedAlign > StackAlign) {
2117 EVT IntermediateVT;
2118 MVT RegisterVT;
2119 unsigned NumIntermediates;
2120 TLI->getVectorTypeBreakdown(*getContext(), VT, IntermediateVT,
2121 NumIntermediates, RegisterVT);
2122 Ty = IntermediateVT.getTypeForEVT(*getContext());
2123 Align RedAlign2 = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);
2124 if (RedAlign2 < RedAlign)
2125 RedAlign = RedAlign2;
2126 }
2127
2128 return RedAlign;
2129 }
2130
2131 SDValue SelectionDAG::CreateStackTemporary(TypeSize Bytes, Align Alignment) {
2132 MachineFrameInfo &MFI = MF->getFrameInfo();
2133 const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
2134 int StackID = 0;
2135 if (Bytes.isScalable())
2136 StackID = TFI->getStackIDForScalableVectors();
2137 // The stack id gives an indication of whether the object is scalable or
2138 // not, so it's safe to pass in the minimum size here.
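// For scalable vectors only the minimum (vscale == 1) size is known here;
// the scalable stack ID marks the object so later frame lowering can account
// for the actual runtime size.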
2139 int FrameIdx = MFI.CreateStackObject(Bytes.getKnownMinSize(), Alignment,
2140 false, nullptr, StackID);
2141 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
2142 }
2143
2144 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
2145 Type *Ty = VT.getTypeForEVT(*getContext());
2146 Align StackAlign =
2147 std::max(getDataLayout().getPrefTypeAlign(Ty), Align(minAlign));
2148 return CreateStackTemporary(VT.getStoreSize(), StackAlign);
2149 }
2150
2151 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
2152 TypeSize VT1Size = VT1.getStoreSize();
2153 TypeSize VT2Size = VT2.getStoreSize();
2154 assert(VT1Size.isScalable() == VT2Size.isScalable() &&
2155 "Don't know how to choose the maximum size when creating a stack "
2156 "temporary");
2157 TypeSize Bytes =
2158 VT1Size.getKnownMinSize() > VT2Size.getKnownMinSize() ? VT1Size : VT2Size;
2159
2160 Type *Ty1 = VT1.getTypeForEVT(*getContext());
2161 Type *Ty2 = VT2.getTypeForEVT(*getContext());
2162 const DataLayout &DL = getDataLayout();
2163 Align Align = std::max(DL.getPrefTypeAlign(Ty1), DL.getPrefTypeAlign(Ty2));
2164 return CreateStackTemporary(Bytes, Align);
2165 }
2166
2167 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
2168 ISD::CondCode Cond, const SDLoc &dl) {
2169 EVT OpVT = N1.getValueType();
2170
2171 // These setcc operations always fold.
2172 switch (Cond) {
2173 default: break;
2174 case ISD::SETFALSE:
2175 case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT);
2176 case ISD::SETTRUE:
2177 case ISD::SETTRUE2: return getBoolConstant(true, dl, VT, OpVT);
2178
2179 case ISD::SETOEQ:
2180 case ISD::SETOGT:
2181 case ISD::SETOGE:
2182 case ISD::SETOLT:
2183 case ISD::SETOLE:
2184 case ISD::SETONE:
2185 case ISD::SETO:
2186 case ISD::SETUO:
2187 case ISD::SETUEQ:
2188 case ISD::SETUNE:
2189 assert(!OpVT.isInteger() && "Illegal setcc for integer!");
2190 break;
2191 }
2192
2193 if (OpVT.isInteger()) {
2194 // For EQ and NE, we can always pick a value for the undef to make the
2195 // predicate pass or fail, so we can return undef.
2196 // Matches behavior in llvm::ConstantFoldCompareInstruction.
2197 // icmp eq/ne X, undef -> undef.
2198 if ((N1.isUndef() || N2.isUndef()) &&
2199 (Cond == ISD::SETEQ || Cond == ISD::SETNE))
2200 return getUNDEF(VT);
2201
2202 // If both operands are undef, we can return undef for int comparison.
2203 // icmp undef, undef -> undef.
2204 if (N1.isUndef() && N2.isUndef())
2205 return getUNDEF(VT);
2206
2207 // icmp X, X -> true/false
2208 // icmp X, undef -> true/false because undef could be X.
2209 if (N1 == N2)
2210 return getBoolConstant(ISD::isTrueWhenEqual(Cond), dl, VT, OpVT);
2211 }
2212
2213 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) {
2214 const APInt &C2 = N2C->getAPIntValue();
2215 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
2216 const APInt &C1 = N1C->getAPIntValue();
2217
2218 switch (Cond) {
2219 default: llvm_unreachable("Unknown integer setcc!");
2220 case ISD::SETEQ: return getBoolConstant(C1 == C2, dl, VT, OpVT);
2221 case ISD::SETNE: return getBoolConstant(C1 != C2, dl, VT, OpVT);
2222 case ISD::SETULT: return getBoolConstant(C1.ult(C2), dl, VT, OpVT);
2223 case ISD::SETUGT: return getBoolConstant(C1.ugt(C2), dl, VT, OpVT);
2224 case ISD::SETULE: return getBoolConstant(C1.ule(C2), dl, VT, OpVT);
2225 case ISD::SETUGE: return getBoolConstant(C1.uge(C2), dl, VT, OpVT);
2226 case ISD::SETLT: return getBoolConstant(C1.slt(C2), dl, VT, OpVT);
2227 case ISD::SETGT: return getBoolConstant(C1.sgt(C2), dl, VT, OpVT);
2228 case ISD::SETLE: return getBoolConstant(C1.sle(C2), dl, VT, OpVT);
2229 case ISD::SETGE: return getBoolConstant(C1.sge(C2), dl, VT, OpVT);
2230 }
2231 }
2232 }
2233
2234 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
2235 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
2236
2237 if (N1CFP && N2CFP) {
2238 APFloat::cmpResult R = N1CFP->getValueAPF().compare(N2CFP->getValueAPF());
2239 switch (Cond) {
2240 default: break;
2241 case ISD::SETEQ: if (R==APFloat::cmpUnordered)
2242 return getUNDEF(VT);
2243 LLVM_FALLTHROUGH;
2244 case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT,
2245 OpVT);
2246 case ISD::SETNE: if (R==APFloat::cmpUnordered)
2247 return getUNDEF(VT);
2248 LLVM_FALLTHROUGH;
2249 case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2250 R==APFloat::cmpLessThan, dl, VT,
2251 OpVT);
2252 case ISD::SETLT: if (R==APFloat::cmpUnordered)
2253 return getUNDEF(VT);
2254 LLVM_FALLTHROUGH;
2255 case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT,
2256 OpVT);
2257 case ISD::SETGT: if (R==APFloat::cmpUnordered)
2258 return getUNDEF(VT);
2259 LLVM_FALLTHROUGH;
2260 case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl,
2261 VT, OpVT);
2262 case ISD::SETLE: if (R==APFloat::cmpUnordered)
2263 return getUNDEF(VT);
2264 LLVM_FALLTHROUGH;
2265 case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan ||
2266 R==APFloat::cmpEqual, dl, VT,
2267 OpVT);
2268 case ISD::SETGE: if (R==APFloat::cmpUnordered)
2269 return getUNDEF(VT);
2270 LLVM_FALLTHROUGH;
2271 case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2272 R==APFloat::cmpEqual, dl, VT, OpVT);
2273 case ISD::SETO: return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT,
2274 OpVT);
2275 case ISD::SETUO: return getBoolConstant(R==APFloat::cmpUnordered, dl, VT,
2276 OpVT);
2277 case ISD::SETUEQ: return getBoolConstant(R==APFloat::cmpUnordered ||
2278 R==APFloat::cmpEqual, dl, VT,
2279 OpVT);
2280 case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT,
2281 OpVT);
2282 case ISD::SETULT: return getBoolConstant(R==APFloat::cmpUnordered ||
2283 R==APFloat::cmpLessThan, dl, VT,
2284 OpVT);
2285 case ISD::SETUGT: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2286 R==APFloat::cmpUnordered, dl, VT,
2287 OpVT);
2288 case ISD::SETULE: return getBoolConstant(R!=APFloat::cmpGreaterThan, dl,
2289 VT, OpVT);
2290 case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT,
2291 OpVT);
2292 }
2293 } else if (N1CFP && OpVT.isSimple() && !N2.isUndef()) {
2294 // Ensure that the constant occurs on the RHS.
2295 ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
2296 if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT()))
2297 return SDValue();
2298 return getSetCC(dl, VT, N2, N1, SwappedCond);
2299 } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
2300 (OpVT.isFloatingPoint() && (N1.isUndef() || N2.isUndef()))) {
2301 // If an operand is known to be a nan (or undef that could be a nan), we can
2302 // fold it.
2303 // Choosing NaN for the undef will always make unordered comparison succeed
2304 // and ordered comparison fail.
2305 // Matches behavior in llvm::ConstantFoldCompareInstruction.
2306 switch (ISD::getUnorderedFlavor(Cond)) {
2307 default:
2308 llvm_unreachable("Unknown flavor!");
2309 case 0: // Known false.
2310 return getBoolConstant(false, dl, VT, OpVT);
2311 case 1: // Known true.
2312 return getBoolConstant(true, dl, VT, OpVT);
2313 case 2: // Undefined.
2314 return getUNDEF(VT);
2315 }
2316 }
2317
2318 // Could not fold it.
2319 return SDValue();
2320 }
2321
2322 /// See if the specified operand can be simplified with the knowledge that only
2323 /// the bits specified by DemandedBits are used.
2324 /// TODO: really we should be making this into the DAG equivalent of
2325 /// SimplifyMultipleUseDemandedBits and not generate any new nodes.
2326 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits) {
2327 EVT VT = V.getValueType();
2328
2329 if (VT.isScalableVector())
2330 return SDValue();
2331
2332 APInt DemandedElts = VT.isVector()
2333 ? APInt::getAllOnesValue(VT.getVectorNumElements())
2334 : APInt(1, 1);
2335 return GetDemandedBits(V, DemandedBits, DemandedElts);
2336 }
2337
2338 /// See if the specified operand can be simplified with the knowledge that only
2339 /// the bits specified by DemandedBits are used in the elements specified by
2340 /// DemandedElts.
2341 /// TODO: really we should be making this into the DAG equivalent of
2342 /// SimplifyMultipleUseDemandedBits and not generate any new nodes.
2343 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits,
2344 const APInt &DemandedElts) {
2345 switch (V.getOpcode()) {
2346 default:
2347 return TLI->SimplifyMultipleUseDemandedBits(V, DemandedBits, DemandedElts,
2348 *this, 0);
2349 case ISD::Constant: {
2350 const APInt &CVal = cast<ConstantSDNode>(V)->getAPIntValue();
2351 APInt NewVal = CVal & DemandedBits;
2352 if (NewVal != CVal)
2353 return getConstant(NewVal, SDLoc(V), V.getValueType());
2354 break;
2355 }
2356 case ISD::SRL:
2357 // Only look at single-use SRLs.
2358 if (!V.getNode()->hasOneUse())
2359 break;
2360 if (auto *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
2361 // See if we can recursively simplify the LHS.
2362 unsigned Amt = RHSC->getZExtValue();
2363
2364 // Watch out for shift count overflow though.
2365 if (Amt >= DemandedBits.getBitWidth())
2366 break;
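// The bits demanded from the SRL result correspond to bits
// (DemandedBits << Amt) of the shift's input operand.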
2367 APInt SrcDemandedBits = DemandedBits << Amt;
2368 if (SDValue SimplifyLHS =
2369 GetDemandedBits(V.getOperand(0), SrcDemandedBits))
2370 return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS,
2371 V.getOperand(1));
2372 }
2373 break;
2374 }
2375 return SDValue();
2376 }
2377
2378 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
2379 /// use this predicate to simplify operations downstream.
2380 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
2381 unsigned BitWidth = Op.getScalarValueSizeInBits();
2382 return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth);
2383 }
2384
2385 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
2386 /// this predicate to simplify operations downstream. Mask is known to be zero
2387 /// for bits that V cannot have.
2388 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
2389 unsigned Depth) const {
2390 return Mask.isSubsetOf(computeKnownBits(V, Depth).Zero);
2391 }
2392
2393 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero in
2394 /// DemandedElts. We use this predicate to simplify operations downstream.
2395 /// Mask is known to be zero for bits that V cannot have.
2396 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
2397 const APInt &DemandedElts,
2398 unsigned Depth) const {
2399 return Mask.isSubsetOf(computeKnownBits(V, DemandedElts, Depth).Zero);
2400 }
2401
2402 /// MaskedValueIsAllOnes - Return true if '(Op & Mask) == Mask'.
2403 bool SelectionDAG::MaskedValueIsAllOnes(SDValue V, const APInt &Mask,
2404 unsigned Depth) const {
2405 return Mask.isSubsetOf(computeKnownBits(V, Depth).One);
2406 }
2407
2408 /// isSplatValue - Return true if the vector V has the same value
2409 /// across all DemandedElts. For scalable vectors it does not make
2410 /// sense to specify which elements are demanded or undefined, therefore
2411 /// they are simply ignored.
2412 bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts,
2413 APInt &UndefElts, unsigned Depth) {
2414 EVT VT = V.getValueType();
2415 assert(VT.isVector() && "Vector type expected");
2416
2417 if (!VT.isScalableVector() && !DemandedElts)
2418 return false; // No demanded elts, better to assume we don't know anything.
2419
2420 if (Depth >= MaxRecursionDepth)
2421 return false; // Limit search depth.
2422
2423 // Deal with some common cases here that work for both fixed and scalable
2424 // vector types.
2425 switch (V.getOpcode()) {
2426 case ISD::SPLAT_VECTOR:
2427 UndefElts = V.getOperand(0).isUndef()
2428 ? APInt::getAllOnesValue(DemandedElts.getBitWidth())
2429 : APInt(DemandedElts.getBitWidth(), 0);
2430 return true;
2431 case ISD::ADD:
2432 case ISD::SUB:
2433 case ISD::AND: {
2434 APInt UndefLHS, UndefRHS;
2435 SDValue LHS = V.getOperand(0);
2436 SDValue RHS = V.getOperand(1);
2437 if (isSplatValue(LHS, DemandedElts, UndefLHS, Depth + 1) &&
2438 isSplatValue(RHS, DemandedElts, UndefRHS, Depth + 1)) {
2439 UndefElts = UndefLHS | UndefRHS;
2440 return true;
2441 }
2442 break;
2443 }
2444 case ISD::TRUNCATE:
2445 case ISD::SIGN_EXTEND:
2446 case ISD::ZERO_EXTEND:
2447 return isSplatValue(V.getOperand(0), DemandedElts, UndefElts, Depth + 1);
2448 }
2449
2450 // We don't support other cases than those above for scalable vectors at
2451 // the moment.
2452 if (VT.isScalableVector())
2453 return false;
2454
2455 unsigned NumElts = VT.getVectorNumElements();
2456 assert(NumElts == DemandedElts.getBitWidth() && "Vector size mismatch");
2457 UndefElts = APInt::getNullValue(NumElts);
2458
2459 switch (V.getOpcode()) {
2460 case ISD::BUILD_VECTOR: {
2461 SDValue Scl;
2462 for (unsigned i = 0; i != NumElts; ++i) {
2463 SDValue Op = V.getOperand(i);
2464 if (Op.isUndef()) {
2465 UndefElts.setBit(i);
2466 continue;
2467 }
2468 if (!DemandedElts[i])
2469 continue;
2470 if (Scl && Scl != Op)
2471 return false;
2472 Scl = Op;
2473 }
2474 return true;
2475 }
2476 case ISD::VECTOR_SHUFFLE: {
2477 // Check if this is a shuffle node doing a splat.
2478 // TODO: Do we need to handle shuffle(splat, undef, mask)?
2479 int SplatIndex = -1;
2480 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask();
2481 for (int i = 0; i != (int)NumElts; ++i) {
2482 int M = Mask[i];
2483 if (M < 0) {
2484 UndefElts.setBit(i);
2485 continue;
2486 }
2487 if (!DemandedElts[i])
2488 continue;
2489 if (0 <= SplatIndex && SplatIndex != M)
2490 return false;
2491 SplatIndex = M;
2492 }
2493 return true;
2494 }
2495 case ISD::EXTRACT_SUBVECTOR: {
2496 // Offset the demanded elts by the subvector index.
2497 SDValue Src = V.getOperand(0);
2498 uint64_t Idx = V.getConstantOperandVal(1);
2499 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2500 APInt UndefSrcElts;
2501 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
2502 if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts, Depth + 1)) {
2503 UndefElts = UndefSrcElts.extractBits(NumElts, Idx);
2504 return true;
2505 }
2506 break;
2507 }
2508 }
2509
2510 return false;
2511 }
2512
2513 /// Helper wrapper to main isSplatValue function.
2514 bool SelectionDAG::isSplatValue(SDValue V, bool AllowUndefs) {
2515 EVT VT = V.getValueType();
2516 assert(VT.isVector() && "Vector type expected");
2517
2518 APInt UndefElts;
2519 APInt DemandedElts;
2520
2521 // For now we don't support this with scalable vectors.
2522 if (!VT.isScalableVector())
2523 DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
2524 return isSplatValue(V, DemandedElts, UndefElts) &&
2525 (AllowUndefs || !UndefElts);
2526 }
2527
2528 SDValue SelectionDAG::getSplatSourceVector(SDValue V, int &SplatIdx) {
2529 V = peekThroughExtractSubvectors(V);
2530
2531 EVT VT = V.getValueType();
2532 unsigned Opcode = V.getOpcode();
2533 switch (Opcode) {
2534 default: {
2535 APInt UndefElts;
2536 APInt DemandedElts;
2537
2538 if (!VT.isScalableVector())
2539 DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
2540
2541 if (isSplatValue(V, DemandedElts, UndefElts)) {
2542 if (VT.isScalableVector()) {
2543 // DemandedElts and UndefElts are ignored for scalable vectors, since
2544 // the only supported cases are SPLAT_VECTOR nodes.
2545 SplatIdx = 0;
2546 } else {
2547 // Handle case where all demanded elements are UNDEF.
2548 if (DemandedElts.isSubsetOf(UndefElts)) {
2549 SplatIdx = 0;
2550 return getUNDEF(VT);
2551 }
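// Otherwise, skip over any leading lanes that are demanded but undef; the
// splat value can be read from the first lane after them.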
2552 SplatIdx = (UndefElts & DemandedElts).countTrailingOnes();
2553 }
2554 return V;
2555 }
2556 break;
2557 }
2558 case ISD::SPLAT_VECTOR:
2559 SplatIdx = 0;
2560 return V;
2561 case ISD::VECTOR_SHUFFLE: {
2562 if (VT.isScalableVector())
2563 return SDValue();
2564
2565 // Check if this is a shuffle node doing a splat.
2566 // TODO - remove this and rely purely on SelectionDAG::isSplatValue,
2567 // getTargetVShiftNode currently struggles without the splat source.
2568 auto *SVN = cast<ShuffleVectorSDNode>(V);
2569 if (!SVN->isSplat())
2570 break;
2571 int Idx = SVN->getSplatIndex();
2572 int NumElts = V.getValueType().getVectorNumElements();
2573 SplatIdx = Idx % NumElts;
2574 return V.getOperand(Idx / NumElts);
2575 }
2576 }
2577
2578 return SDValue();
2579 }
2580
2581 SDValue SelectionDAG::getSplatValue(SDValue V) {
2582 int SplatIdx;
2583 if (SDValue SrcVector = getSplatSourceVector(V, SplatIdx))
2584 return getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V),
2585 SrcVector.getValueType().getScalarType(), SrcVector,
2586 getVectorIdxConstant(SplatIdx, SDLoc(V)));
2587 return SDValue();
2588 }
2589
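/// If the shift amount of V (a SHL/SRL/SRA node) is a constant or a constant
/// splat over the demanded elements that is strictly smaller than the scalar
/// bit width, return a pointer to that amount; otherwise return null.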
2590 const APInt *
2591 SelectionDAG::getValidShiftAmountConstant(SDValue V,
2592 const APInt &DemandedElts) const {
2593 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
2594 V.getOpcode() == ISD::SRA) &&
2595 "Unknown shift node");
2596 unsigned BitWidth = V.getScalarValueSizeInBits();
2597 if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1), DemandedElts)) {
2598 // Shifting more than the bitwidth is not valid.
2599 const APInt &ShAmt = SA->getAPIntValue();
2600 if (ShAmt.ult(BitWidth))
2601 return &ShAmt;
2602 }
2603 return nullptr;
2604 }
2605
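/// As above, but if the shift amount is a non-splat BUILD_VECTOR, return the
/// smallest demanded per-element amount, provided every demanded amount is a
/// valid constant smaller than the bit width.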
2606 const APInt *SelectionDAG::getValidMinimumShiftAmountConstant(
2607 SDValue V, const APInt &DemandedElts) const {
2608 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
2609 V.getOpcode() == ISD::SRA) &&
2610 "Unknown shift node");
2611 if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts))
2612 return ValidAmt;
2613 unsigned BitWidth = V.getScalarValueSizeInBits();
2614 auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1));
2615 if (!BV)
2616 return nullptr;
2617 const APInt *MinShAmt = nullptr;
2618 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
2619 if (!DemandedElts[i])
2620 continue;
2621 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
2622 if (!SA)
2623 return nullptr;
2624 // Shifting more than the bitwidth is not valid.
2625 const APInt &ShAmt = SA->getAPIntValue();
2626 if (ShAmt.uge(BitWidth))
2627 return nullptr;
2628 if (MinShAmt && MinShAmt->ule(ShAmt))
2629 continue;
2630 MinShAmt = &ShAmt;
2631 }
2632 return MinShAmt;
2633 }
2634
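/// As above, but returns the largest valid demanded per-element shift amount.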
2635 const APInt *SelectionDAG::getValidMaximumShiftAmountConstant(
2636 SDValue V, const APInt &DemandedElts) const {
2637 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
2638 V.getOpcode() == ISD::SRA) &&
2639 "Unknown shift node");
2640 if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts))
2641 return ValidAmt;
2642 unsigned BitWidth = V.getScalarValueSizeInBits();
2643 auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1));
2644 if (!BV)
2645 return nullptr;
2646 const APInt *MaxShAmt = nullptr;
2647 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
2648 if (!DemandedElts[i])
2649 continue;
2650 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
2651 if (!SA)
2652 return nullptr;
2653 // Shifting more than the bitwidth is not valid.
2654 const APInt &ShAmt = SA->getAPIntValue();
2655 if (ShAmt.uge(BitWidth))
2656 return nullptr;
2657 if (MaxShAmt && MaxShAmt->uge(ShAmt))
2658 continue;
2659 MaxShAmt = &ShAmt;
2660 }
2661 return MaxShAmt;
2662 }
2663
2664 /// Determine which bits of Op are known to be either zero or one and return
2665 /// them in Known. For vectors, the known bits are those that are shared by
2666 /// every vector element.
2667 KnownBits SelectionDAG::computeKnownBits(SDValue Op, unsigned Depth) const {
2668 EVT VT = Op.getValueType();
2669
2670 // TODO: Until we have a plan for how to represent demanded elements for
2671 // scalable vectors, we can just bail out for now.
2672 if (Op.getValueType().isScalableVector()) {
2673 unsigned BitWidth = Op.getScalarValueSizeInBits();
2674 return KnownBits(BitWidth);
2675 }
2676
2677 APInt DemandedElts = VT.isVector()
2678 ? APInt::getAllOnesValue(VT.getVectorNumElements())
2679 : APInt(1, 1);
2680 return computeKnownBits(Op, DemandedElts, Depth);
2681 }
2682
2683 /// Determine which bits of Op are known to be either zero or one and return
2684 /// them in Known. The DemandedElts argument allows us to only collect the known
2685 /// bits that are shared by the requested vector elements.
2686 KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
2687 unsigned Depth) const {
2688 unsigned BitWidth = Op.getScalarValueSizeInBits();
2689
2690 KnownBits Known(BitWidth); // Don't know anything.
2691
2692 // TODO: Until we have a plan for how to represent demanded elements for
2693 // scalable vectors, we can just bail out for now.
2694 if (Op.getValueType().isScalableVector())
2695 return Known;
2696
2697 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
2698 // We know all of the bits for a constant!
2699 return KnownBits::makeConstant(C->getAPIntValue());
2700 }
2701 if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) {
2702 // We know all of the bits for a constant fp!
2703 return KnownBits::makeConstant(C->getValueAPF().bitcastToAPInt());
2704 }
2705
2706 if (Depth >= MaxRecursionDepth)
2707 return Known; // Limit search depth.
2708
2709 KnownBits Known2;
2710 unsigned NumElts = DemandedElts.getBitWidth();
2711 assert((!Op.getValueType().isVector() ||
2712 NumElts == Op.getValueType().getVectorNumElements()) &&
2713 "Unexpected vector size");
2714
2715 if (!DemandedElts)
2716 return Known; // No demanded elts, better to assume we don't know anything.
2717
2718 unsigned Opcode = Op.getOpcode();
2719 switch (Opcode) {
2720 case ISD::BUILD_VECTOR:
2721 // Collect the known bits that are shared by every demanded vector element.
2722 Known.Zero.setAllBits(); Known.One.setAllBits();
2723 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
2724 if (!DemandedElts[i])
2725 continue;
2726
2727 SDValue SrcOp = Op.getOperand(i);
2728 Known2 = computeKnownBits(SrcOp, Depth + 1);
2729
2730 // BUILD_VECTOR can implicitly truncate sources, we must handle this.
2731 if (SrcOp.getValueSizeInBits() != BitWidth) {
2732 assert(SrcOp.getValueSizeInBits() > BitWidth &&
2733 "Expected BUILD_VECTOR implicit truncation");
2734 Known2 = Known2.trunc(BitWidth);
2735 }
2736
2737 // Known bits are the values that are shared by every demanded element.
2738 Known = KnownBits::commonBits(Known, Known2);
2739
2740 // If we don't know any bits, early out.
2741 if (Known.isUnknown())
2742 break;
2743 }
2744 break;
2745 case ISD::VECTOR_SHUFFLE: {
2746 // Collect the known bits that are shared by every vector element referenced
2747 // by the shuffle.
2748 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
2749 Known.Zero.setAllBits(); Known.One.setAllBits();
2750 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
2751 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
2752 for (unsigned i = 0; i != NumElts; ++i) {
2753 if (!DemandedElts[i])
2754 continue;
2755
2756 int M = SVN->getMaskElt(i);
2757 if (M < 0) {
2758 // For UNDEF elements, we don't know anything about the common state of
2759 // the shuffle result.
2760 Known.resetAll();
2761 DemandedLHS.clearAllBits();
2762 DemandedRHS.clearAllBits();
2763 break;
2764 }
2765
2766 if ((unsigned)M < NumElts)
2767 DemandedLHS.setBit((unsigned)M % NumElts);
2768 else
2769 DemandedRHS.setBit((unsigned)M % NumElts);
2770 }
2771 // Known bits are the values that are shared by every demanded element.
2772 if (!!DemandedLHS) {
2773 SDValue LHS = Op.getOperand(0);
2774 Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1);
2775 Known = KnownBits::commonBits(Known, Known2);
2776 }
2777 // If we don't know any bits, early out.
2778 if (Known.isUnknown())
2779 break;
2780 if (!!DemandedRHS) {
2781 SDValue RHS = Op.getOperand(1);
2782 Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1);
2783 Known = KnownBits::commonBits(Known, Known2);
2784 }
2785 break;
2786 }
2787 case ISD::CONCAT_VECTORS: {
2788 // Split DemandedElts and test each of the demanded subvectors.
2789 Known.Zero.setAllBits(); Known.One.setAllBits();
2790 EVT SubVectorVT = Op.getOperand(0).getValueType();
2791 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
2792 unsigned NumSubVectors = Op.getNumOperands();
2793 for (unsigned i = 0; i != NumSubVectors; ++i) {
2794 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
2795 DemandedSub = DemandedSub.trunc(NumSubVectorElts);
2796 if (!!DemandedSub) {
2797 SDValue Sub = Op.getOperand(i);
2798 Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1);
2799 Known = KnownBits::commonBits(Known, Known2);
2800 }
2801 // If we don't know any bits, early out.
2802 if (Known.isUnknown())
2803 break;
2804 }
2805 break;
2806 }
2807 case ISD::INSERT_SUBVECTOR: {
2808 // Demand any elements from the subvector and the remainder from the src it's
2809 // inserted into.
2810 SDValue Src = Op.getOperand(0);
2811 SDValue Sub = Op.getOperand(1);
2812 uint64_t Idx = Op.getConstantOperandVal(2);
2813 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
2814 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
2815 APInt DemandedSrcElts = DemandedElts;
2816 DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx);
2817
2818 Known.One.setAllBits();
2819 Known.Zero.setAllBits();
2820 if (!!DemandedSubElts) {
2821 Known = computeKnownBits(Sub, DemandedSubElts, Depth + 1);
2822 if (Known.isUnknown())
2823 break; // early-out.
2824 }
2825 if (!!DemandedSrcElts) {
2826 Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
2827 Known = KnownBits::commonBits(Known, Known2);
2828 }
2829 break;
2830 }
2831 case ISD::EXTRACT_SUBVECTOR: {
2832 // Offset the demanded elts by the subvector index.
2833 SDValue Src = Op.getOperand(0);
2834 // Bail until we can represent demanded elements for scalable vectors.
2835 if (Src.getValueType().isScalableVector())
2836 break;
2837 uint64_t Idx = Op.getConstantOperandVal(1);
2838 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2839 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
2840 Known = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
2841 break;
2842 }
2843 case ISD::SCALAR_TO_VECTOR: {
2844 // We know as much about scalar_to_vector as we know about its source,
2845 // which becomes the first element of an otherwise unknown vector.
2846 if (DemandedElts != 1)
2847 break;
2848
2849 SDValue N0 = Op.getOperand(0);
2850 Known = computeKnownBits(N0, Depth + 1);
2851 if (N0.getValueSizeInBits() != BitWidth)
2852 Known = Known.trunc(BitWidth);
2853
2854 break;
2855 }
2856 case ISD::BITCAST: {
2857 SDValue N0 = Op.getOperand(0);
2858 EVT SubVT = N0.getValueType();
2859 unsigned SubBitWidth = SubVT.getScalarSizeInBits();
2860
2861 // Ignore bitcasts from unsupported types.
2862 if (!(SubVT.isInteger() || SubVT.isFloatingPoint()))
2863 break;
2864
2865 // Fast handling of 'identity' bitcasts.
2866 if (BitWidth == SubBitWidth) {
2867 Known = computeKnownBits(N0, DemandedElts, Depth + 1);
2868 break;
2869 }
2870
2871 bool IsLE = getDataLayout().isLittleEndian();
2872
2873 // Bitcast 'small element' vector to 'large element' scalar/vector.
2874 if ((BitWidth % SubBitWidth) == 0) {
2875 assert(N0.getValueType().isVector() && "Expected bitcast from vector");
2876
2877 // Collect known bits for the (larger) output by collecting the known
2878 // bits from each set of sub elements and shifting these into place.
2879 // We need to call computeKnownBits separately for each set of
2880 // sub elements, as the known bits for each are likely to be different.
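// For example, bitcasting v4i8 -> i32 merges the known bits of the four
// byte elements, each shifted into its byte position (reversed on
// big-endian targets).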
2881 unsigned SubScale = BitWidth / SubBitWidth;
2882 APInt SubDemandedElts(NumElts * SubScale, 0);
2883 for (unsigned i = 0; i != NumElts; ++i)
2884 if (DemandedElts[i])
2885 SubDemandedElts.setBit(i * SubScale);
2886
2887 for (unsigned i = 0; i != SubScale; ++i) {
2888 Known2 = computeKnownBits(N0, SubDemandedElts.shl(i),
2889 Depth + 1);
2890 unsigned Shifts = IsLE ? i : SubScale - 1 - i;
2891 Known.One |= Known2.One.zext(BitWidth).shl(SubBitWidth * Shifts);
2892 Known.Zero |= Known2.Zero.zext(BitWidth).shl(SubBitWidth * Shifts);
2893 }
2894 }
2895
2896 // Bitcast 'large element' scalar/vector to 'small element' vector.
2897 if ((SubBitWidth % BitWidth) == 0) {
2898 assert(Op.getValueType().isVector() && "Expected bitcast to vector");
2899
2900 // Collect known bits for the (smaller) output by collecting the known
2901 // bits from the overlapping larger input elements and extracting the
2902 // sub sections we actually care about.
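// For example, bitcasting i32 -> v4i8: each demanded byte element takes its
// known bits from the corresponding 8-bit slice of the 32-bit source.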
2903 unsigned SubScale = SubBitWidth / BitWidth;
2904 APInt SubDemandedElts(NumElts / SubScale, 0);
2905 for (unsigned i = 0; i != NumElts; ++i)
2906 if (DemandedElts[i])
2907 SubDemandedElts.setBit(i / SubScale);
2908
2909 Known2 = computeKnownBits(N0, SubDemandedElts, Depth + 1);
2910
2911 Known.Zero.setAllBits(); Known.One.setAllBits();
2912 for (unsigned i = 0; i != NumElts; ++i)
2913 if (DemandedElts[i]) {
2914 unsigned Shifts = IsLE ? i : NumElts - 1 - i;
2915 unsigned Offset = (Shifts % SubScale) * BitWidth;
2916 Known.One &= Known2.One.lshr(Offset).trunc(BitWidth);
2917 Known.Zero &= Known2.Zero.lshr(Offset).trunc(BitWidth);
2918 // If we don't know any bits, early out.
2919 if (Known.isUnknown())
2920 break;
2921 }
2922 }
2923 break;
2924 }
2925 case ISD::AND:
2926 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2927 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2928
2929 Known &= Known2;
2930 break;
2931 case ISD::OR:
2932 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2933 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2934
2935 Known |= Known2;
2936 break;
2937 case ISD::XOR:
2938 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2939 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2940
2941 Known ^= Known2;
2942 break;
2943 case ISD::MUL: {
2944 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2945 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2946 Known = KnownBits::computeForMul(Known, Known2);
2947 break;
2948 }
2949 case ISD::UDIV: {
2950 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2951 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2952 Known = KnownBits::udiv(Known, Known2);
2953 break;
2954 }
2955 case ISD::SELECT:
2956 case ISD::VSELECT:
2957 Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
2958 // If we don't know any bits, early out.
2959 if (Known.isUnknown())
2960 break;
2961 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth+1);
2962
2963 // Only known if known in both the LHS and RHS.
2964 Known = KnownBits::commonBits(Known, Known2);
2965 break;
2966 case ISD::SELECT_CC:
2967 Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth+1);
2968 // If we don't know any bits, early out.
2969 if (Known.isUnknown())
2970 break;
2971 Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
2972
2973 // Only known if known in both the LHS and RHS.
2974 Known = KnownBits::commonBits(Known, Known2);
2975 break;
2976 case ISD::SMULO:
2977 case ISD::UMULO:
2978 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
2979 if (Op.getResNo() != 1)
2980 break;
2981 // The boolean result conforms to getBooleanContents.
2982 // If we know the result of a setcc has the top bits zero, use this info.
2983     // We know that we have an integer-based boolean since these operations
2984     // are only available for integer types.
2985 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
2986 TargetLowering::ZeroOrOneBooleanContent &&
2987 BitWidth > 1)
2988 Known.Zero.setBitsFrom(1);
2989 break;
2990 case ISD::SETCC:
2991 case ISD::STRICT_FSETCC:
2992 case ISD::STRICT_FSETCCS: {
2993 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
2994 // If we know the result of a setcc has the top bits zero, use this info.
2995 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
2996 TargetLowering::ZeroOrOneBooleanContent &&
2997 BitWidth > 1)
2998 Known.Zero.setBitsFrom(1);
2999 break;
3000 }
3001 case ISD::SHL:
3002 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3003 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3004 Known = KnownBits::shl(Known, Known2);
3005
3006 // Minimum shift low bits are known zero.
3007 if (const APInt *ShMinAmt =
3008 getValidMinimumShiftAmountConstant(Op, DemandedElts))
3009 Known.Zero.setLowBits(ShMinAmt->getZExtValue());
3010 break;
3011 case ISD::SRL:
3012 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3013 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3014 Known = KnownBits::lshr(Known, Known2);
3015
3016 // Minimum shift high bits are known zero.
3017 if (const APInt *ShMinAmt =
3018 getValidMinimumShiftAmountConstant(Op, DemandedElts))
3019 Known.Zero.setHighBits(ShMinAmt->getZExtValue());
3020 break;
3021 case ISD::SRA:
3022 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3023 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3024 Known = KnownBits::ashr(Known, Known2);
3025 // TODO: Add minimum shift high known sign bits.
3026 break;
3027 case ISD::FSHL:
3028 case ISD::FSHR:
3029 if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(2), DemandedElts)) {
3030 unsigned Amt = C->getAPIntValue().urem(BitWidth);
3031
3032 // For fshl, 0-shift returns the 1st arg.
3033 // For fshr, 0-shift returns the 2nd arg.
3034 if (Amt == 0) {
3035 Known = computeKnownBits(Op.getOperand(Opcode == ISD::FSHL ? 0 : 1),
3036 DemandedElts, Depth + 1);
3037 break;
3038 }
3039
3040 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
3041 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
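      // Illustrative example (not from the original source): for fshl with
      // BW = 8 and Amt = 3, bits 7..3 of the result come from the low 5 bits of
      // X and bits 2..0 from the high 3 bits of Y, so the known bits of each
      // operand are shifted into those positions below.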
3042 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3043 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3044 if (Opcode == ISD::FSHL) {
3045 Known.One <<= Amt;
3046 Known.Zero <<= Amt;
3047 Known2.One.lshrInPlace(BitWidth - Amt);
3048 Known2.Zero.lshrInPlace(BitWidth - Amt);
3049 } else {
3050 Known.One <<= BitWidth - Amt;
3051 Known.Zero <<= BitWidth - Amt;
3052 Known2.One.lshrInPlace(Amt);
3053 Known2.Zero.lshrInPlace(Amt);
3054 }
3055 Known.One |= Known2.One;
3056 Known.Zero |= Known2.Zero;
3057 }
3058 break;
3059 case ISD::SIGN_EXTEND_INREG: {
3060 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3061 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3062 Known = Known.sextInReg(EVT.getScalarSizeInBits());
3063 break;
3064 }
3065 case ISD::CTTZ:
3066 case ISD::CTTZ_ZERO_UNDEF: {
3067 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3068 // If we have a known 1, its position is our upper bound.
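    // Illustrative example: if countMaxTrailingZeros() is 7, the result is at
    // most 7 and fits in Log2_32(7) + 1 = 3 bits, so bits 3 and above are zero.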
3069 unsigned PossibleTZ = Known2.countMaxTrailingZeros();
3070 unsigned LowBits = Log2_32(PossibleTZ) + 1;
3071 Known.Zero.setBitsFrom(LowBits);
3072 break;
3073 }
3074 case ISD::CTLZ:
3075 case ISD::CTLZ_ZERO_UNDEF: {
3076 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3077 // If we have a known 1, its position is our upper bound.
3078 unsigned PossibleLZ = Known2.countMaxLeadingZeros();
3079 unsigned LowBits = Log2_32(PossibleLZ) + 1;
3080 Known.Zero.setBitsFrom(LowBits);
3081 break;
3082 }
3083 case ISD::CTPOP: {
3084 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3085 // If we know some of the bits are zero, they can't be one.
3086 unsigned PossibleOnes = Known2.countMaxPopulation();
3087 Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1);
3088 break;
3089 }
3090 case ISD::PARITY: {
3091 // Parity returns 0 everywhere but the LSB.
3092 Known.Zero.setBitsFrom(1);
3093 break;
3094 }
3095 case ISD::LOAD: {
3096 LoadSDNode *LD = cast<LoadSDNode>(Op);
3097 const Constant *Cst = TLI->getTargetConstantFromLoad(LD);
3098 if (ISD::isNON_EXTLoad(LD) && Cst) {
3099 // Determine any common known bits from the loaded constant pool value.
3100 Type *CstTy = Cst->getType();
3101 if ((NumElts * BitWidth) == CstTy->getPrimitiveSizeInBits()) {
3102           // If it's a vector splat, then we can (quickly) reuse the scalar path.
3103 // NOTE: We assume all elements match and none are UNDEF.
3104 if (CstTy->isVectorTy()) {
3105 if (const Constant *Splat = Cst->getSplatValue()) {
3106 Cst = Splat;
3107 CstTy = Cst->getType();
3108 }
3109 }
3110 // TODO - do we need to handle different bitwidths?
3111 if (CstTy->isVectorTy() && BitWidth == CstTy->getScalarSizeInBits()) {
3112 // Iterate across all vector elements finding common known bits.
3113 Known.One.setAllBits();
3114 Known.Zero.setAllBits();
3115 for (unsigned i = 0; i != NumElts; ++i) {
3116 if (!DemandedElts[i])
3117 continue;
3118 if (Constant *Elt = Cst->getAggregateElement(i)) {
3119 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
3120 const APInt &Value = CInt->getValue();
3121 Known.One &= Value;
3122 Known.Zero &= ~Value;
3123 continue;
3124 }
3125 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
3126 APInt Value = CFP->getValueAPF().bitcastToAPInt();
3127 Known.One &= Value;
3128 Known.Zero &= ~Value;
3129 continue;
3130 }
3131 }
3132 Known.One.clearAllBits();
3133 Known.Zero.clearAllBits();
3134 break;
3135 }
3136 } else if (BitWidth == CstTy->getPrimitiveSizeInBits()) {
3137 if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
3138 Known = KnownBits::makeConstant(CInt->getValue());
3139 } else if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
3140 Known =
3141 KnownBits::makeConstant(CFP->getValueAPF().bitcastToAPInt());
3142 }
3143 }
3144 }
3145 } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
3146 // If this is a ZEXTLoad and we are looking at the loaded value.
3147 EVT VT = LD->getMemoryVT();
3148 unsigned MemBits = VT.getScalarSizeInBits();
3149 Known.Zero.setBitsFrom(MemBits);
3150 } else if (const MDNode *Ranges = LD->getRanges()) {
3151 if (LD->getExtensionType() == ISD::NON_EXTLOAD)
3152 computeKnownBitsFromRangeMetadata(*Ranges, Known);
3153 }
3154 break;
3155 }
3156 case ISD::ZERO_EXTEND_VECTOR_INREG: {
3157 EVT InVT = Op.getOperand(0).getValueType();
3158 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
3159 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
3160 Known = Known.zext(BitWidth);
3161 break;
3162 }
3163 case ISD::ZERO_EXTEND: {
3164 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3165 Known = Known.zext(BitWidth);
3166 break;
3167 }
3168 case ISD::SIGN_EXTEND_VECTOR_INREG: {
3169 EVT InVT = Op.getOperand(0).getValueType();
3170 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
3171 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
3172 // If the sign bit is known to be zero or one, then sext will extend
3173 // it to the top bits, else it will just zext.
3174 Known = Known.sext(BitWidth);
3175 break;
3176 }
3177 case ISD::SIGN_EXTEND: {
3178 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3179 // If the sign bit is known to be zero or one, then sext will extend
3180 // it to the top bits, else it will just zext.
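    // Illustrative example: sign-extending an i8 whose sign bit is known zero
    // to i32 makes bits 8..31 known zero; if the sign bit is known one, those
    // bits become known one instead.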
3181 Known = Known.sext(BitWidth);
3182 break;
3183 }
3184 case ISD::ANY_EXTEND_VECTOR_INREG: {
3185 EVT InVT = Op.getOperand(0).getValueType();
3186 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
3187 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
3188 Known = Known.anyext(BitWidth);
3189 break;
3190 }
3191 case ISD::ANY_EXTEND: {
3192 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3193 Known = Known.anyext(BitWidth);
3194 break;
3195 }
3196 case ISD::TRUNCATE: {
3197 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3198 Known = Known.trunc(BitWidth);
3199 break;
3200 }
3201 case ISD::AssertZext: {
3202 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3203 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
3204 Known = computeKnownBits(Op.getOperand(0), Depth+1);
3205 Known.Zero |= (~InMask);
3206 Known.One &= (~Known.Zero);
3207 break;
3208 }
3209 case ISD::AssertAlign: {
3210 unsigned LogOfAlign = Log2(cast<AssertAlignSDNode>(Op)->getAlign());
3211 assert(LogOfAlign != 0);
3212     // If a node is guaranteed to be aligned, set the low zero bits accordingly,
3213     // as well as clearing the corresponding one bits.
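    // Illustrative example: an asserted alignment of 8 gives LogOfAlign = 3, so
    // the low 3 bits become known zero.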
3214 Known.Zero.setLowBits(LogOfAlign);
3215 Known.One.clearLowBits(LogOfAlign);
3216 break;
3217 }
3218 case ISD::FGETSIGN:
3219 // All bits are zero except the low bit.
3220 Known.Zero.setBitsFrom(1);
3221 break;
3222 case ISD::USUBO:
3223 case ISD::SSUBO:
3224 if (Op.getResNo() == 1) {
3225 // If we know the result of a setcc has the top bits zero, use this info.
3226 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3227 TargetLowering::ZeroOrOneBooleanContent &&
3228 BitWidth > 1)
3229 Known.Zero.setBitsFrom(1);
3230 break;
3231 }
3232 LLVM_FALLTHROUGH;
3233 case ISD::SUB:
3234 case ISD::SUBC: {
3235 assert(Op.getResNo() == 0 &&
3236 "We only compute knownbits for the difference here.");
3237
3238 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3239 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3240 Known = KnownBits::computeForAddSub(/* Add */ false, /* NSW */ false,
3241 Known, Known2);
3242 break;
3243 }
3244 case ISD::UADDO:
3245 case ISD::SADDO:
3246 case ISD::ADDCARRY:
3247 if (Op.getResNo() == 1) {
3248 // If we know the result of a setcc has the top bits zero, use this info.
3249 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3250 TargetLowering::ZeroOrOneBooleanContent &&
3251 BitWidth > 1)
3252 Known.Zero.setBitsFrom(1);
3253 break;
3254 }
3255 LLVM_FALLTHROUGH;
3256 case ISD::ADD:
3257 case ISD::ADDC:
3258 case ISD::ADDE: {
3259 assert(Op.getResNo() == 0 && "We only compute knownbits for the sum here.");
3260
3261 // With ADDE and ADDCARRY, a carry bit may be added in.
3262 KnownBits Carry(1);
3263 if (Opcode == ISD::ADDE)
3264 // Can't track carry from glue, set carry to unknown.
3265 Carry.resetAll();
3266 else if (Opcode == ISD::ADDCARRY)
3267 // TODO: Compute known bits for the carry operand. Not sure if it is worth
3268 // the trouble (how often will we find a known carry bit). And I haven't
3269 // tested this very much yet, but something like this might work:
3270 // Carry = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
3271 // Carry = Carry.zextOrTrunc(1, false);
3272 Carry.resetAll();
3273 else
3274 Carry.setAllZero();
3275
3276 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3277 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3278 Known = KnownBits::computeForAddCarry(Known, Known2, Carry);
3279 break;
3280 }
3281 case ISD::SREM: {
3282 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3283 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3284 Known = KnownBits::srem(Known, Known2);
3285 break;
3286 }
3287 case ISD::UREM: {
3288 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3289 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3290 Known = KnownBits::urem(Known, Known2);
3291 break;
3292 }
3293 case ISD::EXTRACT_ELEMENT: {
3294 Known = computeKnownBits(Op.getOperand(0), Depth+1);
3295 const unsigned Index = Op.getConstantOperandVal(1);
3296 const unsigned EltBitWidth = Op.getValueSizeInBits();
3297
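    // EXTRACT_ELEMENT picks the Index'th EltBitWidth-sized piece of the operand,
    // counting from the least significant end. Illustrative example: extracting
    // element 1 of an i64 as i32 yields bits 32..63.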
3298 // Remove low part of known bits mask
3299 Known.Zero = Known.Zero.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
3300 Known.One = Known.One.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
3301
3302 // Remove high part of known bit mask
3303 Known = Known.trunc(EltBitWidth);
3304 break;
3305 }
3306 case ISD::EXTRACT_VECTOR_ELT: {
3307 SDValue InVec = Op.getOperand(0);
3308 SDValue EltNo = Op.getOperand(1);
3309 EVT VecVT = InVec.getValueType();
3310 // computeKnownBits not yet implemented for scalable vectors.
3311 if (VecVT.isScalableVector())
3312 break;
3313 const unsigned EltBitWidth = VecVT.getScalarSizeInBits();
3314 const unsigned NumSrcElts = VecVT.getVectorNumElements();
3315
3316     // If BitWidth > EltBitWidth the value is any-extended, so we do not know
3317     // anything about the extended bits.
3318 if (BitWidth > EltBitWidth)
3319 Known = Known.trunc(EltBitWidth);
3320
3321 // If we know the element index, just demand that vector element, else for
3322 // an unknown element index, ignore DemandedElts and demand them all.
3323 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
3324 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3325 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
3326 DemandedSrcElts =
3327 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
3328
3329 Known = computeKnownBits(InVec, DemandedSrcElts, Depth + 1);
3330 if (BitWidth > EltBitWidth)
3331 Known = Known.anyext(BitWidth);
3332 break;
3333 }
3334 case ISD::INSERT_VECTOR_ELT: {
3335 // If we know the element index, split the demand between the
3336 // source vector and the inserted element, otherwise assume we need
3337 // the original demanded vector elements and the value.
3338 SDValue InVec = Op.getOperand(0);
3339 SDValue InVal = Op.getOperand(1);
3340 SDValue EltNo = Op.getOperand(2);
3341 bool DemandedVal = true;
3342 APInt DemandedVecElts = DemandedElts;
3343 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
3344 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
3345 unsigned EltIdx = CEltNo->getZExtValue();
3346 DemandedVal = !!DemandedElts[EltIdx];
3347 DemandedVecElts.clearBit(EltIdx);
3348 }
3349 Known.One.setAllBits();
3350 Known.Zero.setAllBits();
3351 if (DemandedVal) {
3352 Known2 = computeKnownBits(InVal, Depth + 1);
3353 Known = KnownBits::commonBits(Known, Known2.zextOrTrunc(BitWidth));
3354 }
3355 if (!!DemandedVecElts) {
3356 Known2 = computeKnownBits(InVec, DemandedVecElts, Depth + 1);
3357 Known = KnownBits::commonBits(Known, Known2);
3358 }
3359 break;
3360 }
3361 case ISD::BITREVERSE: {
3362 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3363 Known = Known2.reverseBits();
3364 break;
3365 }
3366 case ISD::BSWAP: {
3367 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3368 Known = Known2.byteSwap();
3369 break;
3370 }
3371 case ISD::ABS: {
3372 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3373 Known = Known2.abs();
3374 break;
3375 }
3376 case ISD::UMIN: {
3377 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3378 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3379 Known = KnownBits::umin(Known, Known2);
3380 break;
3381 }
3382 case ISD::UMAX: {
3383 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3384 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3385 Known = KnownBits::umax(Known, Known2);
3386 break;
3387 }
3388 case ISD::SMIN:
3389 case ISD::SMAX: {
3390 // If we have a clamp pattern, we know that the number of sign bits will be
3391 // the minimum of the clamp min/max range.
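    // Illustrative example (not from the original source): smin(smax(X, 0), 255)
    // clamps X to [0, 255]; both bounds are non-negative with at least 24 sign
    // bits in i32, so the top 24 bits of the result become known zero below.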
3392 bool IsMax = (Opcode == ISD::SMAX);
3393 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
3394 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
3395 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
3396 CstHigh =
3397 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
3398 if (CstLow && CstHigh) {
3399 if (!IsMax)
3400 std::swap(CstLow, CstHigh);
3401
3402 const APInt &ValueLow = CstLow->getAPIntValue();
3403 const APInt &ValueHigh = CstHigh->getAPIntValue();
3404 if (ValueLow.sle(ValueHigh)) {
3405 unsigned LowSignBits = ValueLow.getNumSignBits();
3406 unsigned HighSignBits = ValueHigh.getNumSignBits();
3407 unsigned MinSignBits = std::min(LowSignBits, HighSignBits);
3408 if (ValueLow.isNegative() && ValueHigh.isNegative()) {
3409 Known.One.setHighBits(MinSignBits);
3410 break;
3411 }
3412 if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) {
3413 Known.Zero.setHighBits(MinSignBits);
3414 break;
3415 }
3416 }
3417 }
3418
3419 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3420 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3421 if (IsMax)
3422 Known = KnownBits::smax(Known, Known2);
3423 else
3424 Known = KnownBits::smin(Known, Known2);
3425 break;
3426 }
3427 case ISD::FrameIndex:
3428 case ISD::TargetFrameIndex:
3429 TLI->computeKnownBitsForFrameIndex(cast<FrameIndexSDNode>(Op)->getIndex(),
3430 Known, getMachineFunction());
3431 break;
3432
3433 default:
3434 if (Opcode < ISD::BUILTIN_OP_END)
3435 break;
3436 LLVM_FALLTHROUGH;
3437 case ISD::INTRINSIC_WO_CHAIN:
3438 case ISD::INTRINSIC_W_CHAIN:
3439 case ISD::INTRINSIC_VOID:
3440 // Allow the target to implement this method for its nodes.
3441 TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth);
3442 break;
3443 }
3444
3445 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
3446 return Known;
3447 }
3448
3449 SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0,
3450 SDValue N1) const {
3451   // X + 0 never overflows.
3452 if (isNullConstant(N1))
3453 return OFK_Never;
3454
3455 KnownBits N1Known = computeKnownBits(N1);
3456 if (N1Known.Zero.getBoolValue()) {
3457 KnownBits N0Known = computeKnownBits(N0);
3458
3459 bool overflow;
3460 (void)N0Known.getMaxValue().uadd_ov(N1Known.getMaxValue(), overflow);
3461 if (!overflow)
3462 return OFK_Never;
3463 }
3464
3465   // mulhi + 1 never overflows.
3466 if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 &&
3467 (N1Known.getMaxValue() & 0x01) == N1Known.getMaxValue())
3468 return OFK_Never;
3469
3470 if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) {
3471 KnownBits N0Known = computeKnownBits(N0);
3472
3473 if ((N0Known.getMaxValue() & 0x01) == N0Known.getMaxValue())
3474 return OFK_Never;
3475 }
3476
3477 return OFK_Sometime;
3478 }
3479
3480 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
3481 EVT OpVT = Val.getValueType();
3482 unsigned BitWidth = OpVT.getScalarSizeInBits();
3483
3484 // Is the constant a known power of 2?
3485 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val))
3486 return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3487
3488 // A left-shift of a constant one will have exactly one bit set because
3489 // shifting the bit off the end is undefined.
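  // Illustrative example: (i32 1) << X has exactly one bit set for any
  // well-defined shift amount, so it is a power of two.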
3490 if (Val.getOpcode() == ISD::SHL) {
3491 auto *C = isConstOrConstSplat(Val.getOperand(0));
3492 if (C && C->getAPIntValue() == 1)
3493 return true;
3494 }
3495
3496 // Similarly, a logical right-shift of a constant sign-bit will have exactly
3497 // one bit set.
3498 if (Val.getOpcode() == ISD::SRL) {
3499 auto *C = isConstOrConstSplat(Val.getOperand(0));
3500 if (C && C->getAPIntValue().isSignMask())
3501 return true;
3502 }
3503
3504 // Are all operands of a build vector constant powers of two?
3505 if (Val.getOpcode() == ISD::BUILD_VECTOR)
3506 if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) {
3507 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E))
3508 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3509 return false;
3510 }))
3511 return true;
3512
3513 // More could be done here, though the above checks are enough
3514 // to handle some common cases.
3515
3516 // Fall back to computeKnownBits to catch other known cases.
3517 KnownBits Known = computeKnownBits(Val);
3518 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
3519 }
3520
3521 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
3522 EVT VT = Op.getValueType();
3523
3524 // TODO: Assume we don't know anything for now.
3525 if (VT.isScalableVector())
3526 return 1;
3527
3528 APInt DemandedElts = VT.isVector()
3529 ? APInt::getAllOnesValue(VT.getVectorNumElements())
3530 : APInt(1, 1);
3531 return ComputeNumSignBits(Op, DemandedElts, Depth);
3532 }
3533
3534 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
3535 unsigned Depth) const {
3536 EVT VT = Op.getValueType();
3537 assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!");
3538 unsigned VTBits = VT.getScalarSizeInBits();
3539 unsigned NumElts = DemandedElts.getBitWidth();
3540 unsigned Tmp, Tmp2;
3541 unsigned FirstAnswer = 1;
3542
3543 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
3544 const APInt &Val = C->getAPIntValue();
3545 return Val.getNumSignBits();
3546 }
3547
3548 if (Depth >= MaxRecursionDepth)
3549 return 1; // Limit search depth.
3550
3551 if (!DemandedElts || VT.isScalableVector())
3552 return 1; // No demanded elts, better to assume we don't know anything.
3553
3554 unsigned Opcode = Op.getOpcode();
3555 switch (Opcode) {
3556 default: break;
3557 case ISD::AssertSext:
3558 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
3559 return VTBits-Tmp+1;
3560 case ISD::AssertZext:
3561 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
3562 return VTBits-Tmp;
3563
3564 case ISD::BUILD_VECTOR:
3565 Tmp = VTBits;
3566 for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) {
3567 if (!DemandedElts[i])
3568 continue;
3569
3570 SDValue SrcOp = Op.getOperand(i);
3571 Tmp2 = ComputeNumSignBits(SrcOp, Depth + 1);
3572
3573 // BUILD_VECTOR can implicitly truncate sources, we must handle this.
3574 if (SrcOp.getValueSizeInBits() != VTBits) {
3575 assert(SrcOp.getValueSizeInBits() > VTBits &&
3576 "Expected BUILD_VECTOR implicit truncation");
3577 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits;
3578 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1);
3579 }
3580 Tmp = std::min(Tmp, Tmp2);
3581 }
3582 return Tmp;
3583
3584 case ISD::VECTOR_SHUFFLE: {
3585 // Collect the minimum number of sign bits that are shared by every vector
3586 // element referenced by the shuffle.
3587 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
3588 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
3589 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
3590 for (unsigned i = 0; i != NumElts; ++i) {
3591 int M = SVN->getMaskElt(i);
3592 if (!DemandedElts[i])
3593 continue;
3594 // For UNDEF elements, we don't know anything about the common state of
3595 // the shuffle result.
3596 if (M < 0)
3597 return 1;
3598 if ((unsigned)M < NumElts)
3599 DemandedLHS.setBit((unsigned)M % NumElts);
3600 else
3601 DemandedRHS.setBit((unsigned)M % NumElts);
3602 }
3603 Tmp = std::numeric_limits<unsigned>::max();
3604 if (!!DemandedLHS)
3605 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
3606 if (!!DemandedRHS) {
3607 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
3608 Tmp = std::min(Tmp, Tmp2);
3609 }
3610 // If we don't know anything, early out and try computeKnownBits fall-back.
3611 if (Tmp == 1)
3612 break;
3613 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3614 return Tmp;
3615 }
3616
3617 case ISD::BITCAST: {
3618 SDValue N0 = Op.getOperand(0);
3619 EVT SrcVT = N0.getValueType();
3620 unsigned SrcBits = SrcVT.getScalarSizeInBits();
3621
3622     // Ignore bitcasts from unsupported types.
3623 if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint()))
3624 break;
3625
3626 // Fast handling of 'identity' bitcasts.
3627 if (VTBits == SrcBits)
3628 return ComputeNumSignBits(N0, DemandedElts, Depth + 1);
3629
3630 bool IsLE = getDataLayout().isLittleEndian();
3631
3632 // Bitcast 'large element' scalar/vector to 'small element' vector.
3633 if ((SrcBits % VTBits) == 0) {
3634 assert(VT.isVector() && "Expected bitcast to vector");
3635
3636 unsigned Scale = SrcBits / VTBits;
3637 APInt SrcDemandedElts(NumElts / Scale, 0);
3638 for (unsigned i = 0; i != NumElts; ++i)
3639 if (DemandedElts[i])
3640 SrcDemandedElts.setBit(i / Scale);
3641
3642 // Fast case - sign splat can be simply split across the small elements.
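      // Illustrative example: if an i64 source is a sign splat (64 sign bits),
      // each i32 element of the bitcast result is all-zeros or all-ones, i.e.
      // has the full VTBits sign bits.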
3643 Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1);
3644 if (Tmp == SrcBits)
3645 return VTBits;
3646
3647 // Slow case - determine how far the sign extends into each sub-element.
3648 Tmp2 = VTBits;
3649 for (unsigned i = 0; i != NumElts; ++i)
3650 if (DemandedElts[i]) {
3651 unsigned SubOffset = i % Scale;
3652 SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset);
3653 SubOffset = SubOffset * VTBits;
3654 if (Tmp <= SubOffset)
3655 return 1;
3656 Tmp2 = std::min(Tmp2, Tmp - SubOffset);
3657 }
3658 return Tmp2;
3659 }
3660 break;
3661 }
3662
3663 case ISD::SIGN_EXTEND:
3664 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits();
3665 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp;
3666 case ISD::SIGN_EXTEND_INREG:
3667 // Max of the input and what this extends.
3668 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
3669 Tmp = VTBits-Tmp+1;
3670 Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3671 return std::max(Tmp, Tmp2);
3672 case ISD::SIGN_EXTEND_VECTOR_INREG: {
3673 SDValue Src = Op.getOperand(0);
3674 EVT SrcVT = Src.getValueType();
3675 APInt DemandedSrcElts = DemandedElts.zextOrSelf(SrcVT.getVectorNumElements());
3676 Tmp = VTBits - SrcVT.getScalarSizeInBits();
3677 return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp;
3678 }
3679 case ISD::SRA:
3680 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3681 // SRA X, C -> adds C sign bits.
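    // Illustrative example: an arithmetic shift right by a constant 4 of a value
    // with 3 known sign bits yields at least min(3 + 4, VTBits) = 7 sign bits.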
3682 if (const APInt *ShAmt =
3683 getValidMinimumShiftAmountConstant(Op, DemandedElts))
3684 Tmp = std::min<uint64_t>(Tmp + ShAmt->getZExtValue(), VTBits);
3685 return Tmp;
3686 case ISD::SHL:
3687 if (const APInt *ShAmt =
3688 getValidMaximumShiftAmountConstant(Op, DemandedElts)) {
3689 // shl destroys sign bits, ensure it doesn't shift out all sign bits.
3690 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3691 if (ShAmt->ult(Tmp))
3692 return Tmp - ShAmt->getZExtValue();
3693 }
3694 break;
3695 case ISD::AND:
3696 case ISD::OR:
3697 case ISD::XOR: // NOT is handled here.
3698 // Logical binary ops preserve the number of sign bits at the worst.
3699 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3700 if (Tmp != 1) {
3701 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
3702 FirstAnswer = std::min(Tmp, Tmp2);
3703 // We computed what we know about the sign bits as our first
3704 // answer. Now proceed to the generic code that uses
3705 // computeKnownBits, and pick whichever answer is better.
3706 }
3707 break;
3708
3709 case ISD::SELECT:
3710 case ISD::VSELECT:
3711 Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
3712 if (Tmp == 1) return 1; // Early out.
3713 Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
3714 return std::min(Tmp, Tmp2);
3715 case ISD::SELECT_CC:
3716 Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
3717 if (Tmp == 1) return 1; // Early out.
3718 Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1);
3719 return std::min(Tmp, Tmp2);
3720
3721 case ISD::SMIN:
3722 case ISD::SMAX: {
3723 // If we have a clamp pattern, we know that the number of sign bits will be
3724 // the minimum of the clamp min/max range.
3725 bool IsMax = (Opcode == ISD::SMAX);
3726 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
3727 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
3728 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
3729 CstHigh =
3730 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
3731 if (CstLow && CstHigh) {
3732 if (!IsMax)
3733 std::swap(CstLow, CstHigh);
3734 if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) {
3735 Tmp = CstLow->getAPIntValue().getNumSignBits();
3736 Tmp2 = CstHigh->getAPIntValue().getNumSignBits();
3737 return std::min(Tmp, Tmp2);
3738 }
3739 }
3740
3741 // Fallback - just get the minimum number of sign bits of the operands.
3742 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3743 if (Tmp == 1)
3744 return 1; // Early out.
3745 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
3746 return std::min(Tmp, Tmp2);
3747 }
3748 case ISD::UMIN:
3749 case ISD::UMAX:
3750 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3751 if (Tmp == 1)
3752 return 1; // Early out.
3753 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
3754 return std::min(Tmp, Tmp2);
3755 case ISD::SADDO:
3756 case ISD::UADDO:
3757 case ISD::SSUBO:
3758 case ISD::USUBO:
3759 case ISD::SMULO:
3760 case ISD::UMULO:
3761 if (Op.getResNo() != 1)
3762 break;
3763 // The boolean result conforms to getBooleanContents. Fall through.
3764 // If setcc returns 0/-1, all bits are sign bits.
3765 // We know that we have an integer-based boolean since these operations
3766     // are only available for integer types.
3767 if (TLI->getBooleanContents(VT.isVector(), false) ==
3768 TargetLowering::ZeroOrNegativeOneBooleanContent)
3769 return VTBits;
3770 break;
3771 case ISD::SETCC:
3772 case ISD::STRICT_FSETCC:
3773 case ISD::STRICT_FSETCCS: {
3774 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
3775 // If setcc returns 0/-1, all bits are sign bits.
3776 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
3777 TargetLowering::ZeroOrNegativeOneBooleanContent)
3778 return VTBits;
3779 break;
3780 }
3781 case ISD::ROTL:
3782 case ISD::ROTR:
3783 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3784
3785     // If we're rotating a 0/-1 value, then it stays a 0/-1 value.
3786 if (Tmp == VTBits)
3787 return VTBits;
3788
3789 if (ConstantSDNode *C =
3790 isConstOrConstSplat(Op.getOperand(1), DemandedElts)) {
3791 unsigned RotAmt = C->getAPIntValue().urem(VTBits);
3792
3793       // Handle rotate right by N like a rotate left by VTBits-N.
3794 if (Opcode == ISD::ROTR)
3795 RotAmt = (VTBits - RotAmt) % VTBits;
3796
3797 // If we aren't rotating out all of the known-in sign bits, return the
3798 // number that are left. This handles rotl(sext(x), 1) for example.
3799 if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt);
3800 }
3801 break;
3802 case ISD::ADD:
3803 case ISD::ADDC:
3804 // Add can have at most one carry bit. Thus we know that the output
3805 // is, at worst, one more bit than the inputs.
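    // Illustrative example: if both i32 operands have at least 10 sign bits, the
    // sum has at least 10 - 1 = 9 sign bits.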
3806 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3807 if (Tmp == 1) return 1; // Early out.
3808
3809 // Special case decrementing a value (ADD X, -1):
3810 if (ConstantSDNode *CRHS =
3811 isConstOrConstSplat(Op.getOperand(1), DemandedElts))
3812 if (CRHS->isAllOnesValue()) {
3813 KnownBits Known =
3814 computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3815
3816 // If the input is known to be 0 or 1, the output is 0/-1, which is all
3817 // sign bits set.
3818 if ((Known.Zero | 1).isAllOnesValue())
3819 return VTBits;
3820
3821 // If we are subtracting one from a positive number, there is no carry
3822 // out of the result.
3823 if (Known.isNonNegative())
3824 return Tmp;
3825 }
3826
3827 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
3828 if (Tmp2 == 1) return 1; // Early out.
3829 return std::min(Tmp, Tmp2) - 1;
3830 case ISD::SUB:
3831 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
3832 if (Tmp2 == 1) return 1; // Early out.
3833
3834 // Handle NEG.
3835 if (ConstantSDNode *CLHS =
3836 isConstOrConstSplat(Op.getOperand(0), DemandedElts))
3837 if (CLHS->isNullValue()) {
3838 KnownBits Known =
3839 computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3840 // If the input is known to be 0 or 1, the output is 0/-1, which is all
3841 // sign bits set.
3842 if ((Known.Zero | 1).isAllOnesValue())
3843 return VTBits;
3844
3845 // If the input is known to be positive (the sign bit is known clear),
3846 // the output of the NEG has the same number of sign bits as the input.
3847 if (Known.isNonNegative())
3848 return Tmp2;
3849
3850 // Otherwise, we treat this like a SUB.
3851 }
3852
3853 // Sub can have at most one carry bit. Thus we know that the output
3854 // is, at worst, one more bit than the inputs.
3855 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3856 if (Tmp == 1) return 1; // Early out.
3857 return std::min(Tmp, Tmp2) - 1;
3858 case ISD::MUL: {
3859 // The output of the Mul can be at most twice the valid bits in the inputs.
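    // Illustrative example: for i32 operands with 20 and 18 sign bits, the
    // operands occupy at most 13 and 15 value bits, the product fits in 28 bits,
    // and the result keeps at least 32 - 28 + 1 = 5 sign bits.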
3860 unsigned SignBitsOp0 = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3861 if (SignBitsOp0 == 1)
3862 break;
3863 unsigned SignBitsOp1 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
3864 if (SignBitsOp1 == 1)
3865 break;
3866 unsigned OutValidBits =
3867 (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1);
3868 return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1;
3869 }
3870 case ISD::TRUNCATE: {
3871 // Check if the sign bits of source go down as far as the truncated value.
3872 unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
3873 unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3874 if (NumSrcSignBits > (NumSrcBits - VTBits))
3875 return NumSrcSignBits - (NumSrcBits - VTBits);
3876 break;
3877 }
3878 case ISD::EXTRACT_ELEMENT: {
3879 const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3880 const int BitWidth = Op.getValueSizeInBits();
3881 const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;
3882
3883     // Get the reverse index (starting from 1); Op1's value indexes elements from
3884     // the little end, while the sign starts at the big end.
3885 const int rIndex = Items - 1 - Op.getConstantOperandVal(1);
3886
3887     // If the sign portion ends in our element, the subtraction gives the correct
3888     // result. Otherwise it gives either a negative or a greater-than-bitwidth result.
3889 return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
3890 }
3891 case ISD::INSERT_VECTOR_ELT: {
3892 // If we know the element index, split the demand between the
3893 // source vector and the inserted element, otherwise assume we need
3894 // the original demanded vector elements and the value.
3895 SDValue InVec = Op.getOperand(0);
3896 SDValue InVal = Op.getOperand(1);
3897 SDValue EltNo = Op.getOperand(2);
3898 bool DemandedVal = true;
3899 APInt DemandedVecElts = DemandedElts;
3900 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
3901 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
3902 unsigned EltIdx = CEltNo->getZExtValue();
3903 DemandedVal = !!DemandedElts[EltIdx];
3904 DemandedVecElts.clearBit(EltIdx);
3905 }
3906 Tmp = std::numeric_limits<unsigned>::max();
3907 if (DemandedVal) {
3908 // TODO - handle implicit truncation of inserted elements.
3909 if (InVal.getScalarValueSizeInBits() != VTBits)
3910 break;
3911 Tmp2 = ComputeNumSignBits(InVal, Depth + 1);
3912 Tmp = std::min(Tmp, Tmp2);
3913 }
3914 if (!!DemandedVecElts) {
3915 Tmp2 = ComputeNumSignBits(InVec, DemandedVecElts, Depth + 1);
3916 Tmp = std::min(Tmp, Tmp2);
3917 }
3918 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3919 return Tmp;
3920 }
3921 case ISD::EXTRACT_VECTOR_ELT: {
3922 SDValue InVec = Op.getOperand(0);
3923 SDValue EltNo = Op.getOperand(1);
3924 EVT VecVT = InVec.getValueType();
3925 const unsigned BitWidth = Op.getValueSizeInBits();
3926 const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
3927 const unsigned NumSrcElts = VecVT.getVectorNumElements();
3928
3929     // If BitWidth > EltBitWidth the value is any-extended, and we do not know
3930 // anything about sign bits. But if the sizes match we can derive knowledge
3931 // about sign bits from the vector operand.
3932 if (BitWidth != EltBitWidth)
3933 break;
3934
3935 // If we know the element index, just demand that vector element, else for
3936 // an unknown element index, ignore DemandedElts and demand them all.
3937 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
3938 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3939 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
3940 DemandedSrcElts =
3941 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
3942
3943 return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1);
3944 }
3945 case ISD::EXTRACT_SUBVECTOR: {
3946 // Offset the demanded elts by the subvector index.
3947 SDValue Src = Op.getOperand(0);
3948 // Bail until we can represent demanded elements for scalable vectors.
3949 if (Src.getValueType().isScalableVector())
3950 break;
3951 uint64_t Idx = Op.getConstantOperandVal(1);
3952 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3953 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
3954 return ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
3955 }
3956 case ISD::CONCAT_VECTORS: {
3957 // Determine the minimum number of sign bits across all demanded
3958 // elts of the input vectors. Early out if the result is already 1.
3959 Tmp = std::numeric_limits<unsigned>::max();
3960 EVT SubVectorVT = Op.getOperand(0).getValueType();
3961 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
3962 unsigned NumSubVectors = Op.getNumOperands();
3963 for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
3964 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
3965 DemandedSub = DemandedSub.trunc(NumSubVectorElts);
3966 if (!DemandedSub)
3967 continue;
3968 Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1);
3969 Tmp = std::min(Tmp, Tmp2);
3970 }
3971 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3972 return Tmp;
3973 }
3974 case ISD::INSERT_SUBVECTOR: {
3975     // Demand any elements from the subvector and the remainder from the src it's
3976     // inserted into.
3977 SDValue Src = Op.getOperand(0);
3978 SDValue Sub = Op.getOperand(1);
3979 uint64_t Idx = Op.getConstantOperandVal(2);
3980 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
3981 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
3982 APInt DemandedSrcElts = DemandedElts;
3983 DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx);
3984
3985 Tmp = std::numeric_limits<unsigned>::max();
3986 if (!!DemandedSubElts) {
3987 Tmp = ComputeNumSignBits(Sub, DemandedSubElts, Depth + 1);
3988 if (Tmp == 1)
3989 return 1; // early-out
3990 }
3991 if (!!DemandedSrcElts) {
3992 Tmp2 = ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
3993 Tmp = std::min(Tmp, Tmp2);
3994 }
3995 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3996 return Tmp;
3997 }
3998 }
3999
4000 // If we are looking at the loaded value of the SDNode.
4001 if (Op.getResNo() == 0) {
4002     // Handle LOADX separately here. The EXTLOAD case will fall through.
4003 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
4004 unsigned ExtType = LD->getExtensionType();
4005 switch (ExtType) {
4006 default: break;
4007 case ISD::SEXTLOAD: // e.g. i16->i32 = '17' bits known.
4008 Tmp = LD->getMemoryVT().getScalarSizeInBits();
4009 return VTBits - Tmp + 1;
4010 case ISD::ZEXTLOAD: // e.g. i16->i32 = '16' bits known.
4011 Tmp = LD->getMemoryVT().getScalarSizeInBits();
4012 return VTBits - Tmp;
4013 case ISD::NON_EXTLOAD:
4014 if (const Constant *Cst = TLI->getTargetConstantFromLoad(LD)) {
4015 // We only need to handle vectors - computeKnownBits should handle
4016 // scalar cases.
4017 Type *CstTy = Cst->getType();
4018 if (CstTy->isVectorTy() &&
4019 (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits()) {
4020 Tmp = VTBits;
4021 for (unsigned i = 0; i != NumElts; ++i) {
4022 if (!DemandedElts[i])
4023 continue;
4024 if (Constant *Elt = Cst->getAggregateElement(i)) {
4025 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
4026 const APInt &Value = CInt->getValue();
4027 Tmp = std::min(Tmp, Value.getNumSignBits());
4028 continue;
4029 }
4030 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
4031 APInt Value = CFP->getValueAPF().bitcastToAPInt();
4032 Tmp = std::min(Tmp, Value.getNumSignBits());
4033 continue;
4034 }
4035 }
4036 // Unknown type. Conservatively assume no bits match sign bit.
4037 return 1;
4038 }
4039 return Tmp;
4040 }
4041 }
4042 break;
4043 }
4044 }
4045 }
4046
4047 // Allow the target to implement this method for its nodes.
4048 if (Opcode >= ISD::BUILTIN_OP_END ||
4049 Opcode == ISD::INTRINSIC_WO_CHAIN ||
4050 Opcode == ISD::INTRINSIC_W_CHAIN ||
4051 Opcode == ISD::INTRINSIC_VOID) {
4052 unsigned NumBits =
4053 TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth);
4054 if (NumBits > 1)
4055 FirstAnswer = std::max(FirstAnswer, NumBits);
4056 }
4057
4058 // Finally, if we can prove that the top bits of the result are 0's or 1's,
4059 // use this information.
4060 KnownBits Known = computeKnownBits(Op, DemandedElts, Depth);
4061
4062 APInt Mask;
4063 if (Known.isNonNegative()) { // sign bit is 0
4064 Mask = Known.Zero;
4065 } else if (Known.isNegative()) { // sign bit is 1;
4066 Mask = Known.One;
4067 } else {
4068 // Nothing known.
4069 return FirstAnswer;
4070 }
4071
4072 // Okay, we know that the sign bit in Mask is set. Use CLO to determine
4073 // the number of identical bits in the top of the input value.
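  // Illustrative example: if the top 5 bits of the value are known zero, the
  // sign bit is 0 and Mask.countLeadingOnes() reports at least 5 identical
  // (sign) bits.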
4074 Mask <<= Mask.getBitWidth()-VTBits;
4075 return std::max(FirstAnswer, Mask.countLeadingOnes());
4076 }
4077
4078 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
4079 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
4080 !isa<ConstantSDNode>(Op.getOperand(1)))
4081 return false;
4082
4083 if (Op.getOpcode() == ISD::OR &&
4084 !MaskedValueIsZero(Op.getOperand(0), Op.getConstantOperandAPInt(1)))
4085 return false;
4086
4087 return true;
4088 }
4089
4090 bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN, unsigned Depth) const {
4091 // If we're told that NaNs won't happen, assume they won't.
4092 if (getTarget().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs())
4093 return true;
4094
4095 if (Depth >= MaxRecursionDepth)
4096 return false; // Limit search depth.
4097
4098 // TODO: Handle vectors.
4099 // If the value is a constant, we can obviously see if it is a NaN or not.
4100 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) {
4101 return !C->getValueAPF().isNaN() ||
4102 (SNaN && !C->getValueAPF().isSignaling());
4103 }
4104
4105 unsigned Opcode = Op.getOpcode();
4106 switch (Opcode) {
4107 case ISD::FADD:
4108 case ISD::FSUB:
4109 case ISD::FMUL:
4110 case ISD::FDIV:
4111 case ISD::FREM:
4112 case ISD::FSIN:
4113 case ISD::FCOS: {
4114 if (SNaN)
4115 return true;
4116 // TODO: Need isKnownNeverInfinity
4117 return false;
4118 }
4119 case ISD::FCANONICALIZE:
4120 case ISD::FEXP:
4121 case ISD::FEXP2:
4122 case ISD::FTRUNC:
4123 case ISD::FFLOOR:
4124 case ISD::FCEIL:
4125 case ISD::FROUND:
4126 case ISD::FROUNDEVEN:
4127 case ISD::FRINT:
4128 case ISD::FNEARBYINT: {
4129 if (SNaN)
4130 return true;
4131 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4132 }
4133 case ISD::FABS:
4134 case ISD::FNEG:
4135 case ISD::FCOPYSIGN: {
4136 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4137 }
4138 case ISD::SELECT:
4139 return isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4140 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4141 case ISD::FP_EXTEND:
4142 case ISD::FP_ROUND: {
4143 if (SNaN)
4144 return true;
4145 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4146 }
4147 case ISD::SINT_TO_FP:
4148 case ISD::UINT_TO_FP:
4149 return true;
4150 case ISD::FMA:
4151 case ISD::FMAD: {
4152 if (SNaN)
4153 return true;
4154 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4155 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4156 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4157 }
4158   case ISD::FSQRT: // Needs the operand to be known positive.
4159 case ISD::FLOG:
4160 case ISD::FLOG2:
4161 case ISD::FLOG10:
4162 case ISD::FPOWI:
4163 case ISD::FPOW: {
4164 if (SNaN)
4165 return true;
4166 // TODO: Refine on operand
4167 return false;
4168 }
4169 case ISD::FMINNUM:
4170 case ISD::FMAXNUM: {
4171     // Only one operand needs to be known not-NaN, since it will be returned if
4172     // the other ends up being NaN.
4173 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) ||
4174 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4175 }
4176 case ISD::FMINNUM_IEEE:
4177 case ISD::FMAXNUM_IEEE: {
4178 if (SNaN)
4179 return true;
4180 // This can return a NaN if either operand is an sNaN, or if both operands
4181 // are NaN.
4182 return (isKnownNeverNaN(Op.getOperand(0), false, Depth + 1) &&
4183 isKnownNeverSNaN(Op.getOperand(1), Depth + 1)) ||
4184 (isKnownNeverNaN(Op.getOperand(1), false, Depth + 1) &&
4185 isKnownNeverSNaN(Op.getOperand(0), Depth + 1));
4186 }
4187 case ISD::FMINIMUM:
4188 case ISD::FMAXIMUM: {
4189     // TODO: Does this quiet or return the original NaN as-is?
4190 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4191 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4192 }
4193 case ISD::EXTRACT_VECTOR_ELT: {
4194 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4195 }
4196 default:
4197 if (Opcode >= ISD::BUILTIN_OP_END ||
4198 Opcode == ISD::INTRINSIC_WO_CHAIN ||
4199 Opcode == ISD::INTRINSIC_W_CHAIN ||
4200 Opcode == ISD::INTRINSIC_VOID) {
4201 return TLI->isKnownNeverNaNForTargetNode(Op, *this, SNaN, Depth);
4202 }
4203
4204 return false;
4205 }
4206 }
4207
4208 bool SelectionDAG::isKnownNeverZeroFloat(SDValue Op) const {
4209 assert(Op.getValueType().isFloatingPoint() &&
4210 "Floating point type expected");
4211
4212 // If the value is a constant, we can obviously see if it is a zero or not.
4213 // TODO: Add BuildVector support.
4214 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
4215 return !C->isZero();
4216 return false;
4217 }
4218
4219 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
4220 assert(!Op.getValueType().isFloatingPoint() &&
4221 "Floating point types unsupported - use isKnownNeverZeroFloat");
4222
4223 // If the value is a constant, we can obviously see if it is a zero or not.
4224 if (ISD::matchUnaryPredicate(
4225 Op, [](ConstantSDNode *C) { return !C->isNullValue(); }))
4226 return true;
4227
4228 // TODO: Recognize more cases here.
4229 switch (Op.getOpcode()) {
4230 default: break;
4231 case ISD::OR:
4232 if (isKnownNeverZero(Op.getOperand(1)) ||
4233 isKnownNeverZero(Op.getOperand(0)))
4234 return true;
4235 break;
4236 }
4237
4238 return false;
4239 }
4240
4241 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
4242 // Check the obvious case.
4243 if (A == B) return true;
4244
4245   // Check for negative and positive zero.
4246 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
4247 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
4248 if (CA->isZero() && CB->isZero()) return true;
4249
4250 // Otherwise they may not be equal.
4251 return false;
4252 }
4253
4254 // FIXME: unify with llvm::haveNoCommonBitsSet.
4255 // FIXME: could also handle masked merge pattern (X & ~M) op (Y & M)
4256 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
4257 assert(A.getValueType() == B.getValueType() &&
4258 "Values must have the same type");
4259 return (computeKnownBits(A).Zero | computeKnownBits(B).Zero).isAllOnesValue();
4260 }
4261
4262 static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT,
4263 ArrayRef<SDValue> Ops,
4264 SelectionDAG &DAG) {
4265 int NumOps = Ops.size();
4266 assert(NumOps != 0 && "Can't build an empty vector!");
4267 assert(!VT.isScalableVector() &&
4268 "BUILD_VECTOR cannot be used with scalable types");
4269 assert(VT.getVectorNumElements() == (unsigned)NumOps &&
4270 "Incorrect element count in BUILD_VECTOR!");
4271
4272 // BUILD_VECTOR of UNDEFs is UNDEF.
4273 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4274 return DAG.getUNDEF(VT);
4275
4276   // A BUILD_VECTOR of sequential extracts from the same vector of the same type is an identity.
4277 SDValue IdentitySrc;
4278 bool IsIdentity = true;
4279 for (int i = 0; i != NumOps; ++i) {
4280 if (Ops[i].getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
4281 Ops[i].getOperand(0).getValueType() != VT ||
4282 (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) ||
4283 !isa<ConstantSDNode>(Ops[i].getOperand(1)) ||
4284 cast<ConstantSDNode>(Ops[i].getOperand(1))->getAPIntValue() != i) {
4285 IsIdentity = false;
4286 break;
4287 }
4288 IdentitySrc = Ops[i].getOperand(0);
4289 }
4290 if (IsIdentity)
4291 return IdentitySrc;
4292
4293 return SDValue();
4294 }
4295
4296 /// Try to simplify vector concatenation to an input value, undef, or build
4297 /// vector.
4298 static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
4299 ArrayRef<SDValue> Ops,
4300 SelectionDAG &DAG) {
4301 assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
4302 assert(llvm::all_of(Ops,
4303 [Ops](SDValue Op) {
4304 return Ops[0].getValueType() == Op.getValueType();
4305 }) &&
4306 "Concatenation of vectors with inconsistent value types!");
4307 assert((Ops[0].getValueType().getVectorElementCount() * Ops.size()) ==
4308 VT.getVectorElementCount() &&
4309 "Incorrect element count in vector concatenation!");
4310
4311 if (Ops.size() == 1)
4312 return Ops[0];
4313
4314 // Concat of UNDEFs is UNDEF.
4315 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4316 return DAG.getUNDEF(VT);
4317
4318 // Scan the operands and look for extract operations from a single source
4319 // that correspond to insertion at the same location via this concatenation:
4320 // concat (extract X, 0*subvec_elts), (extract X, 1*subvec_elts), ...
4321 SDValue IdentitySrc;
4322 bool IsIdentity = true;
4323 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
4324 SDValue Op = Ops[i];
4325 unsigned IdentityIndex = i * Op.getValueType().getVectorMinNumElements();
4326 if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
4327 Op.getOperand(0).getValueType() != VT ||
4328 (IdentitySrc && Op.getOperand(0) != IdentitySrc) ||
4329 Op.getConstantOperandVal(1) != IdentityIndex) {
4330 IsIdentity = false;
4331 break;
4332 }
4333 assert((!IdentitySrc || IdentitySrc == Op.getOperand(0)) &&
4334 "Unexpected identity source vector for concat of extracts");
4335 IdentitySrc = Op.getOperand(0);
4336 }
4337 if (IsIdentity) {
4338 assert(IdentitySrc && "Failed to set source vector of extracts");
4339 return IdentitySrc;
4340 }
4341
4342 // The code below this point is only designed to work for fixed width
4343 // vectors, so we bail out for now.
4344 if (VT.isScalableVector())
4345 return SDValue();
4346
4347 // A CONCAT_VECTOR with all UNDEF/BUILD_VECTOR operands can be
4348 // simplified to one big BUILD_VECTOR.
4349 // FIXME: Add support for SCALAR_TO_VECTOR as well.
4350 EVT SVT = VT.getScalarType();
4351 SmallVector<SDValue, 16> Elts;
4352 for (SDValue Op : Ops) {
4353 EVT OpVT = Op.getValueType();
4354 if (Op.isUndef())
4355 Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
4356 else if (Op.getOpcode() == ISD::BUILD_VECTOR)
4357 Elts.append(Op->op_begin(), Op->op_end());
4358 else
4359 return SDValue();
4360 }
4361
4362 // BUILD_VECTOR requires all inputs to be of the same type, find the
4363 // maximum type and extend them all.
4364 for (SDValue Op : Elts)
4365 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
4366
4367 if (SVT.bitsGT(VT.getScalarType())) {
4368 for (SDValue &Op : Elts) {
4369 if (Op.isUndef())
4370 Op = DAG.getUNDEF(SVT);
4371 else
4372 Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
4373 ? DAG.getZExtOrTrunc(Op, DL, SVT)
4374 : DAG.getSExtOrTrunc(Op, DL, SVT);
4375 }
4376 }
4377
4378 SDValue V = DAG.getBuildVector(VT, DL, Elts);
4379 NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG);
4380 return V;
4381 }
4382
4383 /// Gets or creates the specified node.
4384 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
4385 FoldingSetNodeID ID;
4386 AddNodeIDNode(ID, Opcode, getVTList(VT), None);
4387 void *IP = nullptr;
4388 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
4389 return SDValue(E, 0);
4390
4391 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4392 getVTList(VT));
4393 CSEMap.InsertNode(N, IP);
4394
4395 InsertNode(N);
4396 SDValue V = SDValue(N, 0);
4397 NewSDValueDbgMsg(V, "Creating new node: ", this);
4398 return V;
4399 }
4400
4401 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4402 SDValue Operand) {
4403 SDNodeFlags Flags;
4404 if (Inserter)
4405 Flags = Inserter->getFlags();
4406 return getNode(Opcode, DL, VT, Operand, Flags);
4407 }
4408
4409 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4410 SDValue Operand, const SDNodeFlags Flags) {
4411 // Constant fold unary operations with an integer constant operand. Even
4412 // opaque constant will be folded, because the folding of unary operations
4413 // doesn't create new constants with different values. Nevertheless, the
4414 // opaque flag is preserved during folding to prevent future folding with
4415 // other constants.
4416 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) {
4417 const APInt &Val = C->getAPIntValue();
4418 switch (Opcode) {
4419 default: break;
4420 case ISD::SIGN_EXTEND:
4421 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
4422 C->isTargetOpcode(), C->isOpaque());
4423 case ISD::TRUNCATE:
4424 if (C->isOpaque())
4425 break;
4426 LLVM_FALLTHROUGH;
4427 case ISD::ANY_EXTEND:
4428 case ISD::ZERO_EXTEND:
4429 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
4430 C->isTargetOpcode(), C->isOpaque());
4431 case ISD::UINT_TO_FP:
4432 case ISD::SINT_TO_FP: {
4433 APFloat apf(EVTToAPFloatSemantics(VT),
4434 APInt::getNullValue(VT.getSizeInBits()));
4435 (void)apf.convertFromAPInt(Val,
4436 Opcode==ISD::SINT_TO_FP,
4437 APFloat::rmNearestTiesToEven);
4438 return getConstantFP(apf, DL, VT);
4439 }
4440 case ISD::BITCAST:
4441 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16)
4442 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT);
4443 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
4444 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT);
4445 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
4446 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT);
4447 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128)
4448 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT);
4449 break;
4450 case ISD::ABS:
4451 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(),
4452 C->isOpaque());
4453 case ISD::BITREVERSE:
4454 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(),
4455 C->isOpaque());
4456 case ISD::BSWAP:
4457 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(),
4458 C->isOpaque());
4459 case ISD::CTPOP:
4460 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(),
4461 C->isOpaque());
4462 case ISD::CTLZ:
4463 case ISD::CTLZ_ZERO_UNDEF:
4464 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(),
4465 C->isOpaque());
4466 case ISD::CTTZ:
4467 case ISD::CTTZ_ZERO_UNDEF:
4468 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(),
4469 C->isOpaque());
4470 case ISD::FP16_TO_FP: {
4471 bool Ignored;
4472 APFloat FPV(APFloat::IEEEhalf(),
4473 (Val.getBitWidth() == 16) ? Val : Val.trunc(16));
4474
4475 // This can return overflow, underflow, or inexact; we don't care.
4476 // FIXME need to be more flexible about rounding mode.
4477 (void)FPV.convert(EVTToAPFloatSemantics(VT),
4478 APFloat::rmNearestTiesToEven, &Ignored);
4479 return getConstantFP(FPV, DL, VT);
4480 }
4481 }
4482 }
4483
4484 // Constant fold unary operations with a floating point constant operand.
4485 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) {
4486 APFloat V = C->getValueAPF(); // make copy
4487 switch (Opcode) {
4488 case ISD::FNEG:
4489 V.changeSign();
4490 return getConstantFP(V, DL, VT);
4491 case ISD::FABS:
4492 V.clearSign();
4493 return getConstantFP(V, DL, VT);
4494 case ISD::FCEIL: {
4495 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
4496 if (fs == APFloat::opOK || fs == APFloat::opInexact)
4497 return getConstantFP(V, DL, VT);
4498 break;
4499 }
4500 case ISD::FTRUNC: {
4501 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
4502 if (fs == APFloat::opOK || fs == APFloat::opInexact)
4503 return getConstantFP(V, DL, VT);
4504 break;
4505 }
4506 case ISD::FFLOOR: {
4507 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
4508 if (fs == APFloat::opOK || fs == APFloat::opInexact)
4509 return getConstantFP(V, DL, VT);
4510 break;
4511 }
4512 case ISD::FP_EXTEND: {
4513 bool ignored;
4514 // This can return overflow, underflow, or inexact; we don't care.
4515 // FIXME need to be more flexible about rounding mode.
4516 (void)V.convert(EVTToAPFloatSemantics(VT),
4517 APFloat::rmNearestTiesToEven, &ignored);
4518 return getConstantFP(V, DL, VT);
4519 }
4520 case ISD::FP_TO_SINT:
4521 case ISD::FP_TO_UINT: {
4522 bool ignored;
4523 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT);
4524 // FIXME need to be more flexible about rounding mode.
4525 APFloat::opStatus s =
4526 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored);
4527 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual
4528 break;
4529 return getConstant(IntVal, DL, VT);
4530 }
4531 case ISD::BITCAST:
4532 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16)
4533 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
4534 else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
4535 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
4536 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
4537 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
4538 break;
4539 case ISD::FP_TO_FP16: {
4540 bool Ignored;
4541 // This can return overflow, underflow, or inexact; we don't care.
4542 // FIXME need to be more flexible about rounding mode.
4543 (void)V.convert(APFloat::IEEEhalf(),
4544 APFloat::rmNearestTiesToEven, &Ignored);
4545 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
4546 }
4547 }
4548 }
4549
4550 // Constant fold unary operations with a vector integer or float operand.
4551 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) {
4552 if (BV->isConstant()) {
4553 switch (Opcode) {
4554 default:
4555 // FIXME: Entirely reasonable to perform folding of other unary
4556 // operations here as the need arises.
4557 break;
4558 case ISD::FNEG:
4559 case ISD::FABS:
4560 case ISD::FCEIL:
4561 case ISD::FTRUNC:
4562 case ISD::FFLOOR:
4563 case ISD::FP_EXTEND:
4564 case ISD::FP_TO_SINT:
4565 case ISD::FP_TO_UINT:
4566 case ISD::TRUNCATE:
4567 case ISD::ANY_EXTEND:
4568 case ISD::ZERO_EXTEND:
4569 case ISD::SIGN_EXTEND:
4570 case ISD::UINT_TO_FP:
4571 case ISD::SINT_TO_FP:
4572 case ISD::ABS:
4573 case ISD::BITREVERSE:
4574 case ISD::BSWAP:
4575 case ISD::CTLZ:
4576 case ISD::CTLZ_ZERO_UNDEF:
4577 case ISD::CTTZ:
4578 case ISD::CTTZ_ZERO_UNDEF:
4579 case ISD::CTPOP: {
4580 SDValue Ops = { Operand };
4581 if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
4582 return Fold;
4583 }
4584 }
4585 }
4586 }
4587
4588 unsigned OpOpcode = Operand.getNode()->getOpcode();
4589 switch (Opcode) {
4590 case ISD::FREEZE:
4591 assert(VT == Operand.getValueType() && "Unexpected VT!");
4592 break;
4593 case ISD::TokenFactor:
4594 case ISD::MERGE_VALUES:
4595 case ISD::CONCAT_VECTORS:
4596 return Operand; // Factor, merge or concat of one node? No need.
4597 case ISD::BUILD_VECTOR: {
4598 // Attempt to simplify BUILD_VECTOR.
4599 SDValue Ops[] = {Operand};
4600 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
4601 return V;
4602 break;
4603 }
4604 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
4605 case ISD::FP_EXTEND:
4606 assert(VT.isFloatingPoint() &&
4607 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
4608 if (Operand.getValueType() == VT) return Operand; // noop conversion.
4609 assert((!VT.isVector() ||
4610 VT.getVectorElementCount() ==
4611 Operand.getValueType().getVectorElementCount()) &&
4612 "Vector element count mismatch!");
4613 assert(Operand.getValueType().bitsLT(VT) &&
4614 "Invalid fpext node, dst < src!");
4615 if (Operand.isUndef())
4616 return getUNDEF(VT);
4617 break;
4618 case ISD::FP_TO_SINT:
4619 case ISD::FP_TO_UINT:
4620 if (Operand.isUndef())
4621 return getUNDEF(VT);
4622 break;
4623 case ISD::SINT_TO_FP:
4624 case ISD::UINT_TO_FP:
4625 // [us]itofp(undef) = 0, because the result value is bounded.
4626 if (Operand.isUndef())
4627 return getConstantFP(0.0, DL, VT);
4628 break;
4629 case ISD::SIGN_EXTEND:
4630 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4631 "Invalid SIGN_EXTEND!");
4632 assert(VT.isVector() == Operand.getValueType().isVector() &&
4633 "SIGN_EXTEND result type type should be vector iff the operand "
4634 "type is vector!");
4635 if (Operand.getValueType() == VT) return Operand; // noop extension
4636 assert((!VT.isVector() ||
4637 VT.getVectorElementCount() ==
4638 Operand.getValueType().getVectorElementCount()) &&
4639 "Vector element count mismatch!");
4640 assert(Operand.getValueType().bitsLT(VT) &&
4641 "Invalid sext node, dst < src!");
4642 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
4643 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4644 else if (OpOpcode == ISD::UNDEF)
4645 // sext(undef) = 0, because the top bits will all be the same.
4646 return getConstant(0, DL, VT);
4647 break;
4648 case ISD::ZERO_EXTEND:
4649 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4650 "Invalid ZERO_EXTEND!");
4651 assert(VT.isVector() == Operand.getValueType().isVector() &&
4652 "ZERO_EXTEND result type type should be vector iff the operand "
4653 "type is vector!");
4654 if (Operand.getValueType() == VT) return Operand; // noop extension
4655 assert((!VT.isVector() ||
4656 VT.getVectorElementCount() ==
4657 Operand.getValueType().getVectorElementCount()) &&
4658 "Vector element count mismatch!");
4659 assert(Operand.getValueType().bitsLT(VT) &&
4660 "Invalid zext node, dst < src!");
4661 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x)
4662 return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0));
4663 else if (OpOpcode == ISD::UNDEF)
4664 // zext(undef) = 0, because the top bits will be zero.
4665 return getConstant(0, DL, VT);
4666 break;
4667 case ISD::ANY_EXTEND:
4668 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4669 "Invalid ANY_EXTEND!");
4670 assert(VT.isVector() == Operand.getValueType().isVector() &&
4671 "ANY_EXTEND result type type should be vector iff the operand "
4672 "type is vector!");
4673 if (Operand.getValueType() == VT) return Operand; // noop extension
4674 assert((!VT.isVector() ||
4675 VT.getVectorElementCount() ==
4676 Operand.getValueType().getVectorElementCount()) &&
4677 "Vector element count mismatch!");
4678 assert(Operand.getValueType().bitsLT(VT) &&
4679 "Invalid anyext node, dst < src!");
4680
4681 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4682 OpOpcode == ISD::ANY_EXTEND)
4683 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
4684 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4685 else if (OpOpcode == ISD::UNDEF)
4686 return getUNDEF(VT);
4687
4688 // (ext (trunc x)) -> x
4689 if (OpOpcode == ISD::TRUNCATE) {
4690 SDValue OpOp = Operand.getOperand(0);
4691 if (OpOp.getValueType() == VT) {
4692 transferDbgValues(Operand, OpOp);
4693 return OpOp;
4694 }
4695 }
4696 break;
4697 case ISD::TRUNCATE:
4698 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4699 "Invalid TRUNCATE!");
4700 assert(VT.isVector() == Operand.getValueType().isVector() &&
4701 "TRUNCATE result type type should be vector iff the operand "
4702 "type is vector!");
4703 if (Operand.getValueType() == VT) return Operand; // noop truncate
4704 assert((!VT.isVector() ||
4705 VT.getVectorElementCount() ==
4706 Operand.getValueType().getVectorElementCount()) &&
4707 "Vector element count mismatch!");
4708 assert(Operand.getValueType().bitsGT(VT) &&
4709 "Invalid truncate node, src < dst!");
4710 if (OpOpcode == ISD::TRUNCATE)
4711 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
4712 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4713 OpOpcode == ISD::ANY_EXTEND) {
4714 // If the source is smaller than the dest, we still need an extend.
4715 if (Operand.getOperand(0).getValueType().getScalarType()
4716 .bitsLT(VT.getScalarType()))
4717 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4718 if (Operand.getOperand(0).getValueType().bitsGT(VT))
4719 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
4720 return Operand.getOperand(0);
4721 }
4722 if (OpOpcode == ISD::UNDEF)
4723 return getUNDEF(VT);
4724 break;
4725 case ISD::ANY_EXTEND_VECTOR_INREG:
4726 case ISD::ZERO_EXTEND_VECTOR_INREG:
4727 case ISD::SIGN_EXTEND_VECTOR_INREG:
4728 assert(VT.isVector() && "This DAG node is restricted to vector types.");
4729 assert(Operand.getValueType().bitsLE(VT) &&
4730 "The input must be the same size or smaller than the result.");
4731 assert(VT.getVectorNumElements() <
4732 Operand.getValueType().getVectorNumElements() &&
4733 "The destination vector type must have fewer lanes than the input.");
4734 break;
4735 case ISD::ABS:
4736 assert(VT.isInteger() && VT == Operand.getValueType() &&
4737 "Invalid ABS!");
4738 if (OpOpcode == ISD::UNDEF)
4739 return getUNDEF(VT);
4740 break;
4741 case ISD::BSWAP:
4742 assert(VT.isInteger() && VT == Operand.getValueType() &&
4743 "Invalid BSWAP!");
4744 assert((VT.getScalarSizeInBits() % 16 == 0) &&
4745 "BSWAP types must be a multiple of 16 bits!");
4746 if (OpOpcode == ISD::UNDEF)
4747 return getUNDEF(VT);
4748 break;
4749 case ISD::BITREVERSE:
4750 assert(VT.isInteger() && VT == Operand.getValueType() &&
4751 "Invalid BITREVERSE!");
4752 if (OpOpcode == ISD::UNDEF)
4753 return getUNDEF(VT);
4754 break;
4755 case ISD::BITCAST:
4756 // Basic sanity checking.
4757 assert(VT.getSizeInBits() == Operand.getValueSizeInBits() &&
4758 "Cannot BITCAST between types of different sizes!");
4759 if (VT == Operand.getValueType()) return Operand; // noop conversion.
4760 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
4761 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
4762 if (OpOpcode == ISD::UNDEF)
4763 return getUNDEF(VT);
4764 break;
4765 case ISD::SCALAR_TO_VECTOR:
4766 assert(VT.isVector() && !Operand.getValueType().isVector() &&
4767 (VT.getVectorElementType() == Operand.getValueType() ||
4768 (VT.getVectorElementType().isInteger() &&
4769 Operand.getValueType().isInteger() &&
4770 VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
4771 "Illegal SCALAR_TO_VECTOR node!");
4772 if (OpOpcode == ISD::UNDEF)
4773 return getUNDEF(VT);
4774 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
4775 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
4776 isa<ConstantSDNode>(Operand.getOperand(1)) &&
4777 Operand.getConstantOperandVal(1) == 0 &&
4778 Operand.getOperand(0).getValueType() == VT)
4779 return Operand.getOperand(0);
4780 break;
4781 case ISD::FNEG:
4782 // Negation of an unknown bag of bits is still completely undefined.
4783 if (OpOpcode == ISD::UNDEF)
4784 return getUNDEF(VT);
4785
4786 if (OpOpcode == ISD::FNEG) // --X -> X
4787 return Operand.getOperand(0);
4788 break;
4789 case ISD::FABS:
4790 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X)
4791 return getNode(ISD::FABS, DL, VT, Operand.getOperand(0));
4792 break;
4793 case ISD::VSCALE:
4794 assert(VT == Operand.getValueType() && "Unexpected VT!");
4795 break;
4796 case ISD::CTPOP:
4797 if (Operand.getValueType().getScalarType() == MVT::i1)
4798 return Operand;
4799 break;
4800 case ISD::CTLZ:
4801 case ISD::CTTZ:
4802 if (Operand.getValueType().getScalarType() == MVT::i1)
4803 return getNOT(DL, Operand, Operand.getValueType());
4804 break;
4805 case ISD::VECREDUCE_SMIN:
4806 case ISD::VECREDUCE_UMAX:
4807 if (Operand.getValueType().getScalarType() == MVT::i1)
4808 return getNode(ISD::VECREDUCE_OR, DL, VT, Operand);
4809 break;
4810 case ISD::VECREDUCE_SMAX:
4811 case ISD::VECREDUCE_UMIN:
4812 if (Operand.getValueType().getScalarType() == MVT::i1)
4813 return getNode(ISD::VECREDUCE_AND, DL, VT, Operand);
4814 break;
4815 }
4816
4817 SDNode *N;
4818 SDVTList VTs = getVTList(VT);
4819 SDValue Ops[] = {Operand};
4820 if (VT != MVT::Glue) { // Don't CSE flag producing nodes
4821 FoldingSetNodeID ID;
4822 AddNodeIDNode(ID, Opcode, VTs, Ops);
4823 void *IP = nullptr;
4824 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
4825 E->intersectFlagsWith(Flags);
4826 return SDValue(E, 0);
4827 }
4828
4829 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4830 N->setFlags(Flags);
4831 createOperands(N, Ops);
4832 CSEMap.InsertNode(N, IP);
4833 } else {
4834 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4835 createOperands(N, Ops);
4836 }
4837
4838 InsertNode(N);
4839 SDValue V = SDValue(N, 0);
4840 NewSDValueDbgMsg(V, "Creating new node: ", this);
4841 return V;
4842 }
4843
4844 static llvm::Optional<APInt> FoldValue(unsigned Opcode, const APInt &C1,
4845 const APInt &C2) {
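  // Returns llvm::None when the fold has no defined result (currently only
  // division or remainder by a zero divisor), so callers can fall back to
  // emitting the node unchanged.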
4846 switch (Opcode) {
4847 case ISD::ADD: return C1 + C2;
4848 case ISD::SUB: return C1 - C2;
4849 case ISD::MUL: return C1 * C2;
4850 case ISD::AND: return C1 & C2;
4851 case ISD::OR: return C1 | C2;
4852 case ISD::XOR: return C1 ^ C2;
4853 case ISD::SHL: return C1 << C2;
4854 case ISD::SRL: return C1.lshr(C2);
4855 case ISD::SRA: return C1.ashr(C2);
4856 case ISD::ROTL: return C1.rotl(C2);
4857 case ISD::ROTR: return C1.rotr(C2);
4858 case ISD::SMIN: return C1.sle(C2) ? C1 : C2;
4859 case ISD::SMAX: return C1.sge(C2) ? C1 : C2;
4860 case ISD::UMIN: return C1.ule(C2) ? C1 : C2;
4861 case ISD::UMAX: return C1.uge(C2) ? C1 : C2;
4862 case ISD::SADDSAT: return C1.sadd_sat(C2);
4863 case ISD::UADDSAT: return C1.uadd_sat(C2);
4864 case ISD::SSUBSAT: return C1.ssub_sat(C2);
4865 case ISD::USUBSAT: return C1.usub_sat(C2);
4866 case ISD::UDIV:
4867 if (!C2.getBoolValue())
4868 break;
4869 return C1.udiv(C2);
4870 case ISD::UREM:
4871 if (!C2.getBoolValue())
4872 break;
4873 return C1.urem(C2);
4874 case ISD::SDIV:
4875 if (!C2.getBoolValue())
4876 break;
4877 return C1.sdiv(C2);
4878 case ISD::SREM:
4879 if (!C2.getBoolValue())
4880 break;
4881 return C1.srem(C2);
4882 }
4883 return llvm::None;
4884 }
4885
4886 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT,
4887 const GlobalAddressSDNode *GA,
4888 const SDNode *N2) {
4889 if (GA->getOpcode() != ISD::GlobalAddress)
4890 return SDValue();
4891 if (!TLI->isOffsetFoldingLegal(GA))
4892 return SDValue();
4893 auto *C2 = dyn_cast<ConstantSDNode>(N2);
4894 if (!C2)
4895 return SDValue();
4896 int64_t Offset = C2->getSExtValue();
4897 switch (Opcode) {
4898 case ISD::ADD: break;
4899 case ISD::SUB: Offset = -uint64_t(Offset); break;
4900 default: return SDValue();
4901 }
4902 return getGlobalAddress(GA->getGlobal(), SDLoc(C2), VT,
4903 GA->getOffset() + uint64_t(Offset));
4904 }
4905
4906 bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) {
4907 switch (Opcode) {
4908 case ISD::SDIV:
4909 case ISD::UDIV:
4910 case ISD::SREM:
4911 case ISD::UREM: {
4912 // If a divisor is zero/undef or any element of a divisor vector is
4913 // zero/undef, the whole op is undef.
4914 assert(Ops.size() == 2 && "Div/rem should have 2 operands");
4915 SDValue Divisor = Ops[1];
4916 if (Divisor.isUndef() || isNullConstant(Divisor))
4917 return true;
4918
4919 return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) &&
4920 llvm::any_of(Divisor->op_values(),
4921 [](SDValue V) { return V.isUndef() ||
4922 isNullConstant(V); });
4923 // TODO: Handle signed overflow.
4924 }
4925 // TODO: Handle oversized shifts.
4926 default:
4927 return false;
4928 }
4929 }
4930
4931 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
4932 EVT VT, ArrayRef<SDValue> Ops) {
4933 // If the opcode is a target-specific ISD node, there's nothing we can
4934 // do here and the operand rules may not line up with the below, so
4935 // bail early.
4936 if (Opcode >= ISD::BUILTIN_OP_END)
4937 return SDValue();
4938
4939 // For now, the array Ops should only contain two values.
4940 // This enforcement will be removed once this function is merged with
4941 // FoldConstantVectorArithmetic
4942 if (Ops.size() != 2)
4943 return SDValue();
4944
4945 if (isUndef(Opcode, Ops))
4946 return getUNDEF(VT);
4947
4948 SDNode *N1 = Ops[0].getNode();
4949 SDNode *N2 = Ops[1].getNode();
4950
4951 // Handle the case of two scalars.
4952 if (auto *C1 = dyn_cast<ConstantSDNode>(N1)) {
4953 if (auto *C2 = dyn_cast<ConstantSDNode>(N2)) {
4954 if (C1->isOpaque() || C2->isOpaque())
4955 return SDValue();
4956
4957 Optional<APInt> FoldAttempt =
4958 FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue());
4959 if (!FoldAttempt)
4960 return SDValue();
4961
4962 SDValue Folded = getConstant(FoldAttempt.getValue(), DL, VT);
4963 assert((!Folded || !VT.isVector()) &&
4964 "Can't fold vectors ops with scalar operands");
4965 return Folded;
4966 }
4967 }
4968
4969 // fold (add Sym, c) -> Sym+c
4970 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N1))
4971 return FoldSymbolOffset(Opcode, VT, GA, N2);
4972 if (TLI->isCommutativeBinOp(Opcode))
4973 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N2))
4974 return FoldSymbolOffset(Opcode, VT, GA, N1);
4975
4976 // TODO: All the folds below are performed lane-by-lane and assume a fixed
4977 // vector width, however we should be able to do constant folds involving
4978 // splat vector nodes too.
4979 if (VT.isScalableVector())
4980 return SDValue();
4981
4982 // For fixed width vectors, extract each constant element and fold them
4983 // individually. Either input may be an undef value.
4984 auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
4985 if (!BV1 && !N1->isUndef())
4986 return SDValue();
4987 auto *BV2 = dyn_cast<BuildVectorSDNode>(N2);
4988 if (!BV2 && !N2->isUndef())
4989 return SDValue();
4990 // If both operands are undef, that's handled the same way as scalars.
4991 if (!BV1 && !BV2)
4992 return SDValue();
4993
4994 assert((!BV1 || !BV2 || BV1->getNumOperands() == BV2->getNumOperands()) &&
4995 "Vector binop with different number of elements in operands?");
4996
4997 EVT SVT = VT.getScalarType();
4998 EVT LegalSVT = SVT;
4999 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
5000 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
5001 if (LegalSVT.bitsLT(SVT))
5002 return SDValue();
5003 }
5004 SmallVector<SDValue, 4> Outputs;
5005 unsigned NumOps = BV1 ? BV1->getNumOperands() : BV2->getNumOperands();
5006 for (unsigned I = 0; I != NumOps; ++I) {
5007 SDValue V1 = BV1 ? BV1->getOperand(I) : getUNDEF(SVT);
5008 SDValue V2 = BV2 ? BV2->getOperand(I) : getUNDEF(SVT);
5009 if (SVT.isInteger()) {
5010 if (V1->getValueType(0).bitsGT(SVT))
5011 V1 = getNode(ISD::TRUNCATE, DL, SVT, V1);
5012 if (V2->getValueType(0).bitsGT(SVT))
5013 V2 = getNode(ISD::TRUNCATE, DL, SVT, V2);
5014 }
5015
5016 if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
5017 return SDValue();
5018
5019 // Fold one vector element.
5020 SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2);
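    // Promote the folded scalar to the legal result type if the target
    // requires it (mirrors the promotion done in FoldConstantVectorArithmetic).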
5021 if (LegalSVT != SVT)
5022 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
5023
5024 // Scalar folding only succeeded if the result is a constant or UNDEF.
5025 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
5026 ScalarResult.getOpcode() != ISD::ConstantFP)
5027 return SDValue();
5028 Outputs.push_back(ScalarResult);
5029 }
5030
5031 assert(VT.getVectorNumElements() == Outputs.size() &&
5032 "Vector size mismatch!");
5033
5034 // We may have a vector type but a scalar result. Create a splat.
5035 Outputs.resize(VT.getVectorNumElements(), Outputs.back());
5036
5037 // Build a big vector out of the scalar elements we generated.
5038 return getBuildVector(VT, SDLoc(), Outputs);
5039 }
5040
5041 // TODO: Merge with FoldConstantArithmetic
5042 SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode,
5043 const SDLoc &DL, EVT VT,
5044 ArrayRef<SDValue> Ops,
5045 const SDNodeFlags Flags) {
5046 // If the opcode is a target-specific ISD node, there's nothing we can
5047 // do here and the operand rules may not line up with the below, so
5048 // bail early.
5049 if (Opcode >= ISD::BUILTIN_OP_END)
5050 return SDValue();
5051
5052 if (isUndef(Opcode, Ops))
5053 return getUNDEF(VT);
5054
5055 // We can only fold vectors - maybe merge with FoldConstantArithmetic someday?
5056 if (!VT.isVector())
5057 return SDValue();
5058
5059 // TODO: All the folds below are performed lane-by-lane and assume a fixed
5060 // vector width, however we should be able to do constant folds involving
5061 // splat vector nodes too.
5062 if (VT.isScalableVector())
5063 return SDValue();
5064
5065 // From this point onwards all vectors are assumed to be fixed width.
5066 unsigned NumElts = VT.getVectorNumElements();
5067
5068 auto IsScalarOrSameVectorSize = [&](const SDValue &Op) {
5069 return !Op.getValueType().isVector() ||
5070 Op.getValueType().getVectorNumElements() == NumElts;
5071 };
5072
5073 auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) {
5074 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op);
5075 return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) ||
5076 (BV && BV->isConstant());
5077 };
5078
5079 // All operands must be vector types with the same number of elements as
5080 // the result type and must be either UNDEF or a build vector of constant
5081 // or UNDEF scalars.
5082 if (!llvm::all_of(Ops, IsConstantBuildVectorOrUndef) ||
5083 !llvm::all_of(Ops, IsScalarOrSameVectorSize))
5084 return SDValue();
5085
5086 // If we are comparing vectors, then the result needs to be an i1 boolean
5087 // that is then sign-extended back to the legal result type.
5088 EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType());
5089
5090 // Find legal integer scalar type for constant promotion and
5091 // ensure that its scalar size is at least as large as source.
5092 EVT LegalSVT = VT.getScalarType();
5093 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
5094 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
5095 if (LegalSVT.bitsLT(VT.getScalarType()))
5096 return SDValue();
5097 }
5098
5099 // Constant fold each scalar lane separately.
5100 SmallVector<SDValue, 4> ScalarResults;
5101 for (unsigned i = 0; i != NumElts; i++) {
5102 SmallVector<SDValue, 4> ScalarOps;
5103 for (SDValue Op : Ops) {
5104 EVT InSVT = Op.getValueType().getScalarType();
5105 BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op);
5106 if (!InBV) {
5107 // We've checked that this is UNDEF or a constant of some kind.
5108 if (Op.isUndef())
5109 ScalarOps.push_back(getUNDEF(InSVT));
5110 else
5111 ScalarOps.push_back(Op);
5112 continue;
5113 }
5114
5115 SDValue ScalarOp = InBV->getOperand(i);
5116 EVT ScalarVT = ScalarOp.getValueType();
5117
5118 // Build vector (integer) scalar operands may need implicit
5119 // truncation - do this before constant folding.
5120 if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT))
5121 ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp);
5122
5123 ScalarOps.push_back(ScalarOp);
5124 }
5125
5126 // Constant fold the scalar operands.
5127 SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags);
5128
5129 // Legalize the (integer) scalar constant if necessary.
5130 if (LegalSVT != SVT)
5131 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
5132
5133 // Scalar folding only succeeded if the result is a constant or UNDEF.
5134 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
5135 ScalarResult.getOpcode() != ISD::ConstantFP)
5136 return SDValue();
5137 ScalarResults.push_back(ScalarResult);
5138 }
5139
5140 SDValue V = getBuildVector(VT, DL, ScalarResults);
5141 NewSDValueDbgMsg(V, "New node fold constant vector: ", this);
5142 return V;
5143 }
5144
5145 SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL,
5146 EVT VT, SDValue N1, SDValue N2) {
5147 // TODO: We don't do any constant folding for strict FP opcodes here, but we
5148 // should. That will require dealing with a potentially non-default
5149 // rounding mode, checking the "opStatus" return value from the APFloat
5150 // math calculations, and possibly other variations.
5151 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode());
5152 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode());
5153 if (N1CFP && N2CFP) {
5154 APFloat C1 = N1CFP->getValueAPF(), C2 = N2CFP->getValueAPF();
5155 switch (Opcode) {
5156 case ISD::FADD:
5157 C1.add(C2, APFloat::rmNearestTiesToEven);
5158 return getConstantFP(C1, DL, VT);
5159 case ISD::FSUB:
5160 C1.subtract(C2, APFloat::rmNearestTiesToEven);
5161 return getConstantFP(C1, DL, VT);
5162 case ISD::FMUL:
5163 C1.multiply(C2, APFloat::rmNearestTiesToEven);
5164 return getConstantFP(C1, DL, VT);
5165 case ISD::FDIV:
5166 C1.divide(C2, APFloat::rmNearestTiesToEven);
5167 return getConstantFP(C1, DL, VT);
5168 case ISD::FREM:
5169 C1.mod(C2);
5170 return getConstantFP(C1, DL, VT);
5171 case ISD::FCOPYSIGN:
5172 C1.copySign(C2);
5173 return getConstantFP(C1, DL, VT);
5174 default: break;
5175 }
5176 }
5177 if (N1CFP && Opcode == ISD::FP_ROUND) {
5178 APFloat C1 = N1CFP->getValueAPF(); // make copy
5179 bool Unused;
5180 // This can return overflow, underflow, or inexact; we don't care.
5181 // FIXME need to be more flexible about rounding mode.
5182 (void) C1.convert(EVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven,
5183 &Unused);
5184 return getConstantFP(C1, DL, VT);
5185 }
5186
5187 switch (Opcode) {
5188 case ISD::FSUB:
5189 // -0.0 - undef --> undef (consistent with "fneg undef")
5190 if (N1CFP && N1CFP->getValueAPF().isNegZero() && N2.isUndef())
5191 return getUNDEF(VT);
5192 LLVM_FALLTHROUGH;
5193
5194 case ISD::FADD:
5195 case ISD::FMUL:
5196 case ISD::FDIV:
5197 case ISD::FREM:
5198 // If both operands are undef, the result is undef. If 1 operand is undef,
5199 // the result is NaN. This should match the behavior of the IR optimizer.
5200 if (N1.isUndef() && N2.isUndef())
5201 return getUNDEF(VT);
5202 if (N1.isUndef() || N2.isUndef())
5203 return getConstantFP(APFloat::getNaN(EVTToAPFloatSemantics(VT)), DL, VT);
5204 }
5205 return SDValue();
5206 }
5207
5208 SDValue SelectionDAG::getAssertAlign(const SDLoc &DL, SDValue Val, Align A) {
5209 assert(Val.getValueType().isInteger() && "Invalid AssertAlign!");
5210
5211 // There's no need to assert on a byte-aligned pointer. All pointers are at
5212 // least byte aligned.
5213 if (A == Align(1))
5214 return Val;
5215
5216 FoldingSetNodeID ID;
5217 AddNodeIDNode(ID, ISD::AssertAlign, getVTList(Val.getValueType()), {Val});
5218 ID.AddInteger(A.value());
5219
5220 void *IP = nullptr;
5221 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
5222 return SDValue(E, 0);
5223
5224 auto *N = newSDNode<AssertAlignSDNode>(DL.getIROrder(), DL.getDebugLoc(),
5225 Val.getValueType(), A);
5226 createOperands(N, {Val});
5227
5228 CSEMap.InsertNode(N, IP);
5229 InsertNode(N);
5230
5231 SDValue V(N, 0);
5232 NewSDValueDbgMsg(V, "Creating new node: ", this);
5233 return V;
5234 }
5235
5236 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5237 SDValue N1, SDValue N2) {
5238 SDNodeFlags Flags;
5239 if (Inserter)
5240 Flags = Inserter->getFlags();
5241 return getNode(Opcode, DL, VT, N1, N2, Flags);
5242 }
5243
5244 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5245 SDValue N1, SDValue N2, const SDNodeFlags Flags) {
5246 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
5247 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
5248 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
5249 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
5250
5251 // Canonicalize constant to RHS if commutative.
5252 if (TLI->isCommutativeBinOp(Opcode)) {
5253 if (N1C && !N2C) {
5254 std::swap(N1C, N2C);
5255 std::swap(N1, N2);
5256 } else if (N1CFP && !N2CFP) {
5257 std::swap(N1CFP, N2CFP);
5258 std::swap(N1, N2);
5259 }
5260 }
5261
5262 switch (Opcode) {
5263 default: break;
5264 case ISD::TokenFactor:
5265 assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
5266 N2.getValueType() == MVT::Other && "Invalid token factor!");
5267 // Fold trivial token factors.
5268 if (N1.getOpcode() == ISD::EntryToken) return N2;
5269 if (N2.getOpcode() == ISD::EntryToken) return N1;
5270 if (N1 == N2) return N1;
5271 break;
5272 case ISD::BUILD_VECTOR: {
5273 // Attempt to simplify BUILD_VECTOR.
5274 SDValue Ops[] = {N1, N2};
5275 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
5276 return V;
5277 break;
5278 }
5279 case ISD::CONCAT_VECTORS: {
5280 SDValue Ops[] = {N1, N2};
5281 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
5282 return V;
5283 break;
5284 }
5285 case ISD::AND:
5286 assert(VT.isInteger() && "This operator does not apply to FP types!");
5287 assert(N1.getValueType() == N2.getValueType() &&
5288 N1.getValueType() == VT && "Binary operator types must match!");
5289 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
5290 // worth handling here.
5291 if (N2C && N2C->isNullValue())
5292 return N2;
5293 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X
5294 return N1;
5295 break;
5296 case ISD::OR:
5297 case ISD::XOR:
5298 case ISD::ADD:
5299 case ISD::SUB:
5300 assert(VT.isInteger() && "This operator does not apply to FP types!");
5301 assert(N1.getValueType() == N2.getValueType() &&
5302 N1.getValueType() == VT && "Binary operator types must match!");
5303 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
5304 // it's worth handling here.
5305 if (N2C && N2C->isNullValue())
5306 return N1;
5307 break;
5308 case ISD::MUL:
5309 assert(VT.isInteger() && "This operator does not apply to FP types!");
5310 assert(N1.getValueType() == N2.getValueType() &&
5311 N1.getValueType() == VT && "Binary operator types must match!");
5312 if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) {
5313 APInt MulImm = cast<ConstantSDNode>(N1->getOperand(0))->getAPIntValue();
5314 APInt N2CImm = N2C->getAPIntValue();
5315 return getVScale(DL, VT, MulImm * N2CImm);
5316 }
5317 break;
5318 case ISD::UDIV:
5319 case ISD::UREM:
5320 case ISD::MULHU:
5321 case ISD::MULHS:
5322 case ISD::SDIV:
5323 case ISD::SREM:
5324 case ISD::SADDSAT:
5325 case ISD::SSUBSAT:
5326 case ISD::UADDSAT:
5327 case ISD::USUBSAT:
5328 assert(VT.isInteger() && "This operator does not apply to FP types!");
5329 assert(N1.getValueType() == N2.getValueType() &&
5330 N1.getValueType() == VT && "Binary operator types must match!");
5331 break;
5332 case ISD::SMIN:
5333 case ISD::UMAX:
5334 assert(VT.isInteger() && "This operator does not apply to FP types!");
5335 assert(N1.getValueType() == N2.getValueType() &&
5336 N1.getValueType() == VT && "Binary operator types must match!");
5337 if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
5338 return getNode(ISD::OR, DL, VT, N1, N2);
5339 break;
5340 case ISD::SMAX:
5341 case ISD::UMIN:
5342 assert(VT.isInteger() && "This operator does not apply to FP types!");
5343 assert(N1.getValueType() == N2.getValueType() &&
5344 N1.getValueType() == VT && "Binary operator types must match!");
5345 if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
5346 return getNode(ISD::AND, DL, VT, N1, N2);
5347 break;
5348 case ISD::FADD:
5349 case ISD::FSUB:
5350 case ISD::FMUL:
5351 case ISD::FDIV:
5352 case ISD::FREM:
5353 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
5354 assert(N1.getValueType() == N2.getValueType() &&
5355 N1.getValueType() == VT && "Binary operator types must match!");
5356 if (SDValue V = simplifyFPBinop(Opcode, N1, N2, Flags))
5357 return V;
5358 break;
5359 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match.
5360 assert(N1.getValueType() == VT &&
5361 N1.getValueType().isFloatingPoint() &&
5362 N2.getValueType().isFloatingPoint() &&
5363 "Invalid FCOPYSIGN!");
5364 break;
5365 case ISD::SHL:
5366 if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) {
5367 APInt MulImm = cast<ConstantSDNode>(N1->getOperand(0))->getAPIntValue();
5368 APInt ShiftImm = N2C->getAPIntValue();
5369 return getVScale(DL, VT, MulImm << ShiftImm);
5370 }
5371 LLVM_FALLTHROUGH;
5372 case ISD::SRA:
5373 case ISD::SRL:
5374 if (SDValue V = simplifyShift(N1, N2))
5375 return V;
5376 LLVM_FALLTHROUGH;
5377 case ISD::ROTL:
5378 case ISD::ROTR:
5379 assert(VT == N1.getValueType() &&
5380 "Shift operators return type must be the same as their first arg");
5381 assert(VT.isInteger() && N2.getValueType().isInteger() &&
5382 "Shifts only work on integers");
5383 assert((!VT.isVector() || VT == N2.getValueType()) &&
5384 "Vector shift amounts must be in the same as their first arg");
5385 // Verify that the shift amount VT is big enough to hold valid shift
5386 // amounts. This catches things like trying to shift an i1024 value by an
5387 // i8, which is easy to fall into in generic code that uses
5388 // TLI.getShiftAmount().
5389 assert(N2.getValueType().getScalarSizeInBits() >=
5390 Log2_32_Ceil(VT.getScalarSizeInBits()) &&
5391 "Invalid use of small shift amount with oversized value!");
5392
5393 // Always fold shifts of i1 values so the code generator doesn't need to
5394 // handle them. Since we know the size of the shift has to be less than the
5395 // size of the value, the shift/rotate count is guaranteed to be zero.
5396 if (VT == MVT::i1)
5397 return N1;
5398 if (N2C && N2C->isNullValue())
5399 return N1;
5400 break;
5401 case ISD::FP_ROUND:
5402 assert(VT.isFloatingPoint() &&
5403 N1.getValueType().isFloatingPoint() &&
5404 VT.bitsLE(N1.getValueType()) &&
5405 N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
5406 "Invalid FP_ROUND!");
5407 if (N1.getValueType() == VT) return N1; // noop conversion.
5408 break;
5409 case ISD::AssertSext:
5410 case ISD::AssertZext: {
5411 EVT EVT = cast<VTSDNode>(N2)->getVT();
5412 assert(VT == N1.getValueType() && "Not an inreg extend!");
5413 assert(VT.isInteger() && EVT.isInteger() &&
5414 "Cannot *_EXTEND_INREG FP types");
5415 assert(!EVT.isVector() &&
5416 "AssertSExt/AssertZExt type should be the vector element type "
5417 "rather than the vector type!");
5418 assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!");
5419 if (VT.getScalarType() == EVT) return N1; // noop assertion.
5420 break;
5421 }
5422 case ISD::SIGN_EXTEND_INREG: {
5423 EVT EVT = cast<VTSDNode>(N2)->getVT();
5424 assert(VT == N1.getValueType() && "Not an inreg extend!");
5425 assert(VT.isInteger() && EVT.isInteger() &&
5426 "Cannot *_EXTEND_INREG FP types");
5427 assert(EVT.isVector() == VT.isVector() &&
5428 "SIGN_EXTEND_INREG type should be vector iff the operand "
5429 "type is vector!");
5430 assert((!EVT.isVector() ||
5431 EVT.getVectorElementCount() == VT.getVectorElementCount()) &&
5432 "Vector element counts must match in SIGN_EXTEND_INREG");
5433 assert(EVT.bitsLE(VT) && "Not extending!");
5434 if (EVT == VT) return N1; // Not actually extending
5435
5436 auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) {
5437 unsigned FromBits = EVT.getScalarSizeInBits();
5438 Val <<= Val.getBitWidth() - FromBits;
5439 Val.ashrInPlace(Val.getBitWidth() - FromBits);
5440 return getConstant(Val, DL, ConstantVT);
5441 };
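    // For example, sign-extending from 8 bits within an i32: the value 0x80
    // becomes 0x80000000 after the shift left by 24, then 0xFFFFFF80 after the
    // arithmetic shift right by 24.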
5442
5443 if (N1C) {
5444 const APInt &Val = N1C->getAPIntValue();
5445 return SignExtendInReg(Val, VT);
5446 }
5447 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) {
5448 SmallVector<SDValue, 8> Ops;
5449 llvm::EVT OpVT = N1.getOperand(0).getValueType();
5450 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
5451 SDValue Op = N1.getOperand(i);
5452 if (Op.isUndef()) {
5453 Ops.push_back(getUNDEF(OpVT));
5454 continue;
5455 }
5456 ConstantSDNode *C = cast<ConstantSDNode>(Op);
5457 APInt Val = C->getAPIntValue();
5458 Ops.push_back(SignExtendInReg(Val, OpVT));
5459 }
5460 return getBuildVector(VT, DL, Ops);
5461 }
5462 break;
5463 }
5464 case ISD::EXTRACT_VECTOR_ELT:
5465 assert(VT.getSizeInBits() >= N1.getValueType().getScalarSizeInBits() &&
5466 "The result of EXTRACT_VECTOR_ELT must be at least as wide as the \
5467 element type of the vector.");
5468
5469 // Extract from an undefined value or using an undefined index is undefined.
5470 if (N1.isUndef() || N2.isUndef())
5471 return getUNDEF(VT);
5472
5473 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF for fixed length
5474 // vectors. For scalable vectors we will provide appropriate support for
5475 // dealing with arbitrary indices.
5476 if (N2C && N1.getValueType().isFixedLengthVector() &&
5477 N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements()))
5478 return getUNDEF(VT);
5479
5480 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
5481 // expanding copies of large vectors from registers. This only works for
5482 // fixed length vectors, since we need to know the exact number of
5483 // elements.
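    // For example, extracting element 5 from a concatenation of two v4i32
    // vectors becomes extracting element 1 from the second concat operand.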
5484 if (N2C && N1.getOperand(0).getValueType().isFixedLengthVector() &&
5485 N1.getOpcode() == ISD::CONCAT_VECTORS && N1.getNumOperands() > 0) {
5486 unsigned Factor =
5487 N1.getOperand(0).getValueType().getVectorNumElements();
5488 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
5489 N1.getOperand(N2C->getZExtValue() / Factor),
5490 getVectorIdxConstant(N2C->getZExtValue() % Factor, DL));
5491 }
5492
5493 // EXTRACT_VECTOR_ELT of BUILD_VECTOR or SPLAT_VECTOR is often formed while
5494 // lowering is expanding large vector constants.
5495 if (N2C && (N1.getOpcode() == ISD::BUILD_VECTOR ||
5496 N1.getOpcode() == ISD::SPLAT_VECTOR)) {
5497 assert((N1.getOpcode() != ISD::BUILD_VECTOR ||
5498 N1.getValueType().isFixedLengthVector()) &&
5499 "BUILD_VECTOR used for scalable vectors");
5500 unsigned Index =
5501 N1.getOpcode() == ISD::BUILD_VECTOR ? N2C->getZExtValue() : 0;
5502 SDValue Elt = N1.getOperand(Index);
5503
5504 if (VT != Elt.getValueType())
5505 // If the vector element type is not legal, the BUILD_VECTOR operands
5506 // are promoted and implicitly truncated, and the result implicitly
5507 // extended. Make that explicit here.
5508 Elt = getAnyExtOrTrunc(Elt, DL, VT);
5509
5510 return Elt;
5511 }
5512
5513 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
5514 // operations are lowered to scalars.
5515 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
5516 // If the indices are the same, return the inserted element else
5517 // if the indices are known different, extract the element from
5518 // the original vector.
5519 SDValue N1Op2 = N1.getOperand(2);
5520 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);
5521
5522 if (N1Op2C && N2C) {
5523 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
5524 if (VT == N1.getOperand(1).getValueType())
5525 return N1.getOperand(1);
5526 else
5527 return getSExtOrTrunc(N1.getOperand(1), DL, VT);
5528 }
5529
5530 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
5531 }
5532 }
5533
5534 // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed
5535 // when vector types are scalarized and v1iX is legal.
5536 // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx).
5537 // Here we are completely ignoring the extract element index (N2),
5538 // which is fine for fixed width vectors, since any index other than 0
5539 // is undefined anyway. However, this cannot be ignored for scalable
5540 // vectors - in theory we could support this, but we don't want to do this
5541 // without a profitability check.
5542 if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
5543 N1.getValueType().isFixedLengthVector() &&
5544 N1.getValueType().getVectorNumElements() == 1) {
5545 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0),
5546 N1.getOperand(1));
5547 }
5548 break;
5549 case ISD::EXTRACT_ELEMENT:
5550 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
5551 assert(!N1.getValueType().isVector() && !VT.isVector() &&
5552 (N1.getValueType().isInteger() == VT.isInteger()) &&
5553 N1.getValueType() != VT &&
5554 "Wrong types for EXTRACT_ELEMENT!");
5555
5556 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
5557 // 64-bit integers into 32-bit parts. Instead of building the extract of
5558 // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
5559 if (N1.getOpcode() == ISD::BUILD_PAIR)
5560 return N1.getOperand(N2C->getZExtValue());
5561
5562 // EXTRACT_ELEMENT of a constant int is also very common.
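    // For example, extracting element 1 of an i64 constant as an i32 yields
    // the high 32 bits of that constant.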
5563 if (N1C) {
5564 unsigned ElementSize = VT.getSizeInBits();
5565 unsigned Shift = ElementSize * N2C->getZExtValue();
5566 APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift);
5567 return getConstant(ShiftedVal.trunc(ElementSize), DL, VT);
5568 }
5569 break;
5570 case ISD::EXTRACT_SUBVECTOR:
5571 EVT N1VT = N1.getValueType();
5572 assert(VT.isVector() && N1VT.isVector() &&
5573 "Extract subvector VTs must be vectors!");
5574 assert(VT.getVectorElementType() == N1VT.getVectorElementType() &&
5575 "Extract subvector VTs must have the same element type!");
5576 assert((VT.isFixedLengthVector() || N1VT.isScalableVector()) &&
5577 "Cannot extract a scalable vector from a fixed length vector!");
5578 assert((VT.isScalableVector() != N1VT.isScalableVector() ||
5579 VT.getVectorMinNumElements() <= N1VT.getVectorMinNumElements()) &&
5580 "Extract subvector must be from larger vector to smaller vector!");
5581 assert(N2C && "Extract subvector index must be a constant");
5582 assert((VT.isScalableVector() != N1VT.isScalableVector() ||
5583 (VT.getVectorMinNumElements() + N2C->getZExtValue()) <=
5584 N1VT.getVectorMinNumElements()) &&
5585 "Extract subvector overflow!");
5586 assert(N2C->getAPIntValue().getBitWidth() ==
5587 TLI->getVectorIdxTy(getDataLayout())
5588 .getSizeInBits()
5589 .getFixedSize() &&
5590 "Constant index for EXTRACT_SUBVECTOR has an invalid size");
5591
5592 // Trivial extraction.
5593 if (VT == N1VT)
5594 return N1;
5595
5596 // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF.
5597 if (N1.isUndef())
5598 return getUNDEF(VT);
5599
5600 // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of
5601 // the concat have the same type as the extract.
5602 if (N1.getOpcode() == ISD::CONCAT_VECTORS && N1.getNumOperands() > 0 &&
5603 VT == N1.getOperand(0).getValueType()) {
5604 unsigned Factor = VT.getVectorMinNumElements();
5605 return N1.getOperand(N2C->getZExtValue() / Factor);
5606 }
5607
5608 // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created
5609 // during shuffle legalization.
5610 if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) &&
5611 VT == N1.getOperand(1).getValueType())
5612 return N1.getOperand(1);
5613 break;
5614 }
5615
5616 // Perform trivial constant folding.
5617 if (SDValue SV = FoldConstantArithmetic(Opcode, DL, VT, {N1, N2}))
5618 return SV;
5619
5620 if (SDValue V = foldConstantFPMath(Opcode, DL, VT, N1, N2))
5621 return V;
5622
5623 // Canonicalize an UNDEF to the RHS, even over a constant.
5624 if (N1.isUndef()) {
5625 if (TLI->isCommutativeBinOp(Opcode)) {
5626 std::swap(N1, N2);
5627 } else {
5628 switch (Opcode) {
5629 case ISD::SIGN_EXTEND_INREG:
5630 case ISD::SUB:
5631 return getUNDEF(VT); // fold op(undef, arg2) -> undef
5632 case ISD::UDIV:
5633 case ISD::SDIV:
5634 case ISD::UREM:
5635 case ISD::SREM:
5636 case ISD::SSUBSAT:
5637 case ISD::USUBSAT:
5638 return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0
5639 }
5640 }
5641 }
5642
5643 // Fold a bunch of operators when the RHS is undef.
5644 if (N2.isUndef()) {
5645 switch (Opcode) {
5646 case ISD::XOR:
5647 if (N1.isUndef())
5648 // Handle undef ^ undef -> 0 special case. This is a common
5649 // idiom (misuse).
5650 return getConstant(0, DL, VT);
5651 LLVM_FALLTHROUGH;
5652 case ISD::ADD:
5653 case ISD::SUB:
5654 case ISD::UDIV:
5655 case ISD::SDIV:
5656 case ISD::UREM:
5657 case ISD::SREM:
5658 return getUNDEF(VT); // fold op(arg1, undef) -> undef
5659 case ISD::MUL:
5660 case ISD::AND:
5661 case ISD::SSUBSAT:
5662 case ISD::USUBSAT:
5663 return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0
5664 case ISD::OR:
5665 case ISD::SADDSAT:
5666 case ISD::UADDSAT:
5667 return getAllOnesConstant(DL, VT);
5668 }
5669 }
5670
5671 // Memoize this node if possible.
5672 SDNode *N;
5673 SDVTList VTs = getVTList(VT);
5674 SDValue Ops[] = {N1, N2};
5675 if (VT != MVT::Glue) {
5676 FoldingSetNodeID ID;
5677 AddNodeIDNode(ID, Opcode, VTs, Ops);
5678 void *IP = nullptr;
5679 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
5680 E->intersectFlagsWith(Flags);
5681 return SDValue(E, 0);
5682 }
5683
5684 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5685 N->setFlags(Flags);
5686 createOperands(N, Ops);
5687 CSEMap.InsertNode(N, IP);
5688 } else {
5689 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5690 createOperands(N, Ops);
5691 }
5692
5693 InsertNode(N);
5694 SDValue V = SDValue(N, 0);
5695 NewSDValueDbgMsg(V, "Creating new node: ", this);
5696 return V;
5697 }
5698
5699 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5700 SDValue N1, SDValue N2, SDValue N3) {
5701 SDNodeFlags Flags;
5702 if (Inserter)
5703 Flags = Inserter->getFlags();
5704 return getNode(Opcode, DL, VT, N1, N2, N3, Flags);
5705 }
5706
5707 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5708 SDValue N1, SDValue N2, SDValue N3,
5709 const SDNodeFlags Flags) {
5710 // Perform various simplifications.
5711 switch (Opcode) {
5712 case ISD::FMA: {
5713 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
5714 assert(N1.getValueType() == VT && N2.getValueType() == VT &&
5715 N3.getValueType() == VT && "FMA types must match!");
5716 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
5717 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
5718 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
5719 if (N1CFP && N2CFP && N3CFP) {
5720 APFloat V1 = N1CFP->getValueAPF();
5721 const APFloat &V2 = N2CFP->getValueAPF();
5722 const APFloat &V3 = N3CFP->getValueAPF();
5723 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
5724 return getConstantFP(V1, DL, VT);
5725 }
5726 break;
5727 }
5728 case ISD::BUILD_VECTOR: {
5729 // Attempt to simplify BUILD_VECTOR.
5730 SDValue Ops[] = {N1, N2, N3};
5731 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
5732 return V;
5733 break;
5734 }
5735 case ISD::CONCAT_VECTORS: {
5736 SDValue Ops[] = {N1, N2, N3};
5737 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
5738 return V;
5739 break;
5740 }
5741 case ISD::SETCC: {
5742 assert(VT.isInteger() && "SETCC result type must be an integer!");
5743 assert(N1.getValueType() == N2.getValueType() &&
5744 "SETCC operands must have the same type!");
5745 assert(VT.isVector() == N1.getValueType().isVector() &&
5746 "SETCC type should be vector iff the operand type is vector!");
5747 assert((!VT.isVector() || VT.getVectorElementCount() ==
5748 N1.getValueType().getVectorElementCount()) &&
5749 "SETCC vector element counts must match!");
5750 // Use FoldSetCC to simplify SETCC's.
5751 if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL))
5752 return V;
5753 // Vector constant folding.
5754 SDValue Ops[] = {N1, N2, N3};
5755 if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) {
5756 NewSDValueDbgMsg(V, "New node vector constant folding: ", this);
5757 return V;
5758 }
5759 break;
5760 }
5761 case ISD::SELECT:
5762 case ISD::VSELECT:
5763 if (SDValue V = simplifySelect(N1, N2, N3))
5764 return V;
5765 break;
5766 case ISD::VECTOR_SHUFFLE:
5767 llvm_unreachable("should use getVectorShuffle constructor!");
5768 case ISD::INSERT_VECTOR_ELT: {
5769 ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3);
5770 // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF, except
5771 // for scalable vectors where we will generate appropriate code to
5772 // deal with out-of-bounds cases correctly.
5773 if (N3C && N1.getValueType().isFixedLengthVector() &&
5774 N3C->getZExtValue() >= N1.getValueType().getVectorNumElements())
5775 return getUNDEF(VT);
5776
5777 // Undefined index can be assumed out-of-bounds, so that's UNDEF too.
5778 if (N3.isUndef())
5779 return getUNDEF(VT);
5780
5781 // If the inserted element is an UNDEF, just use the input vector.
5782 if (N2.isUndef())
5783 return N1;
5784
5785 break;
5786 }
5787 case ISD::INSERT_SUBVECTOR: {
5788 // Inserting undef into undef is still undef.
5789 if (N1.isUndef() && N2.isUndef())
5790 return getUNDEF(VT);
5791
5792 EVT N2VT = N2.getValueType();
5793 assert(VT == N1.getValueType() &&
5794 "Dest and insert subvector source types must match!");
5795 assert(VT.isVector() && N2VT.isVector() &&
5796 "Insert subvector VTs must be vectors!");
5797 assert((VT.isScalableVector() || N2VT.isFixedLengthVector()) &&
5798 "Cannot insert a scalable vector into a fixed length vector!");
5799 assert((VT.isScalableVector() != N2VT.isScalableVector() ||
5800 VT.getVectorMinNumElements() >= N2VT.getVectorMinNumElements()) &&
5801 "Insert subvector must be from smaller vector to larger vector!");
5802 assert(isa<ConstantSDNode>(N3) &&
5803 "Insert subvector index must be constant");
5804 assert((VT.isScalableVector() != N2VT.isScalableVector() ||
5805 (N2VT.getVectorMinNumElements() +
5806 cast<ConstantSDNode>(N3)->getZExtValue()) <=
5807 VT.getVectorMinNumElements()) &&
5808 "Insert subvector overflow!");
5809
5810 // Trivial insertion.
5811 if (VT == N2VT)
5812 return N2;
5813
5814 // If this is an insert of an extracted vector into an undef vector, we
5815 // can just use the input to the extract.
5816 if (N1.isUndef() && N2.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
5817 N2.getOperand(1) == N3 && N2.getOperand(0).getValueType() == VT)
5818 return N2.getOperand(0);
5819 break;
5820 }
5821 case ISD::BITCAST:
5822 // Fold bit_convert nodes from a type to themselves.
5823 if (N1.getValueType() == VT)
5824 return N1;
5825 break;
5826 }
5827
5828 // Memoize node if it doesn't produce a flag.
5829 SDNode *N;
5830 SDVTList VTs = getVTList(VT);
5831 SDValue Ops[] = {N1, N2, N3};
5832 if (VT != MVT::Glue) {
5833 FoldingSetNodeID ID;
5834 AddNodeIDNode(ID, Opcode, VTs, Ops);
5835 void *IP = nullptr;
5836 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
5837 E->intersectFlagsWith(Flags);
5838 return SDValue(E, 0);
5839 }
5840
5841 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5842 N->setFlags(Flags);
5843 createOperands(N, Ops);
5844 CSEMap.InsertNode(N, IP);
5845 } else {
5846 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5847 createOperands(N, Ops);
5848 }
5849
5850 InsertNode(N);
5851 SDValue V = SDValue(N, 0);
5852 NewSDValueDbgMsg(V, "Creating new node: ", this);
5853 return V;
5854 }
5855
5856 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5857 SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
5858 SDValue Ops[] = { N1, N2, N3, N4 };
5859 return getNode(Opcode, DL, VT, Ops);
5860 }
5861
5862 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5863 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
5864 SDValue N5) {
5865 SDValue Ops[] = { N1, N2, N3, N4, N5 };
5866 return getNode(Opcode, DL, VT, Ops);
5867 }
5868
5869 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
5870 /// the incoming stack arguments to be loaded from the stack.
5871 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
5872 SmallVector<SDValue, 8> ArgChains;
5873
5874 // Include the original chain at the beginning of the list. When this is
5875 // used by target LowerCall hooks, this helps legalize find the
5876 // CALLSEQ_BEGIN node.
5877 ArgChains.push_back(Chain);
5878
5879 // Add a chain value for each stack argument.
5880 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
5881 UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
5882 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
5883 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
5884 if (FI->getIndex() < 0)
5885 ArgChains.push_back(SDValue(L, 1));
5886
5887 // Build a tokenfactor for all the chains.
5888 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
5889 }
5890
5891 /// getMemsetValue - Vectorized representation of the memset value
5892 /// operand.
5893 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
5894 const SDLoc &dl) {
5895 assert(!Value.isUndef());
5896
5897 unsigned NumBits = VT.getScalarSizeInBits();
5898 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
5899 assert(C->getAPIntValue().getBitWidth() == 8);
5900 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
5901 if (VT.isInteger()) {
5902 bool IsOpaque = VT.getSizeInBits() > 64 ||
5903 !DAG.getTargetLoweringInfo().isLegalStoreImmediate(C->getSExtValue());
5904 return DAG.getConstant(Val, dl, VT, false, IsOpaque);
5905 }
5906 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl,
5907 VT);
5908 }
5909
5910 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?");
5911 EVT IntVT = VT.getScalarType();
5912 if (!IntVT.isInteger())
5913 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits());
5914
5915 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value);
5916 if (NumBits > 8) {
5917 // Use a multiplication with 0x010101... to extend the input to the
5918 // required length.
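    // For example, widening an i8 fill byte 0xAB to i32 computes
    // 0xAB * 0x01010101 = 0xABABABAB.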
5919 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
5920 Value = DAG.getNode(ISD::MUL, dl, IntVT, Value,
5921 DAG.getConstant(Magic, dl, IntVT));
5922 }
5923
5924 if (VT != Value.getValueType() && !VT.isInteger())
5925 Value = DAG.getBitcast(VT.getScalarType(), Value);
5926 if (VT != Value.getValueType())
5927 Value = DAG.getSplatBuildVector(VT, dl, Value);
5928
5929 return Value;
5930 }
5931
5932 /// getMemsetStringVal - Similar to getMemsetValue, except this is only
5933 /// used when a memcpy is turned into a memset because the source is a
5934 /// constant string pointer.
5935 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG,
5936 const TargetLowering &TLI,
5937 const ConstantDataArraySlice &Slice) {
5938 // Handle vector with all elements zero.
5939 if (Slice.Array == nullptr) {
5940 if (VT.isInteger())
5941 return DAG.getConstant(0, dl, VT);
5942 else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
5943 return DAG.getConstantFP(0.0, dl, VT);
5944 else if (VT.isVector()) {
5945 unsigned NumElts = VT.getVectorNumElements();
5946 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
5947 return DAG.getNode(ISD::BITCAST, dl, VT,
5948 DAG.getConstant(0, dl,
5949 EVT::getVectorVT(*DAG.getContext(),
5950 EltVT, NumElts)));
5951 } else
5952 llvm_unreachable("Expected type!");
5953 }
5954
5955 assert(!VT.isVector() && "Can't handle vector type here!");
5956 unsigned NumVTBits = VT.getSizeInBits();
5957 unsigned NumVTBytes = NumVTBits / 8;
5958 unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length));
5959
5960 APInt Val(NumVTBits, 0);
5961 if (DAG.getDataLayout().isLittleEndian()) {
5962 for (unsigned i = 0; i != NumBytes; ++i)
5963 Val |= (uint64_t)(unsigned char)Slice[i] << i*8;
5964 } else {
5965 for (unsigned i = 0; i != NumBytes; ++i)
5966 Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8;
5967 }
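// For example (illustrative), packing the bytes "abcd" into an i32 yields
// 0x64636261 on a little-endian target and 0x61626364 on a big-endian one.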
5968
5969 // If the "cost" of materializing the integer immediate is less than the cost
5970 // of a load, then it is cost effective to turn the load into the immediate.
5971 Type *Ty = VT.getTypeForEVT(*DAG.getContext());
5972 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
5973 return DAG.getConstant(Val, dl, VT);
5974 return SDValue(nullptr, 0);
5975 }
5976
5977 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, TypeSize Offset,
5978 const SDLoc &DL,
5979 const SDNodeFlags Flags) {
5980 EVT VT = Base.getValueType();
5981 SDValue Index;
5982
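// A scalable offset is materialized as vscale * KnownMinSize, while a fixed
// offset becomes an ordinary constant of the pointer type.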
5983 if (Offset.isScalable())
5984 Index = getVScale(DL, Base.getValueType(),
5985 APInt(Base.getValueSizeInBits().getFixedSize(),
5986 Offset.getKnownMinSize()));
5987 else
5988 Index = getConstant(Offset.getFixedSize(), DL, VT);
5989
5990 return getMemBasePlusOffset(Base, Index, DL, Flags);
5991 }
5992
5993 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Ptr, SDValue Offset,
5994 const SDLoc &DL,
5995 const SDNodeFlags Flags) {
5996 assert(Offset.getValueType().isInteger());
5997 EVT BasePtrVT = Ptr.getValueType();
5998 return getNode(ISD::ADD, DL, BasePtrVT, Ptr, Offset, Flags);
5999 }
6000
6001 /// Returns true if memcpy source is constant data.
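/// For example (illustrative), a source of (add (GlobalAddress @str), 4)
/// reads the constant initializer of @str starting at byte offset 4.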
6002 static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) {
6003 uint64_t SrcDelta = 0;
6004 GlobalAddressSDNode *G = nullptr;
6005 if (Src.getOpcode() == ISD::GlobalAddress)
6006 G = cast<GlobalAddressSDNode>(Src);
6007 else if (Src.getOpcode() == ISD::ADD &&
6008 Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
6009 Src.getOperand(1).getOpcode() == ISD::Constant) {
6010 G = cast<GlobalAddressSDNode>(Src.getOperand(0));
6011 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
6012 }
6013 if (!G)
6014 return false;
6015
6016 return getConstantDataArrayInfo(G->getGlobal(), Slice, 8,
6017 SrcDelta + G->getOffset());
6018 }
6019
6020 static bool shouldLowerMemFuncForSize(const MachineFunction &MF,
6021 SelectionDAG &DAG) {
6022 // On Darwin, -Os means optimize for size without hurting performance, so
6023 // only really optimize for size when -Oz (MinSize) is used.
6024 if (MF.getTarget().getTargetTriple().isOSDarwin())
6025 return MF.getFunction().hasMinSize();
6026 return DAG.shouldOptForSize();
6027 }
6028
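// Glue together the loads in [From, To) behind a single TokenFactor and
// reissue the matching stores with that token as their chain, so each batch
// of loads and stores stays together for scheduling.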
6029 static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
6030 SmallVector<SDValue, 32> &OutChains, unsigned From,
6031 unsigned To, SmallVector<SDValue, 16> &OutLoadChains,
6032 SmallVector<SDValue, 16> &OutStoreChains) {
6033 assert(OutLoadChains.size() && "Missing loads in memcpy inlining");
6034 assert(OutStoreChains.size() && "Missing stores in memcpy inlining");
6035 SmallVector<SDValue, 16> GluedLoadChains;
6036 for (unsigned i = From; i < To; ++i) {
6037 OutChains.push_back(OutLoadChains[i]);
6038 GluedLoadChains.push_back(OutLoadChains[i]);
6039 }
6040
6041 // Chain for all loads.
6042 SDValue LoadToken = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
6043 GluedLoadChains);
6044
6045 for (unsigned i = From; i < To; ++i) {
6046 StoreSDNode *ST = dyn_cast<StoreSDNode>(OutStoreChains[i]);
6047 SDValue NewStore = DAG.getTruncStore(LoadToken, dl, ST->getValue(),
6048 ST->getBasePtr(), ST->getMemoryVT(),
6049 ST->getMemOperand());
6050 OutChains.push_back(NewStore);
6051 }
6052 }
6053
6054 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
6055 SDValue Chain, SDValue Dst, SDValue Src,
6056 uint64_t Size, Align Alignment,
6057 bool isVol, bool AlwaysInline,
6058 MachinePointerInfo DstPtrInfo,
6059 MachinePointerInfo SrcPtrInfo) {
6060 // Turn a memcpy of undef to nop.
6061 // FIXME: We need to honor volatile even if Src is undef.
6062 if (Src.isUndef())
6063 return Chain;
6064
6065 // Expand memcpy to a series of load and store ops if the size operand falls
6066 // below a certain threshold.
6067 // TODO: In the AlwaysInline case, if the size is large then generate a loop
6068 // rather than a potentially huge number of loads and stores.
6069 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6070 const DataLayout &DL = DAG.getDataLayout();
6071 LLVMContext &C = *DAG.getContext();
6072 std::vector<EVT> MemOps;
6073 bool DstAlignCanChange = false;
6074 MachineFunction &MF = DAG.getMachineFunction();
6075 MachineFrameInfo &MFI = MF.getFrameInfo();
6076 bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
6077 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
6078 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
6079 DstAlignCanChange = true;
6080 MaybeAlign SrcAlign = DAG.InferPtrAlign(Src);
6081 if (!SrcAlign || Alignment > *SrcAlign)
6082 SrcAlign = Alignment;
6083 assert(SrcAlign && "SrcAlign must be set");
6084 ConstantDataArraySlice Slice;
6085 // If marked as volatile, perform a copy even when marked as constant.
6086 bool CopyFromConstant = !isVol && isMemSrcFromConstant(Src, Slice);
6087 bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr;
6088 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
6089 const MemOp Op = isZeroConstant
6090 ? MemOp::Set(Size, DstAlignCanChange, Alignment,
6091 /*IsZeroMemset*/ true, isVol)
6092 : MemOp::Copy(Size, DstAlignCanChange, Alignment,
6093 *SrcAlign, isVol, CopyFromConstant);
6094 if (!TLI.findOptimalMemOpLowering(
6095 MemOps, Limit, Op, DstPtrInfo.getAddrSpace(),
6096 SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes()))
6097 return SDValue();
6098
6099 if (DstAlignCanChange) {
6100 Type *Ty = MemOps[0].getTypeForEVT(C);
6101 Align NewAlign = DL.getABITypeAlign(Ty);
6102
6103 // Don't promote to an alignment that would require dynamic stack
6104 // realignment.
6105 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
6106 if (!TRI->needsStackRealignment(MF))
6107 while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
6108 NewAlign = NewAlign / 2;
6109
6110 if (NewAlign > Alignment) {
6111 // Give the stack frame object a larger alignment if needed.
6112 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
6113 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
6114 Alignment = NewAlign;
6115 }
6116 }
6117
6118 MachineMemOperand::Flags MMOFlags =
6119 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
6120 SmallVector<SDValue, 16> OutLoadChains;
6121 SmallVector<SDValue, 16> OutStoreChains;
6122 SmallVector<SDValue, 32> OutChains;
6123 unsigned NumMemOps = MemOps.size();
6124 uint64_t SrcOff = 0, DstOff = 0;
6125 for (unsigned i = 0; i != NumMemOps; ++i) {
6126 EVT VT = MemOps[i];
6127 unsigned VTSize = VT.getSizeInBits() / 8;
6128 SDValue Value, Store;
6129
6130 if (VTSize > Size) {
6131 // Issuing an unaligned load / store pair that overlaps with the previous
6132 // pair. Adjust the offset accordingly.
6133 assert(i == NumMemOps-1 && i != 0);
6134 SrcOff -= VTSize - Size;
6135 DstOff -= VTSize - Size;
6136 }
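// For example (illustrative), if a 7-byte copy is lowered as two i32
// operations, the second load/store pair is issued at offset 3 and overlaps
// one byte of the first pair.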
6137
6138 if (CopyFromConstant &&
6139 (isZeroConstant || (VT.isInteger() && !VT.isVector()))) {
6140 // It's unlikely a store of a vector immediate can be done in a single
6141 // instruction. It would require a load from a constant pool first.
6142 // We only handle zero vectors here.
6143 // FIXME: Handle other cases where store of vector immediate is done in
6144 // a single instruction.
6145 ConstantDataArraySlice SubSlice;
6146 if (SrcOff < Slice.Length) {
6147 SubSlice = Slice;
6148 SubSlice.move(SrcOff);
6149 } else {
6150 // This is an out-of-bounds access and hence UB. Pretend we read zero.
6151 SubSlice.Array = nullptr;
6152 SubSlice.Offset = 0;
6153 SubSlice.Length = VTSize;
6154 }
6155 Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice);
6156 if (Value.getNode()) {
6157 Store = DAG.getStore(
6158 Chain, dl, Value,
6159 DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
6160 DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags);
6161 OutChains.push_back(Store);
6162 }
6163 }
6164
6165 if (!Store.getNode()) {
6166 // The type might not be legal for the target. This should only happen
6167 // if the type is smaller than a legal type, as on PPC, so the right
6168 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
6169 // to Load/Store if NVT==VT.
6170 // FIXME does the case above also need this?
6171 EVT NVT = TLI.getTypeToTransformTo(C, VT);
6172 assert(NVT.bitsGE(VT));
6173
6174 bool isDereferenceable =
6175 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
6176 MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
6177 if (isDereferenceable)
6178 SrcMMOFlags |= MachineMemOperand::MODereferenceable;
6179
6180 Value = DAG.getExtLoad(
6181 ISD::EXTLOAD, dl, NVT, Chain,
6182 DAG.getMemBasePlusOffset(Src, TypeSize::Fixed(SrcOff), dl),
6183 SrcPtrInfo.getWithOffset(SrcOff), VT,
6184 commonAlignment(*SrcAlign, SrcOff), SrcMMOFlags);
6185 OutLoadChains.push_back(Value.getValue(1));
6186
6187 Store = DAG.getTruncStore(
6188 Chain, dl, Value,
6189 DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
6190 DstPtrInfo.getWithOffset(DstOff), VT, Alignment, MMOFlags);
6191 OutStoreChains.push_back(Store);
6192 }
6193 SrcOff += VTSize;
6194 DstOff += VTSize;
6195 Size -= VTSize;
6196 }
6197
6198 unsigned GluedLdStLimit = MaxLdStGlue == 0 ?
6199 TLI.getMaxGluedStoresPerMemcpy() : MaxLdStGlue;
6200 unsigned NumLdStInMemcpy = OutStoreChains.size();
6201
6202 if (NumLdStInMemcpy) {
6203 // The memcpy may have been converted to a memset if it copies constant
6204 // data. In that case we have only stores, no loads, and with no loads
6205 // there is nothing to glue together.
6206 if ((GluedLdStLimit <= 1) || !EnableMemCpyDAGOpt) {
6207 // If the target does not care, just leave it as is.
6208 for (unsigned i = 0; i < NumLdStInMemcpy; ++i) {
6209 OutChains.push_back(OutLoadChains[i]);
6210 OutChains.push_back(OutStoreChains[i]);
6211 }
6212 } else {
6213 // Ld/St less than/equal limit set by target.
6214 if (NumLdStInMemcpy <= GluedLdStLimit) {
6215 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
6216 NumLdStInMemcpy, OutLoadChains,
6217 OutStoreChains);
6218 } else {
6219 unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit;
6220 unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit;
6221 unsigned GlueIter = 0;
6222
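// For example (illustrative), with 10 load/store pairs and a glue limit of
// 4, the loop below glues pairs [6,10) and then [2,6); the remaining pairs
// [0,2) are chained afterwards as the residual group.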
6223 for (unsigned cnt = 0; cnt < NumberLdChain; ++cnt) {
6224 unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit;
6225 unsigned IndexTo = NumLdStInMemcpy - GlueIter;
6226
6227 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, IndexFrom, IndexTo,
6228 OutLoadChains, OutStoreChains);
6229 GlueIter += GluedLdStLimit;
6230 }
6231
6232 // Residual ld/st.
6233 if (RemainingLdStInMemcpy) {
6234 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
6235 RemainingLdStInMemcpy, OutLoadChains,
6236 OutStoreChains);
6237 }
6238 }
6239 }
6240 }
6241 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
6242 }
6243
6244 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
6245 SDValue Chain, SDValue Dst, SDValue Src,
6246 uint64_t Size, Align Alignment,
6247 bool isVol, bool AlwaysInline,
6248 MachinePointerInfo DstPtrInfo,
6249 MachinePointerInfo SrcPtrInfo) {
6250 // Turn a memmove of undef to nop.
6251 // FIXME: We need to honor volatile even if Src is undef.
6252 if (Src.isUndef())
6253 return Chain;
6254
6255 // Expand memmove to a series of load and store ops if the size operand falls
6256 // below a certain threshold.
6257 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6258 const DataLayout &DL = DAG.getDataLayout();
6259 LLVMContext &C = *DAG.getContext();
6260 std::vector<EVT> MemOps;
6261 bool DstAlignCanChange = false;
6262 MachineFunction &MF = DAG.getMachineFunction();
6263 MachineFrameInfo &MFI = MF.getFrameInfo();
6264 bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
6265 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
6266 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
6267 DstAlignCanChange = true;
6268 MaybeAlign SrcAlign = DAG.InferPtrAlign(Src);
6269 if (!SrcAlign || Alignment > *SrcAlign)
6270 SrcAlign = Alignment;
6271 assert(SrcAlign && "SrcAlign must be set");
6272 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
6273 if (!TLI.findOptimalMemOpLowering(
6274 MemOps, Limit,
6275 MemOp::Copy(Size, DstAlignCanChange, Alignment, *SrcAlign,
6276 /*IsVolatile*/ true),
6277 DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
6278 MF.getFunction().getAttributes()))
6279 return SDValue();
6280
6281 if (DstAlignCanChange) {
6282 Type *Ty = MemOps[0].getTypeForEVT(C);
6283 Align NewAlign = DL.getABITypeAlign(Ty);
6284 if (NewAlign > Alignment) {
6285 // Give the stack frame object a larger alignment if needed.
6286 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
6287 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
6288 Alignment = NewAlign;
6289 }
6290 }
6291
6292 MachineMemOperand::Flags MMOFlags =
6293 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
6294 uint64_t SrcOff = 0, DstOff = 0;
6295 SmallVector<SDValue, 8> LoadValues;
6296 SmallVector<SDValue, 8> LoadChains;
6297 SmallVector<SDValue, 8> OutChains;
6298 unsigned NumMemOps = MemOps.size();
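// Unlike memcpy, memmove must tolerate overlapping source and destination
// buffers, so issue every load before issuing any of the stores.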
6299 for (unsigned i = 0; i < NumMemOps; i++) {
6300 EVT VT = MemOps[i];
6301 unsigned VTSize = VT.getSizeInBits() / 8;
6302 SDValue Value;
6303
6304 bool isDereferenceable =
6305 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
6306 MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
6307 if (isDereferenceable)
6308 SrcMMOFlags |= MachineMemOperand::MODereferenceable;
6309
6310 Value =
6311 DAG.getLoad(VT, dl, Chain,
6312 DAG.getMemBasePlusOffset(Src, TypeSize::Fixed(SrcOff), dl),
6313 SrcPtrInfo.getWithOffset(SrcOff), *SrcAlign, SrcMMOFlags);
6314 LoadValues.push_back(Value);
6315 LoadChains.push_back(Value.getValue(1));
6316 SrcOff += VTSize;
6317 }
6318 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
6319 OutChains.clear();
6320 for (unsigned i = 0; i < NumMemOps; i++) {
6321 EVT VT = MemOps[i];
6322 unsigned VTSize = VT.getSizeInBits() / 8;
6323 SDValue Store;
6324
6325 Store =
6326 DAG.getStore(Chain, dl, LoadValues[i],
6327 DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
6328 DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags);
6329 OutChains.push_back(Store);
6330 DstOff += VTSize;
6331 }
6332
6333 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
6334 }
6335
6336 /// Lower the call to 'memset' intrinsic function into a series of store
6337 /// operations.
6338 ///
6339 /// \param DAG Selection DAG where lowered code is placed.
6340 /// \param dl Link to corresponding IR location.
6341 /// \param Chain Control flow dependency.
6342 /// \param Dst Pointer to destination memory location.
6343 /// \param Src Value of the byte to write into memory.
6344 /// \param Size Number of bytes to write.
6345 /// \param Alignment Alignment of the destination in bytes.
6346 /// \param isVol True if destination is volatile.
6347 /// \param DstPtrInfo IR information on the memory pointer.
6348 /// \returns New head in the control flow, if lowering was successful, empty
6349 /// SDValue otherwise.
6350 ///
6351 /// The function tries to replace 'llvm.memset' intrinsic with several store
6352 /// operations and value calculation code. This is usually profitable for small
6353 /// memory size.
6354 static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
6355 SDValue Chain, SDValue Dst, SDValue Src,
6356 uint64_t Size, Align Alignment, bool isVol,
6357 MachinePointerInfo DstPtrInfo) {
6358 // Turn a memset of undef to nop.
6359 // FIXME: We need to honor volatile even if Src is undef.
6360 if (Src.isUndef())
6361 return Chain;
6362
6363 // Expand memset to a series of load/store ops if the size operand
6364 // falls below a certain threshold.
6365 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6366 std::vector<EVT> MemOps;
6367 bool DstAlignCanChange = false;
6368 MachineFunction &MF = DAG.getMachineFunction();
6369 MachineFrameInfo &MFI = MF.getFrameInfo();
6370 bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
6371 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
6372 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
6373 DstAlignCanChange = true;
6374 bool IsZeroVal =
6375 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
6376 if (!TLI.findOptimalMemOpLowering(
6377 MemOps, TLI.getMaxStoresPerMemset(OptSize),
6378 MemOp::Set(Size, DstAlignCanChange, Alignment, IsZeroVal, isVol),
6379 DstPtrInfo.getAddrSpace(), ~0u, MF.getFunction().getAttributes()))
6380 return SDValue();
6381
6382 if (DstAlignCanChange) {
6383 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
6384 Align NewAlign = DAG.getDataLayout().getABITypeAlign(Ty);
6385 if (NewAlign > Alignment) {
6386 // Give the stack frame object a larger alignment if needed.
6387 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
6388 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
6389 Alignment = NewAlign;
6390 }
6391 }
6392
6393 SmallVector<SDValue, 8> OutChains;
6394 uint64_t DstOff = 0;
6395 unsigned NumMemOps = MemOps.size();
6396
6397 // Find the largest store and generate the bit pattern for it.
6398 EVT LargestVT = MemOps[0];
6399 for (unsigned i = 1; i < NumMemOps; i++)
6400 if (MemOps[i].bitsGT(LargestVT))
6401 LargestVT = MemOps[i];
6402 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
6403
6404 for (unsigned i = 0; i < NumMemOps; i++) {
6405 EVT VT = MemOps[i];
6406 unsigned VTSize = VT.getSizeInBits() / 8;
6407 if (VTSize > Size) {
6408 // Issuing an unaligned load / store pair that overlaps with the previous
6409 // pair. Adjust the offset accordingly.
6410 assert(i == NumMemOps-1 && i != 0);
6411 DstOff -= VTSize - Size;
6412 }
6413
6414 // If this store is smaller than the largest store, see whether we can get
6415 // the smaller value for free with a truncate.
6416 SDValue Value = MemSetValue;
6417 if (VT.bitsLT(LargestVT)) {
6418 if (!LargestVT.isVector() && !VT.isVector() &&
6419 TLI.isTruncateFree(LargestVT, VT))
6420 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
6421 else
6422 Value = getMemsetValue(Src, VT, DAG, dl);
6423 }
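// For example (illustrative), a 5-byte memset of 0xAB lowered as i32 + i8
// stores 0xABABABAB followed by 0xAB; the smaller value comes from a
// truncate when the target makes that free, and is recomputed otherwise.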
6424 assert(Value.getValueType() == VT && "Value with wrong type.");
6425 SDValue Store = DAG.getStore(
6426 Chain, dl, Value,
6427 DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
6428 DstPtrInfo.getWithOffset(DstOff), Alignment,
6429 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone);
6430 OutChains.push_back(Store);
6431 DstOff += VT.getSizeInBits() / 8;
6432 Size -= VTSize;
6433 }
6434
6435 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
6436 }
6437
6438 static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI,
6439 unsigned AS) {
6440 // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all
6441 // pointer operands can be losslessly bitcasted to pointers of address space 0.
6442 if (AS != 0 && !TLI->getTargetMachine().isNoopAddrSpaceCast(AS, 0)) {
6443 report_fatal_error("cannot lower memory intrinsic in address space " +
6444 Twine(AS));
6445 }
6446 }
6447
6448 SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
6449 SDValue Src, SDValue Size, Align Alignment,
6450 bool isVol, bool AlwaysInline, bool isTailCall,
6451 MachinePointerInfo DstPtrInfo,
6452 MachinePointerInfo SrcPtrInfo) {
6453 // Check to see if we should lower the memcpy to loads and stores first.
6454 // For cases within the target-specified limits, this is the best choice.
6455 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
6456 if (ConstantSize) {
6457 // Memcpy with size zero? Just return the original chain.
6458 if (ConstantSize->isNullValue())
6459 return Chain;
6460
6461 SDValue Result = getMemcpyLoadsAndStores(
6462 *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment,
6463 isVol, false, DstPtrInfo, SrcPtrInfo);
6464 if (Result.getNode())
6465 return Result;
6466 }
6467
6468 // Then check to see if we should lower the memcpy with target-specific
6469 // code. If the target chooses to do this, this is the next best.
6470 if (TSI) {
6471 SDValue Result = TSI->EmitTargetCodeForMemcpy(
6472 *this, dl, Chain, Dst, Src, Size, Alignment, isVol, AlwaysInline,
6473 DstPtrInfo, SrcPtrInfo);
6474 if (Result.getNode())
6475 return Result;
6476 }
6477
6478 // If we really need inline code and the target declined to provide it,
6479 // use a (potentially long) sequence of loads and stores.
6480 if (AlwaysInline) {
6481 assert(ConstantSize && "AlwaysInline requires a constant size!");
6482 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
6483 ConstantSize->getZExtValue(), Alignment,
6484 isVol, true, DstPtrInfo, SrcPtrInfo);
6485 }
6486
6487 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
6488 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
6489
6490 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
6491 // memcpy is not guaranteed to be safe. libc memcpys aren't required to
6492 // respect volatile, so they may do things like read or write memory
6493 // beyond the given memory regions. But fixing this isn't easy, and most
6494 // people don't care.
6495
6496 // Emit a library call.
6497 TargetLowering::ArgListTy Args;
6498 TargetLowering::ArgListEntry Entry;
6499 Entry.Ty = Type::getInt8PtrTy(*getContext());
6500 Entry.Node = Dst; Args.push_back(Entry);
6501 Entry.Node = Src; Args.push_back(Entry);
6502
6503 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6504 Entry.Node = Size; Args.push_back(Entry);
6505 // FIXME: pass in SDLoc
6506 TargetLowering::CallLoweringInfo CLI(*this);
6507 CLI.setDebugLoc(dl)
6508 .setChain(Chain)
6509 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY),
6510 Dst.getValueType().getTypeForEVT(*getContext()),
6511 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
6512 TLI->getPointerTy(getDataLayout())),
6513 std::move(Args))
6514 .setDiscardResult()
6515 .setTailCall(isTailCall);
6516
6517 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
6518 return CallResult.second;
6519 }
6520
6521 SDValue SelectionDAG::getAtomicMemcpy(SDValue Chain, const SDLoc &dl,
6522 SDValue Dst, unsigned DstAlign,
6523 SDValue Src, unsigned SrcAlign,
6524 SDValue Size, Type *SizeTy,
6525 unsigned ElemSz, bool isTailCall,
6526 MachinePointerInfo DstPtrInfo,
6527 MachinePointerInfo SrcPtrInfo) {
6528 // Emit a library call.
6529 TargetLowering::ArgListTy Args;
6530 TargetLowering::ArgListEntry Entry;
6531 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6532 Entry.Node = Dst;
6533 Args.push_back(Entry);
6534
6535 Entry.Node = Src;
6536 Args.push_back(Entry);
6537
6538 Entry.Ty = SizeTy;
6539 Entry.Node = Size;
6540 Args.push_back(Entry);
6541
6542 RTLIB::Libcall LibraryCall =
6543 RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElemSz);
6544 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
6545 report_fatal_error("Unsupported element size");
6546
6547 TargetLowering::CallLoweringInfo CLI(*this);
6548 CLI.setDebugLoc(dl)
6549 .setChain(Chain)
6550 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
6551 Type::getVoidTy(*getContext()),
6552 getExternalSymbol(TLI->getLibcallName(LibraryCall),
6553 TLI->getPointerTy(getDataLayout())),
6554 std::move(Args))
6555 .setDiscardResult()
6556 .setTailCall(isTailCall);
6557
6558 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
6559 return CallResult.second;
6560 }
6561
6562 SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
6563 SDValue Src, SDValue Size, Align Alignment,
6564 bool isVol, bool isTailCall,
6565 MachinePointerInfo DstPtrInfo,
6566 MachinePointerInfo SrcPtrInfo) {
6567 // Check to see if we should lower the memmove to loads and stores first.
6568 // For cases within the target-specified limits, this is the best choice.
6569 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
6570 if (ConstantSize) {
6571 // Memmove with size zero? Just return the original chain.
6572 if (ConstantSize->isNullValue())
6573 return Chain;
6574
6575 SDValue Result = getMemmoveLoadsAndStores(
6576 *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment,
6577 isVol, false, DstPtrInfo, SrcPtrInfo);
6578 if (Result.getNode())
6579 return Result;
6580 }
6581
6582 // Then check to see if we should lower the memmove with target-specific
6583 // code. If the target chooses to do this, this is the next best.
6584 if (TSI) {
6585 SDValue Result =
6586 TSI->EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size,
6587 Alignment, isVol, DstPtrInfo, SrcPtrInfo);
6588 if (Result.getNode())
6589 return Result;
6590 }
6591
6592 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
6593 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
6594
6595 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
6596 // not be safe. See memcpy above for more details.
6597
6598 // Emit a library call.
6599 TargetLowering::ArgListTy Args;
6600 TargetLowering::ArgListEntry Entry;
6601 Entry.Ty = Type::getInt8PtrTy(*getContext());
6602 Entry.Node = Dst; Args.push_back(Entry);
6603 Entry.Node = Src; Args.push_back(Entry);
6604
6605 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6606 Entry.Node = Size; Args.push_back(Entry);
6607 // FIXME: pass in SDLoc
6608 TargetLowering::CallLoweringInfo CLI(*this);
6609 CLI.setDebugLoc(dl)
6610 .setChain(Chain)
6611 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
6612 Dst.getValueType().getTypeForEVT(*getContext()),
6613 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
6614 TLI->getPointerTy(getDataLayout())),
6615 std::move(Args))
6616 .setDiscardResult()
6617 .setTailCall(isTailCall);
6618
6619 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
6620 return CallResult.second;
6621 }
6622
6623 SDValue SelectionDAG::getAtomicMemmove(SDValue Chain, const SDLoc &dl,
6624 SDValue Dst, unsigned DstAlign,
6625 SDValue Src, unsigned SrcAlign,
6626 SDValue Size, Type *SizeTy,
6627 unsigned ElemSz, bool isTailCall,
6628 MachinePointerInfo DstPtrInfo,
6629 MachinePointerInfo SrcPtrInfo) {
6630 // Emit a library call.
6631 TargetLowering::ArgListTy Args;
6632 TargetLowering::ArgListEntry Entry;
6633 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6634 Entry.Node = Dst;
6635 Args.push_back(Entry);
6636
6637 Entry.Node = Src;
6638 Args.push_back(Entry);
6639
6640 Entry.Ty = SizeTy;
6641 Entry.Node = Size;
6642 Args.push_back(Entry);
6643
6644 RTLIB::Libcall LibraryCall =
6645 RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(ElemSz);
6646 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
6647 report_fatal_error("Unsupported element size");
6648
6649 TargetLowering::CallLoweringInfo CLI(*this);
6650 CLI.setDebugLoc(dl)
6651 .setChain(Chain)
6652 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
6653 Type::getVoidTy(*getContext()),
6654 getExternalSymbol(TLI->getLibcallName(LibraryCall),
6655 TLI->getPointerTy(getDataLayout())),
6656 std::move(Args))
6657 .setDiscardResult()
6658 .setTailCall(isTailCall);
6659
6660 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
6661 return CallResult.second;
6662 }
6663
6664 SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
6665 SDValue Src, SDValue Size, Align Alignment,
6666 bool isVol, bool isTailCall,
6667 MachinePointerInfo DstPtrInfo) {
6668 // Check to see if we should lower the memset to stores first.
6669 // For cases within the target-specified limits, this is the best choice.
6670 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
6671 if (ConstantSize) {
6672 // Memset with size zero? Just return the original chain.
6673 if (ConstantSize->isNullValue())
6674 return Chain;
6675
6676 SDValue Result = getMemsetStores(*this, dl, Chain, Dst, Src,
6677 ConstantSize->getZExtValue(), Alignment,
6678 isVol, DstPtrInfo);
6679
6680 if (Result.getNode())
6681 return Result;
6682 }
6683
6684 // Then check to see if we should lower the memset with target-specific
6685 // code. If the target chooses to do this, this is the next best.
6686 if (TSI) {
6687 SDValue Result = TSI->EmitTargetCodeForMemset(
6688 *this, dl, Chain, Dst, Src, Size, Alignment, isVol, DstPtrInfo);
6689 if (Result.getNode())
6690 return Result;
6691 }
6692
6693 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
6694
6695 // Emit a library call.
6696 TargetLowering::ArgListTy Args;
6697 TargetLowering::ArgListEntry Entry;
6698 Entry.Node = Dst; Entry.Ty = Type::getInt8PtrTy(*getContext());
6699 Args.push_back(Entry);
6700 Entry.Node = Src;
6701 Entry.Ty = Src.getValueType().getTypeForEVT(*getContext());
6702 Args.push_back(Entry);
6703 Entry.Node = Size;
6704 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6705 Args.push_back(Entry);
6706
6707 // FIXME: pass in SDLoc
6708 TargetLowering::CallLoweringInfo CLI(*this);
6709 CLI.setDebugLoc(dl)
6710 .setChain(Chain)
6711 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
6712 Dst.getValueType().getTypeForEVT(*getContext()),
6713 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
6714 TLI->getPointerTy(getDataLayout())),
6715 std::move(Args))
6716 .setDiscardResult()
6717 .setTailCall(isTailCall);
6718
6719 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
6720 return CallResult.second;
6721 }
6722
6723 SDValue SelectionDAG::getAtomicMemset(SDValue Chain, const SDLoc &dl,
6724 SDValue Dst, unsigned DstAlign,
6725 SDValue Value, SDValue Size, Type *SizeTy,
6726 unsigned ElemSz, bool isTailCall,
6727 MachinePointerInfo DstPtrInfo) {
6728 // Emit a library call.
6729 TargetLowering::ArgListTy Args;
6730 TargetLowering::ArgListEntry Entry;
6731 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6732 Entry.Node = Dst;
6733 Args.push_back(Entry);
6734
6735 Entry.Ty = Type::getInt8Ty(*getContext());
6736 Entry.Node = Value;
6737 Args.push_back(Entry);
6738
6739 Entry.Ty = SizeTy;
6740 Entry.Node = Size;
6741 Args.push_back(Entry);
6742
6743 RTLIB::Libcall LibraryCall =
6744 RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(ElemSz);
6745 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
6746 report_fatal_error("Unsupported element size");
6747
6748 TargetLowering::CallLoweringInfo CLI(*this);
6749 CLI.setDebugLoc(dl)
6750 .setChain(Chain)
6751 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
6752 Type::getVoidTy(*getContext()),
6753 getExternalSymbol(TLI->getLibcallName(LibraryCall),
6754 TLI->getPointerTy(getDataLayout())),
6755 std::move(Args))
6756 .setDiscardResult()
6757 .setTailCall(isTailCall);
6758
6759 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
6760 return CallResult.second;
6761 }
6762
6763 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
6764 SDVTList VTList, ArrayRef<SDValue> Ops,
6765 MachineMemOperand *MMO) {
6766 FoldingSetNodeID ID;
6767 ID.AddInteger(MemVT.getRawBits());
6768 AddNodeIDNode(ID, Opcode, VTList, Ops);
6769 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6770 void* IP = nullptr;
6771 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6772 cast<AtomicSDNode>(E)->refineAlignment(MMO);
6773 return SDValue(E, 0);
6774 }
6775
6776 auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
6777 VTList, MemVT, MMO);
6778 createOperands(N, Ops);
6779
6780 CSEMap.InsertNode(N, IP);
6781 InsertNode(N);
6782 return SDValue(N, 0);
6783 }
6784
6785 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl,
6786 EVT MemVT, SDVTList VTs, SDValue Chain,
6787 SDValue Ptr, SDValue Cmp, SDValue Swp,
6788 MachineMemOperand *MMO) {
6789 assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
6790 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
6791 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
6792
6793 SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
6794 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
6795 }
6796
6797 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
6798 SDValue Chain, SDValue Ptr, SDValue Val,
6799 MachineMemOperand *MMO) {
6800 assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
6801 Opcode == ISD::ATOMIC_LOAD_SUB ||
6802 Opcode == ISD::ATOMIC_LOAD_AND ||
6803 Opcode == ISD::ATOMIC_LOAD_CLR ||
6804 Opcode == ISD::ATOMIC_LOAD_OR ||
6805 Opcode == ISD::ATOMIC_LOAD_XOR ||
6806 Opcode == ISD::ATOMIC_LOAD_NAND ||
6807 Opcode == ISD::ATOMIC_LOAD_MIN ||
6808 Opcode == ISD::ATOMIC_LOAD_MAX ||
6809 Opcode == ISD::ATOMIC_LOAD_UMIN ||
6810 Opcode == ISD::ATOMIC_LOAD_UMAX ||
6811 Opcode == ISD::ATOMIC_LOAD_FADD ||
6812 Opcode == ISD::ATOMIC_LOAD_FSUB ||
6813 Opcode == ISD::ATOMIC_SWAP ||
6814 Opcode == ISD::ATOMIC_STORE) &&
6815 "Invalid Atomic Op");
6816
6817 EVT VT = Val.getValueType();
6818
6819 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
6820 getVTList(VT, MVT::Other);
6821 SDValue Ops[] = {Chain, Ptr, Val};
6822 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
6823 }
6824
6825 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
6826 EVT VT, SDValue Chain, SDValue Ptr,
6827 MachineMemOperand *MMO) {
6828 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");
6829
6830 SDVTList VTs = getVTList(VT, MVT::Other);
6831 SDValue Ops[] = {Chain, Ptr};
6832 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
6833 }
6834
6835 /// getMergeValues - Create a MERGE_VALUES node from the given operands.
6836 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) {
6837 if (Ops.size() == 1)
6838 return Ops[0];
6839
6840 SmallVector<EVT, 4> VTs;
6841 VTs.reserve(Ops.size());
6842 for (unsigned i = 0; i < Ops.size(); ++i)
6843 VTs.push_back(Ops[i].getValueType());
6844 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
6845 }
6846
6847 SDValue SelectionDAG::getMemIntrinsicNode(
6848 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
6849 EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment,
6850 MachineMemOperand::Flags Flags, uint64_t Size, const AAMDNodes &AAInfo) {
6851 if (!Size && MemVT.isScalableVector())
6852 Size = MemoryLocation::UnknownSize;
6853 else if (!Size)
6854 Size = MemVT.getStoreSize();
6855
6856 MachineFunction &MF = getMachineFunction();
6857 MachineMemOperand *MMO =
6858 MF.getMachineMemOperand(PtrInfo, Flags, Size, Alignment, AAInfo);
6859
6860 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
6861 }
6862
6863 SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl,
6864 SDVTList VTList,
6865 ArrayRef<SDValue> Ops, EVT MemVT,
6866 MachineMemOperand *MMO) {
6867 assert((Opcode == ISD::INTRINSIC_VOID ||
6868 Opcode == ISD::INTRINSIC_W_CHAIN ||
6869 Opcode == ISD::PREFETCH ||
6870 ((int)Opcode <= std::numeric_limits<int>::max() &&
6871 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
6872 "Opcode is not a memory-accessing opcode!");
6873
6874 // Memoize the node unless it returns a flag.
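// (A node whose last result is glue is not CSE'd here: a glue value is
// expected to have a single consumer, so sharing such a node between users
// would not be safe.)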
6875 MemIntrinsicSDNode *N;
6876 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
6877 FoldingSetNodeID ID;
6878 AddNodeIDNode(ID, Opcode, VTList, Ops);
6879 ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>(
6880 Opcode, dl.getIROrder(), VTList, MemVT, MMO));
6881 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6882 void *IP = nullptr;
6883 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6884 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
6885 return SDValue(E, 0);
6886 }
6887
6888 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
6889 VTList, MemVT, MMO);
6890 createOperands(N, Ops);
6891
6892 CSEMap.InsertNode(N, IP);
6893 } else {
6894 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
6895 VTList, MemVT, MMO);
6896 createOperands(N, Ops);
6897 }
6898 InsertNode(N);
6899 SDValue V(N, 0);
6900 NewSDValueDbgMsg(V, "Creating new node: ", this);
6901 return V;
6902 }
6903
6904 SDValue SelectionDAG::getLifetimeNode(bool IsStart, const SDLoc &dl,
6905 SDValue Chain, int FrameIndex,
6906 int64_t Size, int64_t Offset) {
6907 const unsigned Opcode = IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END;
6908 const auto VTs = getVTList(MVT::Other);
6909 SDValue Ops[2] = {
6910 Chain,
6911 getFrameIndex(FrameIndex,
6912 getTargetLoweringInfo().getFrameIndexTy(getDataLayout()),
6913 true)};
6914
6915 FoldingSetNodeID ID;
6916 AddNodeIDNode(ID, Opcode, VTs, Ops);
6917 ID.AddInteger(FrameIndex);
6918 ID.AddInteger(Size);
6919 ID.AddInteger(Offset);
6920 void *IP = nullptr;
6921 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
6922 return SDValue(E, 0);
6923
6924 LifetimeSDNode *N = newSDNode<LifetimeSDNode>(
6925 Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs, Size, Offset);
6926 createOperands(N, Ops);
6927 CSEMap.InsertNode(N, IP);
6928 InsertNode(N);
6929 SDValue V(N, 0);
6930 NewSDValueDbgMsg(V, "Creating new node: ", this);
6931 return V;
6932 }
6933
6934 SDValue SelectionDAG::getPseudoProbeNode(const SDLoc &Dl, SDValue Chain,
6935 uint64_t Guid, uint64_t Index,
6936 uint32_t Attr) {
6937 const unsigned Opcode = ISD::PSEUDO_PROBE;
6938 const auto VTs = getVTList(MVT::Other);
6939 SDValue Ops[] = {Chain};
6940 FoldingSetNodeID ID;
6941 AddNodeIDNode(ID, Opcode, VTs, Ops);
6942 ID.AddInteger(Guid);
6943 ID.AddInteger(Index);
6944 void *IP = nullptr;
6945 if (SDNode *E = FindNodeOrInsertPos(ID, Dl, IP))
6946 return SDValue(E, 0);
6947
6948 auto *N = newSDNode<PseudoProbeSDNode>(
6949 Opcode, Dl.getIROrder(), Dl.getDebugLoc(), VTs, Guid, Index, Attr);
6950 createOperands(N, Ops);
6951 CSEMap.InsertNode(N, IP);
6952 InsertNode(N);
6953 SDValue V(N, 0);
6954 NewSDValueDbgMsg(V, "Creating new node: ", this);
6955 return V;
6956 }
6957
6958 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
6959 /// MachinePointerInfo record from it. This is particularly useful because the
6960 /// code generator has many cases where it doesn't bother passing in a
6961 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
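/// For example (illustrative), a pointer of the form (add (FrameIndex 3), 8)
/// is modeled as a fixed-stack reference to frame slot 3 at offset 8.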
6962 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
6963 SelectionDAG &DAG, SDValue Ptr,
6964 int64_t Offset = 0) {
6965 // If this is FI+Offset, we can model it.
6966 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
6967 return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(),
6968 FI->getIndex(), Offset);
6969
6970 // If this is (FI+Offset1)+Offset2, we can model it.
6971 if (Ptr.getOpcode() != ISD::ADD ||
6972 !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
6973 !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
6974 return Info;
6975
6976 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
6977 return MachinePointerInfo::getFixedStack(
6978 DAG.getMachineFunction(), FI,
6979 Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
6980 }
6981
6982 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
6983 /// MachinePointerInfo record from it. This is particularly useful because the
6984 /// code generator has many cases where it doesn't bother passing in a
6985 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
6986 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
6987 SelectionDAG &DAG, SDValue Ptr,
6988 SDValue OffsetOp) {
6989 // If the 'Offset' value isn't a constant, we can't handle this.
6990 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
6991 return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue());
6992 if (OffsetOp.isUndef())
6993 return InferPointerInfo(Info, DAG, Ptr);
6994 return Info;
6995 }
6996
6997 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
6998 EVT VT, const SDLoc &dl, SDValue Chain,
6999 SDValue Ptr, SDValue Offset,
7000 MachinePointerInfo PtrInfo, EVT MemVT,
7001 Align Alignment,
7002 MachineMemOperand::Flags MMOFlags,
7003 const AAMDNodes &AAInfo, const MDNode *Ranges) {
7004 assert(Chain.getValueType() == MVT::Other &&
7005 "Invalid chain type");
7006
7007 MMOFlags |= MachineMemOperand::MOLoad;
7008 assert((MMOFlags & MachineMemOperand::MOStore) == 0);
7009 // If we don't have a PtrInfo, infer the trivial frame index case to simplify
7010 // clients.
7011 if (PtrInfo.V.isNull())
7012 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);
7013
7014 uint64_t Size = MemoryLocation::getSizeOrUnknown(MemVT.getStoreSize());
7015 MachineFunction &MF = getMachineFunction();
7016 MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size,
7017 Alignment, AAInfo, Ranges);
7018 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
7019 }
7020
7021 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
7022 EVT VT, const SDLoc &dl, SDValue Chain,
7023 SDValue Ptr, SDValue Offset, EVT MemVT,
7024 MachineMemOperand *MMO) {
7025 if (VT == MemVT) {
7026 ExtType = ISD::NON_EXTLOAD;
7027 } else if (ExtType == ISD::NON_EXTLOAD) {
7028 assert(VT == MemVT && "Non-extending load from different memory type!");
7029 } else {
7030 // Extending load.
7031 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
7032 "Should only be an extending load, not truncating!");
7033 assert(VT.isInteger() == MemVT.isInteger() &&
7034 "Cannot convert from FP to Int or Int -> FP!");
7035 assert(VT.isVector() == MemVT.isVector() &&
7036 "Cannot use an ext load to convert to or from a vector!");
7037 assert((!VT.isVector() ||
7038 VT.getVectorElementCount() == MemVT.getVectorElementCount()) &&
7039 "Cannot use an ext load to change the number of vector elements!");
7040 }
7041
7042 bool Indexed = AM != ISD::UNINDEXED;
7043 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
7044
7045 SDVTList VTs = Indexed ?
7046 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
7047 SDValue Ops[] = { Chain, Ptr, Offset };
7048 FoldingSetNodeID ID;
7049 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
7050 ID.AddInteger(MemVT.getRawBits());
7051 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
7052 dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO));
7053 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7054 void *IP = nullptr;
7055 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7056 cast<LoadSDNode>(E)->refineAlignment(MMO);
7057 return SDValue(E, 0);
7058 }
7059 auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
7060 ExtType, MemVT, MMO);
7061 createOperands(N, Ops);
7062
7063 CSEMap.InsertNode(N, IP);
7064 InsertNode(N);
7065 SDValue V(N, 0);
7066 NewSDValueDbgMsg(V, "Creating new node: ", this);
7067 return V;
7068 }
7069
7070 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
7071 SDValue Ptr, MachinePointerInfo PtrInfo,
7072 MaybeAlign Alignment,
7073 MachineMemOperand::Flags MMOFlags,
7074 const AAMDNodes &AAInfo, const MDNode *Ranges) {
7075 SDValue Undef = getUNDEF(Ptr.getValueType());
7076 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
7077 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
7078 }
7079
7080 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
7081 SDValue Ptr, MachineMemOperand *MMO) {
7082 SDValue Undef = getUNDEF(Ptr.getValueType());
7083 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
7084 VT, MMO);
7085 }
7086
7087 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
7088 EVT VT, SDValue Chain, SDValue Ptr,
7089 MachinePointerInfo PtrInfo, EVT MemVT,
7090 MaybeAlign Alignment,
7091 MachineMemOperand::Flags MMOFlags,
7092 const AAMDNodes &AAInfo) {
7093 SDValue Undef = getUNDEF(Ptr.getValueType());
7094 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo,
7095 MemVT, Alignment, MMOFlags, AAInfo);
7096 }
7097
7098 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
7099 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT,
7100 MachineMemOperand *MMO) {
7101 SDValue Undef = getUNDEF(Ptr.getValueType());
7102 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
7103 MemVT, MMO);
7104 }
7105
7106 SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl,
7107 SDValue Base, SDValue Offset,
7108 ISD::MemIndexedMode AM) {
7109 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
7110 assert(LD->getOffset().isUndef() && "Load is already an indexed load!");
7111 // Don't propagate the invariant or dereferenceable flags.
7112 auto MMOFlags =
7113 LD->getMemOperand()->getFlags() &
7114 ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
7115 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
7116 LD->getChain(), Base, Offset, LD->getPointerInfo(),
7117 LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo());
7118 }
7119
7120 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
7121 SDValue Ptr, MachinePointerInfo PtrInfo,
7122 Align Alignment,
7123 MachineMemOperand::Flags MMOFlags,
7124 const AAMDNodes &AAInfo) {
7125 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
7126
7127 MMOFlags |= MachineMemOperand::MOStore;
7128 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
7129
7130 if (PtrInfo.V.isNull())
7131 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
7132
7133 MachineFunction &MF = getMachineFunction();
7134 uint64_t Size =
7135 MemoryLocation::getSizeOrUnknown(Val.getValueType().getStoreSize());
7136 MachineMemOperand *MMO =
7137 MF.getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo);
7138 return getStore(Chain, dl, Val, Ptr, MMO);
7139 }
7140
7141 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
7142 SDValue Ptr, MachineMemOperand *MMO) {
7143 assert(Chain.getValueType() == MVT::Other &&
7144 "Invalid chain type");
7145 EVT VT = Val.getValueType();
7146 SDVTList VTs = getVTList(MVT::Other);
7147 SDValue Undef = getUNDEF(Ptr.getValueType());
7148 SDValue Ops[] = { Chain, Val, Ptr, Undef };
7149 FoldingSetNodeID ID;
7150 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
7151 ID.AddInteger(VT.getRawBits());
7152 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
7153 dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO));
7154 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7155 void *IP = nullptr;
7156 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7157 cast<StoreSDNode>(E)->refineAlignment(MMO);
7158 return SDValue(E, 0);
7159 }
7160 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
7161 ISD::UNINDEXED, false, VT, MMO);
7162 createOperands(N, Ops);
7163
7164 CSEMap.InsertNode(N, IP);
7165 InsertNode(N);
7166 SDValue V(N, 0);
7167 NewSDValueDbgMsg(V, "Creating new node: ", this);
7168 return V;
7169 }
7170
7171 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
7172 SDValue Ptr, MachinePointerInfo PtrInfo,
7173 EVT SVT, Align Alignment,
7174 MachineMemOperand::Flags MMOFlags,
7175 const AAMDNodes &AAInfo) {
7176 assert(Chain.getValueType() == MVT::Other &&
7177 "Invalid chain type");
7178
7179 MMOFlags |= MachineMemOperand::MOStore;
7180 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
7181
7182 if (PtrInfo.V.isNull())
7183 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
7184
7185 MachineFunction &MF = getMachineFunction();
7186 MachineMemOperand *MMO = MF.getMachineMemOperand(
7187 PtrInfo, MMOFlags, MemoryLocation::getSizeOrUnknown(SVT.getStoreSize()),
7188 Alignment, AAInfo);
7189 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
7190 }
7191
7192 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
7193 SDValue Ptr, EVT SVT,
7194 MachineMemOperand *MMO) {
7195 EVT VT = Val.getValueType();
7196
7197 assert(Chain.getValueType() == MVT::Other &&
7198 "Invalid chain type");
7199 if (VT == SVT)
7200 return getStore(Chain, dl, Val, Ptr, MMO);
7201
7202 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
7203 "Should only be a truncating store, not extending!");
7204 assert(VT.isInteger() == SVT.isInteger() &&
7205 "Can't do FP-INT conversion!");
7206 assert(VT.isVector() == SVT.isVector() &&
7207 "Cannot use trunc store to convert to or from a vector!");
7208 assert((!VT.isVector() ||
7209 VT.getVectorElementCount() == SVT.getVectorElementCount()) &&
7210 "Cannot use trunc store to change the number of vector elements!");
7211
7212 SDVTList VTs = getVTList(MVT::Other);
7213 SDValue Undef = getUNDEF(Ptr.getValueType());
7214 SDValue Ops[] = { Chain, Val, Ptr, Undef };
7215 FoldingSetNodeID ID;
7216 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
7217 ID.AddInteger(SVT.getRawBits());
7218 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
7219 dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO));
7220 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7221 void *IP = nullptr;
7222 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7223 cast<StoreSDNode>(E)->refineAlignment(MMO);
7224 return SDValue(E, 0);
7225 }
7226 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
7227 ISD::UNINDEXED, true, SVT, MMO);
7228 createOperands(N, Ops);
7229
7230 CSEMap.InsertNode(N, IP);
7231 InsertNode(N);
7232 SDValue V(N, 0);
7233 NewSDValueDbgMsg(V, "Creating new node: ", this);
7234 return V;
7235 }
7236
7237 SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl,
7238 SDValue Base, SDValue Offset,
7239 ISD::MemIndexedMode AM) {
7240 StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
7241 assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
7242 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
7243 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
7244 FoldingSetNodeID ID;
7245 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
7246 ID.AddInteger(ST->getMemoryVT().getRawBits());
7247 ID.AddInteger(ST->getRawSubclassData());
7248 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
7249 void *IP = nullptr;
7250 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
7251 return SDValue(E, 0);
7252
7253 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
7254 ST->isTruncatingStore(), ST->getMemoryVT(),
7255 ST->getMemOperand());
7256 createOperands(N, Ops);
7257
7258 CSEMap.InsertNode(N, IP);
7259 InsertNode(N);
7260 SDValue V(N, 0);
7261 NewSDValueDbgMsg(V, "Creating new node: ", this);
7262 return V;
7263 }
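// Editor's note: as the SDVTList built above indicates, the indexed store
// produced here has two results: result 0 is the updated base pointer
// (Base.getValueType()) and result 1 is the output chain (MVT::Other).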
7264
7265 SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain,
7266 SDValue Base, SDValue Offset, SDValue Mask,
7267 SDValue PassThru, EVT MemVT,
7268 MachineMemOperand *MMO,
7269 ISD::MemIndexedMode AM,
7270 ISD::LoadExtType ExtTy, bool isExpanding) {
7271 bool Indexed = AM != ISD::UNINDEXED;
7272 assert((Indexed || Offset.isUndef()) &&
7273 "Unindexed masked load with an offset!");
7274 SDVTList VTs = Indexed ? getVTList(VT, Base.getValueType(), MVT::Other)
7275 : getVTList(VT, MVT::Other);
7276 SDValue Ops[] = {Chain, Base, Offset, Mask, PassThru};
7277 FoldingSetNodeID ID;
7278 AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops);
7279 ID.AddInteger(MemVT.getRawBits());
7280 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
7281 dl.getIROrder(), VTs, AM, ExtTy, isExpanding, MemVT, MMO));
7282 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7283 void *IP = nullptr;
7284 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7285 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
7286 return SDValue(E, 0);
7287 }
7288 auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
7289 AM, ExtTy, isExpanding, MemVT, MMO);
7290 createOperands(N, Ops);
7291
7292 CSEMap.InsertNode(N, IP);
7293 InsertNode(N);
7294 SDValue V(N, 0);
7295 NewSDValueDbgMsg(V, "Creating new node: ", this);
7296 return V;
7297 }
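// Editor's note: a hedged usage sketch (not from the original source). A
// typical unindexed, non-extending masked load of <4 x i32> might look like:
//
//   SDValue Load = DAG.getMaskedLoad(MVT::v4i32, DL, Chain, Ptr,
//                                    DAG.getUNDEF(Ptr.getValueType()), Mask,
//                                    PassThru, MVT::v4i32, MMO,
//                                    ISD::UNINDEXED, ISD::NON_EXTLOAD,
//                                    /*IsExpanding=*/false);
//
// Chain, Ptr, Mask, PassThru and MMO are assumed to be set up by the caller;
// the undef offset matches the assertion above for unindexed loads.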
7298
7299 SDValue SelectionDAG::getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl,
7300 SDValue Base, SDValue Offset,
7301 ISD::MemIndexedMode AM) {
7302 MaskedLoadSDNode *LD = cast<MaskedLoadSDNode>(OrigLoad);
7303   assert(LD->getOffset().isUndef() && "Masked load is already an indexed load!");
7304 return getMaskedLoad(OrigLoad.getValueType(), dl, LD->getChain(), Base,
7305 Offset, LD->getMask(), LD->getPassThru(),
7306 LD->getMemoryVT(), LD->getMemOperand(), AM,
7307 LD->getExtensionType(), LD->isExpandingLoad());
7308 }
7309
7310 SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl,
7311 SDValue Val, SDValue Base, SDValue Offset,
7312 SDValue Mask, EVT MemVT,
7313 MachineMemOperand *MMO,
7314 ISD::MemIndexedMode AM, bool IsTruncating,
7315 bool IsCompressing) {
7316 assert(Chain.getValueType() == MVT::Other &&
7317 "Invalid chain type");
7318 bool Indexed = AM != ISD::UNINDEXED;
7319 assert((Indexed || Offset.isUndef()) &&
7320 "Unindexed masked store with an offset!");
7321 SDVTList VTs = Indexed ? getVTList(Base.getValueType(), MVT::Other)
7322 : getVTList(MVT::Other);
7323 SDValue Ops[] = {Chain, Val, Base, Offset, Mask};
7324 FoldingSetNodeID ID;
7325 AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops);
7326 ID.AddInteger(MemVT.getRawBits());
7327 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
7328 dl.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
7329 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7330 void *IP = nullptr;
7331 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7332 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
7333 return SDValue(E, 0);
7334 }
7335 auto *N =
7336 newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
7337 IsTruncating, IsCompressing, MemVT, MMO);
7338 createOperands(N, Ops);
7339
7340 CSEMap.InsertNode(N, IP);
7341 InsertNode(N);
7342 SDValue V(N, 0);
7343 NewSDValueDbgMsg(V, "Creating new node: ", this);
7344 return V;
7345 }
7346
7347 SDValue SelectionDAG::getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl,
7348 SDValue Base, SDValue Offset,
7349 ISD::MemIndexedMode AM) {
7350 MaskedStoreSDNode *ST = cast<MaskedStoreSDNode>(OrigStore);
7351 assert(ST->getOffset().isUndef() &&
7352          "Masked store is already an indexed store!");
7353 return getMaskedStore(ST->getChain(), dl, ST->getValue(), Base, Offset,
7354 ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(),
7355 AM, ST->isTruncatingStore(), ST->isCompressingStore());
7356 }
7357
7358 SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl,
7359 ArrayRef<SDValue> Ops,
7360 MachineMemOperand *MMO,
7361 ISD::MemIndexType IndexType,
7362 ISD::LoadExtType ExtTy) {
7363 assert(Ops.size() == 6 && "Incompatible number of operands");
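  // Editor's note (assumed from the accessors used further down): the six
  // operands are expected in the order {Chain, PassThru, Mask, BasePtr, Index,
  // Scale}, so Ops[4] is the index vector that is canonicalized below.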
7364
7365 FoldingSetNodeID ID;
7366 AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops);
7367 ID.AddInteger(VT.getRawBits());
7368 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
7369 dl.getIROrder(), VTs, VT, MMO, IndexType, ExtTy));
7370 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7371 void *IP = nullptr;
7372 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7373 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
7374 return SDValue(E, 0);
7375 }
7376
7377 IndexType = TLI->getCanonicalIndexType(IndexType, VT, Ops[4]);
7378 auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(),
7379 VTs, VT, MMO, IndexType, ExtTy);
7380 createOperands(N, Ops);
7381
7382 assert(N->getPassThru().getValueType() == N->getValueType(0) &&
7383 "Incompatible type of the PassThru value in MaskedGatherSDNode");
7384 assert(N->getMask().getValueType().getVectorElementCount() ==
7385 N->getValueType(0).getVectorElementCount() &&
7386 "Vector width mismatch between mask and data");
7387 assert(N->getIndex().getValueType().getVectorElementCount().isScalable() ==
7388 N->getValueType(0).getVectorElementCount().isScalable() &&
7389 "Scalable flags of index and data do not match");
7390 assert(ElementCount::isKnownGE(
7391 N->getIndex().getValueType().getVectorElementCount(),
7392 N->getValueType(0).getVectorElementCount()) &&
7393 "Vector width mismatch between index and data");
7394 assert(isa<ConstantSDNode>(N->getScale()) &&
7395 cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
7396 "Scale should be a constant power of 2");
7397
7398 CSEMap.InsertNode(N, IP);
7399 InsertNode(N);
7400 SDValue V(N, 0);
7401 NewSDValueDbgMsg(V, "Creating new node: ", this);
7402 return V;
7403 }
7404
7405 SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl,
7406 ArrayRef<SDValue> Ops,
7407 MachineMemOperand *MMO,
7408 ISD::MemIndexType IndexType,
7409 bool IsTrunc) {
7410 assert(Ops.size() == 6 && "Incompatible number of operands");
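  // Editor's note (assumed from the accessors used further down): the six
  // operands are expected in the order {Chain, Value, Mask, BasePtr, Index,
  // Scale}, with Ops[4] again being the index vector.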
7411
7412 FoldingSetNodeID ID;
7413 AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops);
7414 ID.AddInteger(VT.getRawBits());
7415 ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
7416 dl.getIROrder(), VTs, VT, MMO, IndexType, IsTrunc));
7417 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7418 void *IP = nullptr;
7419 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7420 cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
7421 return SDValue(E, 0);
7422 }
7423
7424 IndexType = TLI->getCanonicalIndexType(IndexType, VT, Ops[4]);
7425 auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
7426 VTs, VT, MMO, IndexType, IsTrunc);
7427 createOperands(N, Ops);
7428
7429 assert(N->getMask().getValueType().getVectorElementCount() ==
7430 N->getValue().getValueType().getVectorElementCount() &&
7431 "Vector width mismatch between mask and data");
7432 assert(
7433 N->getIndex().getValueType().getVectorElementCount().isScalable() ==
7434 N->getValue().getValueType().getVectorElementCount().isScalable() &&
7435 "Scalable flags of index and data do not match");
7436 assert(ElementCount::isKnownGE(
7437 N->getIndex().getValueType().getVectorElementCount(),
7438 N->getValue().getValueType().getVectorElementCount()) &&
7439 "Vector width mismatch between index and data");
7440 assert(isa<ConstantSDNode>(N->getScale()) &&
7441 cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
7442 "Scale should be a constant power of 2");
7443
7444 CSEMap.InsertNode(N, IP);
7445 InsertNode(N);
7446 SDValue V(N, 0);
7447 NewSDValueDbgMsg(V, "Creating new node: ", this);
7448 return V;
7449 }
7450
7451 SDValue SelectionDAG::simplifySelect(SDValue Cond, SDValue T, SDValue F) {
7452 // select undef, T, F --> T (if T is a constant), otherwise F
7453   // select ?, undef, F --> F
7454   // select ?, T, undef --> T
7455 if (Cond.isUndef())
7456 return isConstantValueOfAnyType(T) ? T : F;
7457 if (T.isUndef())
7458 return F;
7459 if (F.isUndef())
7460 return T;
7461
7462 // select true, T, F --> T
7463 // select false, T, F --> F
7464 if (auto *CondC = dyn_cast<ConstantSDNode>(Cond))
7465 return CondC->isNullValue() ? F : T;
7466
7467 // TODO: This should simplify VSELECT with constant condition using something
7468 // like this (but check boolean contents to be complete?):
7469 // if (ISD::isBuildVectorAllOnes(Cond.getNode()))
7470 // return T;
7471 // if (ISD::isBuildVectorAllZeros(Cond.getNode()))
7472 // return F;
7473
7474 // select ?, T, T --> T
7475 if (T == F)
7476 return T;
7477
7478 return SDValue();
7479 }
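// Editor's note: a couple of concrete instances of the folds above, purely
// illustrative:
//   select (i1 undef), (i32 7), X  --> i32 7   (T is a constant, so prefer T)
//   select C, X, X                 --> X       (both arms identical)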
7480
7481 SDValue SelectionDAG::simplifyShift(SDValue X, SDValue Y) {
7482 // shift undef, Y --> 0 (can always assume that the undef value is 0)
7483 if (X.isUndef())
7484 return getConstant(0, SDLoc(X.getNode()), X.getValueType());
7485 // shift X, undef --> undef (because it may shift by the bitwidth)
7486 if (Y.isUndef())
7487 return getUNDEF(X.getValueType());
7488
7489 // shift 0, Y --> 0
7490 // shift X, 0 --> X
7491 if (isNullOrNullSplat(X) || isNullOrNullSplat(Y))
7492 return X;
7493
7494 // shift X, C >= bitwidth(X) --> undef
7495 // All vector elements must be too big (or undef) to avoid partial undefs.
7496 auto isShiftTooBig = [X](ConstantSDNode *Val) {
7497 return !Val || Val->getAPIntValue().uge(X.getScalarValueSizeInBits());
7498 };
7499 if (ISD::matchUnaryPredicate(Y, isShiftTooBig, true))
7500 return getUNDEF(X.getValueType());
7501
7502 return SDValue();
7503 }
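// Editor's note: illustrative instances of the folds above for an i32 shift:
//   shl undef, Y --> 0        (the undef shifted value is treated as 0)
//   srl X, 37    --> undef    (shift amount >= bit width)
//   shl X, 0     --> X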
7504
7505 SDValue SelectionDAG::simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y,
7506 SDNodeFlags Flags) {
7507 // If this operation has 'nnan' or 'ninf' and at least 1 disallowed operand
7508   // (an undef operand can be chosen to be NaN/Inf), then the result of this
7509 // operation is poison. That result can be relaxed to undef.
7510 ConstantFPSDNode *XC = isConstOrConstSplatFP(X, /* AllowUndefs */ true);
7511 ConstantFPSDNode *YC = isConstOrConstSplatFP(Y, /* AllowUndefs */ true);
7512 bool HasNan = (XC && XC->getValueAPF().isNaN()) ||
7513 (YC && YC->getValueAPF().isNaN());
7514 bool HasInf = (XC && XC->getValueAPF().isInfinity()) ||
7515 (YC && YC->getValueAPF().isInfinity());
7516
7517 if (Flags.hasNoNaNs() && (HasNan || X.isUndef() || Y.isUndef()))
7518 return getUNDEF(X.getValueType());
7519
7520 if (Flags.hasNoInfs() && (HasInf || X.isUndef() || Y.isUndef()))
7521 return getUNDEF(X.getValueType());
7522
7523 if (!YC)
7524 return SDValue();
7525
7526 // X + -0.0 --> X
7527 if (Opcode == ISD::FADD)
7528 if (YC->getValueAPF().isNegZero())
7529 return X;
7530
7531 // X - +0.0 --> X
7532 if (Opcode == ISD::FSUB)
7533 if (YC->getValueAPF().isPosZero())
7534 return X;
7535
7536 // X * 1.0 --> X
7537 // X / 1.0 --> X
7538 if (Opcode == ISD::FMUL || Opcode == ISD::FDIV)
7539 if (YC->getValueAPF().isExactlyValue(1.0))
7540 return X;
7541
7542 // X * 0.0 --> 0.0
7543 if (Opcode == ISD::FMUL && Flags.hasNoNaNs() && Flags.hasNoSignedZeros())
7544 if (YC->getValueAPF().isZero())
7545 return getConstantFP(0.0, SDLoc(Y), Y.getValueType());
7546
7547 return SDValue();
7548 }
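// Editor's note: illustrative instances of the folds above:
//   fadd X, -0.0           --> X
//   fsub X, +0.0           --> X
//   fdiv X, 1.0            --> X
//   fmul nnan nsz X, 0.0   --> 0.0
// With 'nnan' set, an undef or NaN operand collapses the result to undef.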
7549
7550 SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain,
7551 SDValue Ptr, SDValue SV, unsigned Align) {
7552 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) };
7553 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
7554 }
7555
7556 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
7557 ArrayRef<SDUse> Ops) {
7558 switch (Ops.size()) {
7559 case 0: return getNode(Opcode, DL, VT);
7560 case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
7561 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
7562 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
7563 default: break;
7564 }
7565
7566 // Copy from an SDUse array into an SDValue array for use with
7567 // the regular getNode logic.
7568 SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end());
7569 return getNode(Opcode, DL, VT, NewOps);
7570 }
7571
7572 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
7573 ArrayRef<SDValue> Ops) {
7574 SDNodeFlags Flags;
7575 if (Inserter)
7576 Flags = Inserter->getFlags();
7577 return getNode(Opcode, DL, VT, Ops, Flags);
7578 }
7579
7580 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
7581 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
7582 unsigned NumOps = Ops.size();
7583 switch (NumOps) {
7584 case 0: return getNode(Opcode, DL, VT);
7585 case 1: return getNode(Opcode, DL, VT, Ops[0], Flags);
7586 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags);
7587 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2], Flags);
7588 default: break;
7589 }
7590
7591 switch (Opcode) {
7592 default: break;
7593 case ISD::BUILD_VECTOR:
7594 // Attempt to simplify BUILD_VECTOR.
7595 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
7596 return V;
7597 break;
7598 case ISD::CONCAT_VECTORS:
7599 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
7600 return V;
7601 break;
7602 case ISD::SELECT_CC:
7603 assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
7604 assert(Ops[0].getValueType() == Ops[1].getValueType() &&
7605 "LHS and RHS of condition must have same type!");
7606 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
7607 "True and False arms of SelectCC must have same type!");
7608 assert(Ops[2].getValueType() == VT &&
7609 "select_cc node must be of same type as true and false value!");
7610 break;
7611 case ISD::BR_CC:
7612 assert(NumOps == 5 && "BR_CC takes 5 operands!");
7613 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
7614 "LHS/RHS of comparison should match types!");
7615 break;
7616 }
7617
7618 // Memoize nodes.
7619 SDNode *N;
7620 SDVTList VTs = getVTList(VT);
7621
7622 if (VT != MVT::Glue) {
7623 FoldingSetNodeID ID;
7624 AddNodeIDNode(ID, Opcode, VTs, Ops);
7625 void *IP = nullptr;
7626
7627 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
7628 return SDValue(E, 0);
7629
7630 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
7631 createOperands(N, Ops);
7632
7633 CSEMap.InsertNode(N, IP);
7634 } else {
7635 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
7636 createOperands(N, Ops);
7637 }
7638
7639 N->setFlags(Flags);
7640 InsertNode(N);
7641 SDValue V(N, 0);
7642 NewSDValueDbgMsg(V, "Creating new node: ", this);
7643 return V;
7644 }
7645
7646 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
7647 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
7648 return getNode(Opcode, DL, getVTList(ResultTys), Ops);
7649 }
7650
7651 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7652 ArrayRef<SDValue> Ops) {
7653 SDNodeFlags Flags;
7654 if (Inserter)
7655 Flags = Inserter->getFlags();
7656 return getNode(Opcode, DL, VTList, Ops, Flags);
7657 }
7658
7659 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7660 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
7661 if (VTList.NumVTs == 1)
7662 return getNode(Opcode, DL, VTList.VTs[0], Ops);
7663
7664 switch (Opcode) {
7665 case ISD::STRICT_FP_EXTEND:
7666 assert(VTList.NumVTs == 2 && Ops.size() == 2 &&
7667 "Invalid STRICT_FP_EXTEND!");
7668 assert(VTList.VTs[0].isFloatingPoint() &&
7669 Ops[1].getValueType().isFloatingPoint() && "Invalid FP cast!");
7670 assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() &&
7671 "STRICT_FP_EXTEND result type should be vector iff the operand "
7672 "type is vector!");
7673 assert((!VTList.VTs[0].isVector() ||
7674 VTList.VTs[0].getVectorNumElements() ==
7675 Ops[1].getValueType().getVectorNumElements()) &&
7676 "Vector element count mismatch!");
7677 assert(Ops[1].getValueType().bitsLT(VTList.VTs[0]) &&
7678 "Invalid fpext node, dst <= src!");
7679 break;
7680 case ISD::STRICT_FP_ROUND:
7681 assert(VTList.NumVTs == 2 && Ops.size() == 3 && "Invalid STRICT_FP_ROUND!");
7682 assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() &&
7683 "STRICT_FP_ROUND result type should be vector iff the operand "
7684 "type is vector!");
7685 assert((!VTList.VTs[0].isVector() ||
7686 VTList.VTs[0].getVectorNumElements() ==
7687 Ops[1].getValueType().getVectorNumElements()) &&
7688 "Vector element count mismatch!");
7689 assert(VTList.VTs[0].isFloatingPoint() &&
7690 Ops[1].getValueType().isFloatingPoint() &&
7691 VTList.VTs[0].bitsLT(Ops[1].getValueType()) &&
7692 isa<ConstantSDNode>(Ops[2]) &&
7693 (cast<ConstantSDNode>(Ops[2])->getZExtValue() == 0 ||
7694 cast<ConstantSDNode>(Ops[2])->getZExtValue() == 1) &&
7695 "Invalid STRICT_FP_ROUND!");
7696 break;
7697 #if 0
7698 // FIXME: figure out how to safely handle things like
7699 // int foo(int x) { return 1 << (x & 255); }
7700 // int bar() { return foo(256); }
7701 case ISD::SRA_PARTS:
7702 case ISD::SRL_PARTS:
7703 case ISD::SHL_PARTS:
7704 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
7705 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
7706 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
7707 else if (N3.getOpcode() == ISD::AND)
7708 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
7709       // If the and is only masking out bits that cannot affect the shift,
7710 // eliminate the and.
7711 unsigned NumBits = VT.getScalarSizeInBits()*2;
7712 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
7713 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
7714 }
7715 break;
7716 #endif
7717 }
7718
7719 // Memoize the node unless it returns a flag.
7720 SDNode *N;
7721 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
7722 FoldingSetNodeID ID;
7723 AddNodeIDNode(ID, Opcode, VTList, Ops);
7724 void *IP = nullptr;
7725 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
7726 return SDValue(E, 0);
7727
7728 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
7729 createOperands(N, Ops);
7730 CSEMap.InsertNode(N, IP);
7731 } else {
7732 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
7733 createOperands(N, Ops);
7734 }
7735
7736 N->setFlags(Flags);
7737 InsertNode(N);
7738 SDValue V(N, 0);
7739 NewSDValueDbgMsg(V, "Creating new node: ", this);
7740 return V;
7741 }
7742
7743 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
7744 SDVTList VTList) {
7745 return getNode(Opcode, DL, VTList, None);
7746 }
7747
7748 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7749 SDValue N1) {
7750 SDValue Ops[] = { N1 };
7751 return getNode(Opcode, DL, VTList, Ops);
7752 }
7753
7754 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7755 SDValue N1, SDValue N2) {
7756 SDValue Ops[] = { N1, N2 };
7757 return getNode(Opcode, DL, VTList, Ops);
7758 }
7759
7760 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7761 SDValue N1, SDValue N2, SDValue N3) {
7762 SDValue Ops[] = { N1, N2, N3 };
7763 return getNode(Opcode, DL, VTList, Ops);
7764 }
7765
7766 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7767 SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
7768 SDValue Ops[] = { N1, N2, N3, N4 };
7769 return getNode(Opcode, DL, VTList, Ops);
7770 }
7771
7772 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7773 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
7774 SDValue N5) {
7775 SDValue Ops[] = { N1, N2, N3, N4, N5 };
7776 return getNode(Opcode, DL, VTList, Ops);
7777 }
7778
7779 SDVTList SelectionDAG::getVTList(EVT VT) {
7780 return makeVTList(SDNode::getValueTypeList(VT), 1);
7781 }
7782
7783 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
7784 FoldingSetNodeID ID;
7785 ID.AddInteger(2U);
7786 ID.AddInteger(VT1.getRawBits());
7787 ID.AddInteger(VT2.getRawBits());
7788
7789 void *IP = nullptr;
7790 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
7791 if (!Result) {
7792 EVT *Array = Allocator.Allocate<EVT>(2);
7793 Array[0] = VT1;
7794 Array[1] = VT2;
7795 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
7796 VTListMap.InsertNode(Result, IP);
7797 }
7798 return Result->getSDVTList();
7799 }
7800
7801 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
7802 FoldingSetNodeID ID;
7803 ID.AddInteger(3U);
7804 ID.AddInteger(VT1.getRawBits());
7805 ID.AddInteger(VT2.getRawBits());
7806 ID.AddInteger(VT3.getRawBits());
7807
7808 void *IP = nullptr;
7809 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
7810 if (!Result) {
7811 EVT *Array = Allocator.Allocate<EVT>(3);
7812 Array[0] = VT1;
7813 Array[1] = VT2;
7814 Array[2] = VT3;
7815 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
7816 VTListMap.InsertNode(Result, IP);
7817 }
7818 return Result->getSDVTList();
7819 }
7820
7821 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
7822 FoldingSetNodeID ID;
7823 ID.AddInteger(4U);
7824 ID.AddInteger(VT1.getRawBits());
7825 ID.AddInteger(VT2.getRawBits());
7826 ID.AddInteger(VT3.getRawBits());
7827 ID.AddInteger(VT4.getRawBits());
7828
7829 void *IP = nullptr;
7830 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
7831 if (!Result) {
7832 EVT *Array = Allocator.Allocate<EVT>(4);
7833 Array[0] = VT1;
7834 Array[1] = VT2;
7835 Array[2] = VT3;
7836 Array[3] = VT4;
7837 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
7838 VTListMap.InsertNode(Result, IP);
7839 }
7840 return Result->getSDVTList();
7841 }
7842
7843 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) {
7844 unsigned NumVTs = VTs.size();
7845 FoldingSetNodeID ID;
7846 ID.AddInteger(NumVTs);
7847 for (unsigned index = 0; index < NumVTs; index++) {
7848 ID.AddInteger(VTs[index].getRawBits());
7849 }
7850
7851 void *IP = nullptr;
7852 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
7853 if (!Result) {
7854 EVT *Array = Allocator.Allocate<EVT>(NumVTs);
7855 llvm::copy(VTs, Array);
7856 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
7857 VTListMap.InsertNode(Result, IP);
7858 }
7859 return Result->getSDVTList();
7860 }
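// Editor's note: a small usage sketch (illustrative only). All getVTList
// overloads intern the type list, so repeated queries hand back the same
// underlying array:
//
//   SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
//   assert(VTs.VTs == DAG.getVTList(MVT::i64, MVT::Other).VTs &&
//          "interned VT lists should be shared");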
7861
7862
7863 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the
7864 /// specified operands. If the resultant node already exists in the DAG,
7865 /// this does not modify the specified node, instead it returns the node that
7866 /// already exists. If the resultant node does not exist in the DAG, the
7867 /// input node is returned. As a degenerate case, if you specify the same
7868 /// input operands as the node already has, the input node is returned.
7869 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
7870 assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
7871
7872 // Check to see if there is no change.
7873 if (Op == N->getOperand(0)) return N;
7874
7875 // See if the modified node already exists.
7876 void *InsertPos = nullptr;
7877 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
7878 return Existing;
7879
7880 // Nope it doesn't. Remove the node from its current place in the maps.
7881 if (InsertPos)
7882 if (!RemoveNodeFromCSEMaps(N))
7883 InsertPos = nullptr;
7884
7885 // Now we update the operands.
7886 N->OperandList[0].set(Op);
7887
7888 updateDivergence(N);
7889 // If this gets put into a CSE map, add it.
7890 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
7891 return N;
7892 }
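// Editor's note: a hedged usage sketch (not from the original file). A combine
// that rewrites the single operand of an existing node would typically do:
//
//   SDNode *Res = DAG.UpdateNodeOperands(N, NewOp);
//   // Res may be a pre-existing CSE'd node rather than N itself, so callers
//   // must use the returned pointer instead of assuming N was mutated.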
7893
7894 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
7895 assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
7896
7897 // Check to see if there is no change.
7898 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
7899 return N; // No operands changed, just return the input node.
7900
7901 // See if the modified node already exists.
7902 void *InsertPos = nullptr;
7903 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
7904 return Existing;
7905
7906 // Nope it doesn't. Remove the node from its current place in the maps.
7907 if (InsertPos)
7908 if (!RemoveNodeFromCSEMaps(N))
7909 InsertPos = nullptr;
7910
7911 // Now we update the operands.
7912 if (N->OperandList[0] != Op1)
7913 N->OperandList[0].set(Op1);
7914 if (N->OperandList[1] != Op2)
7915 N->OperandList[1].set(Op2);
7916
7917 updateDivergence(N);
7918 // If this gets put into a CSE map, add it.
7919 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
7920 return N;
7921 }
7922
7923 SDNode *SelectionDAG::
7924 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
7925 SDValue Ops[] = { Op1, Op2, Op3 };
7926 return UpdateNodeOperands(N, Ops);
7927 }
7928
7929 SDNode *SelectionDAG::
7930 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
7931 SDValue Op3, SDValue Op4) {
7932 SDValue Ops[] = { Op1, Op2, Op3, Op4 };
7933 return UpdateNodeOperands(N, Ops);
7934 }
7935
7936 SDNode *SelectionDAG::
7937 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
7938 SDValue Op3, SDValue Op4, SDValue Op5) {
7939 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
7940 return UpdateNodeOperands(N, Ops);
7941 }
7942
7943 SDNode *SelectionDAG::
7944 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) {
7945 unsigned NumOps = Ops.size();
7946 assert(N->getNumOperands() == NumOps &&
7947 "Update with wrong number of operands");
7948
7949 // If no operands changed just return the input node.
7950 if (std::equal(Ops.begin(), Ops.end(), N->op_begin()))
7951 return N;
7952
7953 // See if the modified node already exists.
7954 void *InsertPos = nullptr;
7955 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
7956 return Existing;
7957
7958 // Nope it doesn't. Remove the node from its current place in the maps.
7959 if (InsertPos)
7960 if (!RemoveNodeFromCSEMaps(N))
7961 InsertPos = nullptr;
7962
7963 // Now we update the operands.
7964 for (unsigned i = 0; i != NumOps; ++i)
7965 if (N->OperandList[i] != Ops[i])
7966 N->OperandList[i].set(Ops[i]);
7967
7968 updateDivergence(N);
7969 // If this gets put into a CSE map, add it.
7970 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
7971 return N;
7972 }
7973
7974 /// DropOperands - Release the operands and set this node to have
7975 /// zero operands.
7976 void SDNode::DropOperands() {
7977 // Unlike the code in MorphNodeTo that does this, we don't need to
7978 // watch for dead nodes here.
7979 for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
7980 SDUse &Use = *I++;
7981 Use.set(SDValue());
7982 }
7983 }
7984
7985 void SelectionDAG::setNodeMemRefs(MachineSDNode *N,
7986 ArrayRef<MachineMemOperand *> NewMemRefs) {
7987 if (NewMemRefs.empty()) {
7988 N->clearMemRefs();
7989 return;
7990 }
7991
7992 // Check if we can avoid allocating by storing a single reference directly.
7993 if (NewMemRefs.size() == 1) {
7994 N->MemRefs = NewMemRefs[0];
7995 N->NumMemRefs = 1;
7996 return;
7997 }
7998
7999 MachineMemOperand **MemRefsBuffer =
8000 Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.size());
8001 llvm::copy(NewMemRefs, MemRefsBuffer);
8002 N->MemRefs = MemRefsBuffer;
8003 N->NumMemRefs = static_cast<int>(NewMemRefs.size());
8004 }
8005
8006 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
8007 /// machine opcode.
8008 ///
8009 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8010 EVT VT) {
8011 SDVTList VTs = getVTList(VT);
8012 return SelectNodeTo(N, MachineOpc, VTs, None);
8013 }
8014
8015 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8016 EVT VT, SDValue Op1) {
8017 SDVTList VTs = getVTList(VT);
8018 SDValue Ops[] = { Op1 };
8019 return SelectNodeTo(N, MachineOpc, VTs, Ops);
8020 }
8021
8022 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8023 EVT VT, SDValue Op1,
8024 SDValue Op2) {
8025 SDVTList VTs = getVTList(VT);
8026 SDValue Ops[] = { Op1, Op2 };
8027 return SelectNodeTo(N, MachineOpc, VTs, Ops);
8028 }
8029
8030 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8031 EVT VT, SDValue Op1,
8032 SDValue Op2, SDValue Op3) {
8033 SDVTList VTs = getVTList(VT);
8034 SDValue Ops[] = { Op1, Op2, Op3 };
8035 return SelectNodeTo(N, MachineOpc, VTs, Ops);
8036 }
8037
8038 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8039 EVT VT, ArrayRef<SDValue> Ops) {
8040 SDVTList VTs = getVTList(VT);
8041 return SelectNodeTo(N, MachineOpc, VTs, Ops);
8042 }
8043
8044 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8045 EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
8046 SDVTList VTs = getVTList(VT1, VT2);
8047 return SelectNodeTo(N, MachineOpc, VTs, Ops);
8048 }
8049
8050 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8051 EVT VT1, EVT VT2) {
8052 SDVTList VTs = getVTList(VT1, VT2);
8053 return SelectNodeTo(N, MachineOpc, VTs, None);
8054 }
8055
8056 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8057 EVT VT1, EVT VT2, EVT VT3,
8058 ArrayRef<SDValue> Ops) {
8059 SDVTList VTs = getVTList(VT1, VT2, VT3);
8060 return SelectNodeTo(N, MachineOpc, VTs, Ops);
8061 }
8062
8063 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8064 EVT VT1, EVT VT2,
8065 SDValue Op1, SDValue Op2) {
8066 SDVTList VTs = getVTList(VT1, VT2);
8067 SDValue Ops[] = { Op1, Op2 };
8068 return SelectNodeTo(N, MachineOpc, VTs, Ops);
8069 }
8070
8071 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8072 SDVTList VTs,ArrayRef<SDValue> Ops) {
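  // Note (editorial): target instruction opcodes are stored in SDNode as the
  // bitwise complement (~MachineOpc), which keeps MachineSDNode opcodes
  // disjoint from the ISD opcode space; getMachineNode below builds its
  // FoldingSet ID with ~Opcode for the same reason.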
8073 SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
8074 // Reset the NodeID to -1.
8075 New->setNodeId(-1);
8076 if (New != N) {
8077 ReplaceAllUsesWith(N, New);
8078 RemoveDeadNode(N);
8079 }
8080 return New;
8081 }
8082
8083 /// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
8084 /// the line number information on the merged node since it is not possible to
8085 /// preserve the information that the operation is associated with multiple lines.
8086 /// This will make the debugger work better at -O0, where there is a higher
8087 /// probability of having other instructions associated with that line.
8088 ///
8089 /// For IROrder, we keep the smaller of the two.
8090 SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) {
8091 DebugLoc NLoc = N->getDebugLoc();
8092 if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
8093 N->setDebugLoc(DebugLoc());
8094 }
8095 unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
8096 N->setIROrder(Order);
8097 return N;
8098 }
8099
8100 /// MorphNodeTo - This *mutates* the specified node to have the specified
8101 /// return type, opcode, and operands.
8102 ///
8103 /// Note that MorphNodeTo returns the resultant node. If there is already a
8104 /// node of the specified opcode and operands, it returns that node instead of
8105 /// the current one. Note that the SDLoc need not be the same.
8106 ///
8107 /// Using MorphNodeTo is faster than creating a new node and swapping it in
8108 /// with ReplaceAllUsesWith both because it often avoids allocating a new
8109 /// node, and because it doesn't require CSE recalculation for any of
8110 /// the node's users.
8111 ///
8112 /// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
8113 /// As a consequence, it isn't appropriate to use from within the DAG combiner or
8114 /// the legalizer, which maintain worklists that would need to be updated when
8115 /// deleting things.
8116 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
8117 SDVTList VTs, ArrayRef<SDValue> Ops) {
8118 // If an identical node already exists, use it.
8119 void *IP = nullptr;
8120 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
8121 FoldingSetNodeID ID;
8122 AddNodeIDNode(ID, Opc, VTs, Ops);
8123 if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP))
8124 return UpdateSDLocOnMergeSDNode(ON, SDLoc(N));
8125 }
8126
8127 if (!RemoveNodeFromCSEMaps(N))
8128 IP = nullptr;
8129
8130 // Start the morphing.
8131 N->NodeType = Opc;
8132 N->ValueList = VTs.VTs;
8133 N->NumValues = VTs.NumVTs;
8134
8135 // Clear the operands list, updating used nodes to remove this from their
8136 // use list. Keep track of any operands that become dead as a result.
8137 SmallPtrSet<SDNode*, 16> DeadNodeSet;
8138 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
8139 SDUse &Use = *I++;
8140 SDNode *Used = Use.getNode();
8141 Use.set(SDValue());
8142 if (Used->use_empty())
8143 DeadNodeSet.insert(Used);
8144 }
8145
8146 // For MachineNode, initialize the memory references information.
8147 if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N))
8148 MN->clearMemRefs();
8149
8150 // Swap for an appropriately sized array from the recycler.
8151 removeOperands(N);
8152 createOperands(N, Ops);
8153
8154 // Delete any nodes that are still dead after adding the uses for the
8155 // new operands.
8156 if (!DeadNodeSet.empty()) {
8157 SmallVector<SDNode *, 16> DeadNodes;
8158 for (SDNode *N : DeadNodeSet)
8159 if (N->use_empty())
8160 DeadNodes.push_back(N);
8161 RemoveDeadNodes(DeadNodes);
8162 }
8163
8164 if (IP)
8165 CSEMap.InsertNode(N, IP); // Memoize the new node.
8166 return N;
8167 }
8168
8169 SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) {
8170 unsigned OrigOpc = Node->getOpcode();
8171 unsigned NewOpc;
8172 switch (OrigOpc) {
8173 default:
8174 llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!");
8175 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
8176 case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break;
8177 #define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
8178 case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break;
8179 #include "llvm/IR/ConstrainedOps.def"
8180 }
8181
8182 assert(Node->getNumValues() == 2 && "Unexpected number of results!");
8183
8184 // We're taking this node out of the chain, so we need to re-link things.
8185 SDValue InputChain = Node->getOperand(0);
8186 SDValue OutputChain = SDValue(Node, 1);
8187 ReplaceAllUsesOfValueWith(OutputChain, InputChain);
8188
8189 SmallVector<SDValue, 3> Ops;
8190 for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
8191 Ops.push_back(Node->getOperand(i));
8192
8193 SDVTList VTs = getVTList(Node->getValueType(0));
8194 SDNode *Res = MorphNodeTo(Node, NewOpc, VTs, Ops);
8195
8196 // MorphNodeTo can operate in two ways: if an existing node with the
8197 // specified operands exists, it can just return it. Otherwise, it
8198 // updates the node in place to have the requested operands.
8199 if (Res == Node) {
8200 // If we updated the node in place, reset the node ID. To the isel,
8201 // this should be just like a newly allocated machine node.
8202 Res->setNodeId(-1);
8203 } else {
8204 ReplaceAllUsesWith(Node, Res);
8205 RemoveDeadNode(Node);
8206 }
8207
8208 return Res;
8209 }
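// Editor's note: illustratively, this turns e.g. ISD::STRICT_FADD (chain, LHS,
// RHS) into a plain ISD::FADD (LHS, RHS): uses of the old chain result are
// rewired to the incoming chain, and the node is morphed in place (or replaced
// by an existing equivalent node) with the non-strict opcode.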
8210
8211 /// getMachineNode - These are used for target selectors to create a new node
8212 /// with specified return type(s), MachineInstr opcode, and operands.
8213 ///
8214 /// Note that getMachineNode returns the resultant node. If there is already a
8215 /// node of the specified opcode and operands, it returns that node instead of
8216 /// the current one.
8217 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8218 EVT VT) {
8219 SDVTList VTs = getVTList(VT);
8220 return getMachineNode(Opcode, dl, VTs, None);
8221 }
8222
8223 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8224 EVT VT, SDValue Op1) {
8225 SDVTList VTs = getVTList(VT);
8226 SDValue Ops[] = { Op1 };
8227 return getMachineNode(Opcode, dl, VTs, Ops);
8228 }
8229
8230 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8231 EVT VT, SDValue Op1, SDValue Op2) {
8232 SDVTList VTs = getVTList(VT);
8233 SDValue Ops[] = { Op1, Op2 };
8234 return getMachineNode(Opcode, dl, VTs, Ops);
8235 }
8236
8237 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8238 EVT VT, SDValue Op1, SDValue Op2,
8239 SDValue Op3) {
8240 SDVTList VTs = getVTList(VT);
8241 SDValue Ops[] = { Op1, Op2, Op3 };
8242 return getMachineNode(Opcode, dl, VTs, Ops);
8243 }
8244
8245 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8246 EVT VT, ArrayRef<SDValue> Ops) {
8247 SDVTList VTs = getVTList(VT);
8248 return getMachineNode(Opcode, dl, VTs, Ops);
8249 }
8250
8251 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8252 EVT VT1, EVT VT2, SDValue Op1,
8253 SDValue Op2) {
8254 SDVTList VTs = getVTList(VT1, VT2);
8255 SDValue Ops[] = { Op1, Op2 };
8256 return getMachineNode(Opcode, dl, VTs, Ops);
8257 }
8258
8259 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8260 EVT VT1, EVT VT2, SDValue Op1,
8261 SDValue Op2, SDValue Op3) {
8262 SDVTList VTs = getVTList(VT1, VT2);
8263 SDValue Ops[] = { Op1, Op2, Op3 };
8264 return getMachineNode(Opcode, dl, VTs, Ops);
8265 }
8266
8267 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8268 EVT VT1, EVT VT2,
8269 ArrayRef<SDValue> Ops) {
8270 SDVTList VTs = getVTList(VT1, VT2);
8271 return getMachineNode(Opcode, dl, VTs, Ops);
8272 }
8273
8274 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8275 EVT VT1, EVT VT2, EVT VT3,
8276 SDValue Op1, SDValue Op2) {
8277 SDVTList VTs = getVTList(VT1, VT2, VT3);
8278 SDValue Ops[] = { Op1, Op2 };
8279 return getMachineNode(Opcode, dl, VTs, Ops);
8280 }
8281
8282 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8283 EVT VT1, EVT VT2, EVT VT3,
8284 SDValue Op1, SDValue Op2,
8285 SDValue Op3) {
8286 SDVTList VTs = getVTList(VT1, VT2, VT3);
8287 SDValue Ops[] = { Op1, Op2, Op3 };
8288 return getMachineNode(Opcode, dl, VTs, Ops);
8289 }
8290
8291 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8292 EVT VT1, EVT VT2, EVT VT3,
8293 ArrayRef<SDValue> Ops) {
8294 SDVTList VTs = getVTList(VT1, VT2, VT3);
8295 return getMachineNode(Opcode, dl, VTs, Ops);
8296 }
8297
8298 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
8299 ArrayRef<EVT> ResultTys,
8300 ArrayRef<SDValue> Ops) {
8301 SDVTList VTs = getVTList(ResultTys);
8302 return getMachineNode(Opcode, dl, VTs, Ops);
8303 }
8304
8305 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL,
8306 SDVTList VTs,
8307 ArrayRef<SDValue> Ops) {
8308 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
8309 MachineSDNode *N;
8310 void *IP = nullptr;
8311
8312 if (DoCSE) {
8313 FoldingSetNodeID ID;
8314 AddNodeIDNode(ID, ~Opcode, VTs, Ops);
8315 IP = nullptr;
8316 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
8317 return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL));
8318 }
8319 }
8320
8321 // Allocate a new MachineSDNode.
8322 N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
8323 createOperands(N, Ops);
8324
8325 if (DoCSE)
8326 CSEMap.InsertNode(N, IP);
8327
8328 InsertNode(N);
8329 NewSDValueDbgMsg(SDValue(N, 0), "Creating new machine node: ", this);
8330 return N;
8331 }
8332
8333 /// getTargetExtractSubreg - A convenience function for creating
8334 /// TargetOpcode::EXTRACT_SUBREG nodes.
8335 SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
8336 SDValue Operand) {
8337 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
8338 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
8339 VT, Operand, SRIdxVal);
8340 return SDValue(Subreg, 0);
8341 }
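// Editor's note: a hedged usage sketch; the sub-register index name below is
// hypothetical and purely illustrative, not taken from this file:
//
//   // Grab the low 32-bit sub-register of a 64-bit value on a target that
//   // defines a sub-register index named SubLo32.
//   SDValue Lo = DAG.getTargetExtractSubreg(TargetNamespace::SubLo32, DL,
//                                           MVT::i32, Val64);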
8342
8343 /// getTargetInsertSubreg - A convenience function for creating
8344 /// TargetOpcode::INSERT_SUBREG nodes.
8345 SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
8346 SDValue Operand, SDValue Subreg) {
8347 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
8348 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
8349 VT, Operand, Subreg, SRIdxVal);
8350 return SDValue(Result, 0);
8351 }
8352
8353 /// getNodeIfExists - Get the specified node if it's already available, or
8354 /// else return NULL.
8355 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
8356 ArrayRef<SDValue> Ops) {
8357 SDNodeFlags Flags;
8358 if (Inserter)
8359 Flags = Inserter->getFlags();
8360 return getNodeIfExists(Opcode, VTList, Ops, Flags);
8361 }
8362
8363 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
8364 ArrayRef<SDValue> Ops,
8365 const SDNodeFlags Flags) {
8366 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
8367 FoldingSetNodeID ID;
8368 AddNodeIDNode(ID, Opcode, VTList, Ops);
8369 void *IP = nullptr;
8370 if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) {
8371 E->intersectFlagsWith(Flags);
8372 return E;
8373 }
8374 }
8375 return nullptr;
8376 }
8377
8378 /// doesNodeExist - Check if a node exists without modifying its flags.
8379 bool SelectionDAG::doesNodeExist(unsigned Opcode, SDVTList VTList,
8380 ArrayRef<SDValue> Ops) {
8381 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
8382 FoldingSetNodeID ID;
8383 AddNodeIDNode(ID, Opcode, VTList, Ops);
8384 void *IP = nullptr;
8385 if (FindNodeOrInsertPos(ID, SDLoc(), IP))
8386 return true;
8387 }
8388 return false;
8389 }
8390
8391 /// getDbgValue - Creates a SDDbgValue node.
8392 ///
8393 /// SDNode
8394 SDDbgValue *SelectionDAG::getDbgValue(DIVariable *Var, DIExpression *Expr,
8395 SDNode *N, unsigned R, bool IsIndirect,
8396 const DebugLoc &DL, unsigned O) {
8397 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
8398 "Expected inlined-at fields to agree");
8399 return new (DbgInfo->getAlloc())
8400 SDDbgValue(Var, Expr, N, R, IsIndirect, DL, O);
8401 }
8402
8403 /// Constant
8404 SDDbgValue *SelectionDAG::getConstantDbgValue(DIVariable *Var,
8405 DIExpression *Expr,
8406 const Value *C,
8407 const DebugLoc &DL, unsigned O) {
8408 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
8409 "Expected inlined-at fields to agree");
8410 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, DL, O);
8411 }
8412
8413 /// FrameIndex
8414 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var,
8415 DIExpression *Expr, unsigned FI,
8416 bool IsIndirect,
8417 const DebugLoc &DL,
8418 unsigned O) {
8419 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
8420 "Expected inlined-at fields to agree");
8421 return new (DbgInfo->getAlloc())
8422 SDDbgValue(Var, Expr, FI, IsIndirect, DL, O, SDDbgValue::FRAMEIX);
8423 }
8424
8425 /// VReg
8426 SDDbgValue *SelectionDAG::getVRegDbgValue(DIVariable *Var,
8427 DIExpression *Expr,
8428 unsigned VReg, bool IsIndirect,
8429 const DebugLoc &DL, unsigned O) {
8430 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
8431 "Expected inlined-at fields to agree");
8432 return new (DbgInfo->getAlloc())
8433 SDDbgValue(Var, Expr, VReg, IsIndirect, DL, O, SDDbgValue::VREG);
8434 }
8435
8436 void SelectionDAG::transferDbgValues(SDValue From, SDValue To,
8437 unsigned OffsetInBits, unsigned SizeInBits,
8438 bool InvalidateDbg) {
8439 SDNode *FromNode = From.getNode();
8440 SDNode *ToNode = To.getNode();
8441 assert(FromNode && ToNode && "Can't modify dbg values");
8442
8443 // PR35338
8444 // TODO: assert(From != To && "Redundant dbg value transfer");
8445 // TODO: assert(FromNode != ToNode && "Intranode dbg value transfer");
8446 if (From == To || FromNode == ToNode)
8447 return;
8448
8449 if (!FromNode->getHasDebugValue())
8450 return;
8451
8452 SmallVector<SDDbgValue *, 2> ClonedDVs;
8453 for (SDDbgValue *Dbg : GetDbgValues(FromNode)) {
8454 if (Dbg->getKind() != SDDbgValue::SDNODE || Dbg->isInvalidated())
8455 continue;
8456
8457 // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value");
8458
8459 // Just transfer the dbg value attached to From.
8460 if (Dbg->getResNo() != From.getResNo())
8461 continue;
8462
8463 DIVariable *Var = Dbg->getVariable();
8464 auto *Expr = Dbg->getExpression();
8465 // If a fragment is requested, update the expression.
8466 if (SizeInBits) {
8467 // When splitting a larger (e.g., sign-extended) value whose
8468 // lower bits are described with an SDDbgValue, do not attempt
8469 // to transfer the SDDbgValue to the upper bits.
8470 if (auto FI = Expr->getFragmentInfo())
8471 if (OffsetInBits + SizeInBits > FI->SizeInBits)
8472 continue;
8473 auto Fragment = DIExpression::createFragmentExpression(Expr, OffsetInBits,
8474 SizeInBits);
8475 if (!Fragment)
8476 continue;
8477 Expr = *Fragment;
8478 }
8479 // Clone the SDDbgValue and move it to To.
8480 SDDbgValue *Clone = getDbgValue(
8481 Var, Expr, ToNode, To.getResNo(), Dbg->isIndirect(), Dbg->getDebugLoc(),
8482 std::max(ToNode->getIROrder(), Dbg->getOrder()));
8483 ClonedDVs.push_back(Clone);
8484
8485 if (InvalidateDbg) {
8486 // Invalidate value and indicate the SDDbgValue should not be emitted.
8487 Dbg->setIsInvalidated();
8488 Dbg->setIsEmitted();
8489 }
8490 }
8491
8492 for (SDDbgValue *Dbg : ClonedDVs)
8493 AddDbgValue(Dbg, ToNode, false);
8494 }
8495
8496 void SelectionDAG::salvageDebugInfo(SDNode &N) {
8497 if (!N.getHasDebugValue())
8498 return;
8499
8500 SmallVector<SDDbgValue *, 2> ClonedDVs;
8501 for (auto DV : GetDbgValues(&N)) {
8502 if (DV->isInvalidated())
8503 continue;
8504 switch (N.getOpcode()) {
8505 default:
8506 break;
8507 case ISD::ADD:
8508 SDValue N0 = N.getOperand(0);
8509 SDValue N1 = N.getOperand(1);
8510 if (!isConstantIntBuildVectorOrConstantInt(N0) &&
8511 isConstantIntBuildVectorOrConstantInt(N1)) {
8512 uint64_t Offset = N.getConstantOperandVal(1);
8513 // Rewrite an ADD constant node into a DIExpression. Since we are
8514 // performing arithmetic to compute the variable's *value* in the
8515 // DIExpression, we need to mark the expression with a
8516 // DW_OP_stack_value.
8517 auto *DIExpr = DV->getExpression();
8518 DIExpr =
8519 DIExpression::prepend(DIExpr, DIExpression::StackValue, Offset);
8520 SDDbgValue *Clone =
8521 getDbgValue(DV->getVariable(), DIExpr, N0.getNode(), N0.getResNo(),
8522 DV->isIndirect(), DV->getDebugLoc(), DV->getOrder());
8523 ClonedDVs.push_back(Clone);
8524 DV->setIsInvalidated();
8525 DV->setIsEmitted();
8526 LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting";
8527 N0.getNode()->dumprFull(this);
8528 dbgs() << " into " << *DIExpr << '\n');
8529 }
8530 }
8531 }
8532
8533 for (SDDbgValue *Dbg : ClonedDVs)
8534 AddDbgValue(Dbg, Dbg->getSDNode(), false);
8535 }
8536
8537 /// Creates a SDDbgLabel node.
8538 SDDbgLabel *SelectionDAG::getDbgLabel(DILabel *Label,
8539 const DebugLoc &DL, unsigned O) {
8540 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) &&
8541 "Expected inlined-at fields to agree");
8542 return new (DbgInfo->getAlloc()) SDDbgLabel(Label, DL, O);
8543 }
8544
8545 namespace {
8546
8547 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
8548 /// pointed to by a use iterator is deleted, increment the use iterator
8549 /// so that it doesn't dangle.
8550 ///
8551 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
8552 SDNode::use_iterator &UI;
8553 SDNode::use_iterator &UE;
8554
8555   void NodeDeleted(SDNode *N, SDNode *E) override {
8556 // Increment the iterator as needed.
8557 while (UI != UE && N == *UI)
8558 ++UI;
8559 }
8560
8561 public:
8562   RAUWUpdateListener(SelectionDAG &d,
8563 SDNode::use_iterator &ui,
8564 SDNode::use_iterator &ue)
8565 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
8566 };
8567
8568 } // end anonymous namespace
8569
8570 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
8571 /// This can cause recursive merging of nodes in the DAG.
8572 ///
8573 /// This version assumes From has a single result value.
8574 ///
8575 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
8576 SDNode *From = FromN.getNode();
8577 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
8578 "Cannot replace with this method!");
8579 assert(From != To.getNode() && "Cannot replace uses of with self");
8580
8581 // Preserve Debug Values
8582 transferDbgValues(FromN, To);
8583
8584 // Iterate over all the existing uses of From. New uses will be added
8585 // to the beginning of the use list, which we avoid visiting.
8586 // This specifically avoids visiting uses of From that arise while the
8587 // replacement is happening, because any such uses would be the result
8588 // of CSE: If an existing node looks like From after one of its operands
8589   // is replaced by To, we don't want to replace all of its users with To
8590 // too. See PR3018 for more info.
8591 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
8592 RAUWUpdateListener Listener(*this, UI, UE);
8593 while (UI != UE) {
8594 SDNode *User = *UI;
8595
8596 // This node is about to morph, remove its old self from the CSE maps.
8597 RemoveNodeFromCSEMaps(User);
8598
8599 // A user can appear in a use list multiple times, and when this
8600 // happens the uses are usually next to each other in the list.
8601 // To help reduce the number of CSE recomputations, process all
8602 // the uses of this user that we can find this way.
8603 do {
8604 SDUse &Use = UI.getUse();
8605 ++UI;
8606 Use.set(To);
8607 if (To->isDivergent() != From->isDivergent())
8608 updateDivergence(User);
8609 } while (UI != UE && *UI == User);
8610 // Now that we have modified User, add it back to the CSE maps. If it
8611 // already exists there, recursively merge the results together.
8612 AddModifiedNodeToCSEMaps(User);
8613 }
8614
8615 // If we just RAUW'd the root, take note.
8616 if (FromN == getRoot())
8617 setRoot(To);
8618 }
8619
8620 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
8621 /// This can cause recursive merging of nodes in the DAG.
8622 ///
8623 /// This version assumes that for each value of From, there is a
8624 /// corresponding value in To in the same position with the same type.
8625 ///
8626 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
8627 #ifndef NDEBUG
8628 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
8629 assert((!From->hasAnyUseOfValue(i) ||
8630 From->getValueType(i) == To->getValueType(i)) &&
8631 "Cannot use this version of ReplaceAllUsesWith!");
8632 #endif
8633
8634 // Handle the trivial case.
8635 if (From == To)
8636 return;
8637
8638 // Preserve Debug Info. Only do this if there's a use.
8639 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
8640 if (From->hasAnyUseOfValue(i)) {
8641 assert((i < To->getNumValues()) && "Invalid To location");
8642 transferDbgValues(SDValue(From, i), SDValue(To, i));
8643 }
8644
8645 // Iterate over just the existing users of From. See the comments in
8646 // the ReplaceAllUsesWith above.
8647 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
8648 RAUWUpdateListener Listener(*this, UI, UE);
8649 while (UI != UE) {
8650 SDNode *User = *UI;
8651
8652 // This node is about to morph, remove its old self from the CSE maps.
8653 RemoveNodeFromCSEMaps(User);
8654
8655 // A user can appear in a use list multiple times, and when this
8656 // happens the uses are usually next to each other in the list.
8657 // To help reduce the number of CSE recomputations, process all
8658 // the uses of this user that we can find this way.
8659 do {
8660 SDUse &Use = UI.getUse();
8661 ++UI;
8662 Use.setNode(To);
8663 if (To->isDivergent() != From->isDivergent())
8664 updateDivergence(User);
8665 } while (UI != UE && *UI == User);
8666
8667 // Now that we have modified User, add it back to the CSE maps. If it
8668 // already exists there, recursively merge the results together.
8669 AddModifiedNodeToCSEMaps(User);
8670 }
8671
8672 // If we just RAUW'd the root, take note.
8673 if (From == getRoot().getNode())
8674 setRoot(SDValue(To, getRoot().getResNo()));
8675 }
8676
8677 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
8678 /// This can cause recursive merging of nodes in the DAG.
8679 ///
8680 /// This version can replace From with any result values. To must match the
8681 /// number and types of values returned by From.
8682 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
8683 if (From->getNumValues() == 1) // Handle the simple case efficiently.
8684 return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
8685
8686 // Preserve Debug Info.
8687 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
8688 transferDbgValues(SDValue(From, i), To[i]);
8689
8690 // Iterate over just the existing users of From. See the comments in
8691 // the ReplaceAllUsesWith above.
8692 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
8693 RAUWUpdateListener Listener(*this, UI, UE);
8694 while (UI != UE) {
8695 SDNode *User = *UI;
8696
8697 // This node is about to morph, remove its old self from the CSE maps.
8698 RemoveNodeFromCSEMaps(User);
8699
8700 // A user can appear in a use list multiple times, and when this happens the
8701 // uses are usually next to each other in the list. To help reduce the
8702 // number of CSE and divergence recomputations, process all the uses of this
8703 // user that we can find this way.
8704 bool To_IsDivergent = false;
8705 do {
8706 SDUse &Use = UI.getUse();
8707 const SDValue &ToOp = To[Use.getResNo()];
8708 ++UI;
8709 Use.set(ToOp);
8710 To_IsDivergent |= ToOp->isDivergent();
8711 } while (UI != UE && *UI == User);
8712
8713 if (To_IsDivergent != From->isDivergent())
8714 updateDivergence(User);
8715
8716 // Now that we have modified User, add it back to the CSE maps. If it
8717 // already exists there, recursively merge the results together.
8718 AddModifiedNodeToCSEMaps(User);
8719 }
8720
8721 // If we just RAUW'd the root, take note.
8722 if (From == getRoot().getNode())
8723 setRoot(SDValue(To[getRoot().getResNo()]));
8724 }
8725
8726 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
8727 /// uses of other values produced by From.getNode() alone. The Deleted
8728 /// vector is handled the same way as for ReplaceAllUsesWith.
8729 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){
8730 // Handle the really simple, really trivial case efficiently.
8731 if (From == To) return;
8732
8733 // Handle the simple, trivial case efficiently.
8734 if (From.getNode()->getNumValues() == 1) {
8735 ReplaceAllUsesWith(From, To);
8736 return;
8737 }
8738
8739 // Preserve Debug Info.
8740 transferDbgValues(From, To);
8741
8742 // Iterate over just the existing users of From. See the comments in
8743 // the ReplaceAllUsesWith above.
8744 SDNode::use_iterator UI = From.getNode()->use_begin(),
8745 UE = From.getNode()->use_end();
8746 RAUWUpdateListener Listener(*this, UI, UE);
8747 while (UI != UE) {
8748 SDNode *User = *UI;
8749 bool UserRemovedFromCSEMaps = false;
8750
8751 // A user can appear in a use list multiple times, and when this
8752 // happens the uses are usually next to each other in the list.
8753 // To help reduce the number of CSE recomputations, process all
8754 // the uses of this user that we can find this way.
8755 do {
8756 SDUse &Use = UI.getUse();
8757
8758 // Skip uses of different values from the same node.
8759 if (Use.getResNo() != From.getResNo()) {
8760 ++UI;
8761 continue;
8762 }
8763
8764 // If this node hasn't been modified yet, it's still in the CSE maps,
8765 // so remove its old self from the CSE maps.
8766 if (!UserRemovedFromCSEMaps) {
8767 RemoveNodeFromCSEMaps(User);
8768 UserRemovedFromCSEMaps = true;
8769 }
8770
8771 ++UI;
8772 Use.set(To);
8773 if (To->isDivergent() != From->isDivergent())
8774 updateDivergence(User);
8775 } while (UI != UE && *UI == User);
8776 // We are iterating over all uses of the From node, so if a use
8777 // doesn't use the specific value, no changes are made.
8778 if (!UserRemovedFromCSEMaps)
8779 continue;
8780
8781 // Now that we have modified User, add it back to the CSE maps. If it
8782 // already exists there, recursively merge the results together.
8783 AddModifiedNodeToCSEMaps(User);
8784 }
8785
8786 // If we just RAUW'd the root, take note.
8787 if (From == getRoot())
8788 setRoot(To);
8789 }
8790
8791 namespace {
8792
8793 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
8794 /// to record information about a use.
8795 struct UseMemo {
8796 SDNode *User;
8797 unsigned Index;
8798 SDUse *Use;
8799 };
8800
8801 /// operator< - Sort Memos by User.
8802 bool operator<(const UseMemo &L, const UseMemo &R) {
8803 return (intptr_t)L.User < (intptr_t)R.User;
8804 }
8805
8806 } // end anonymous namespace
8807
8808 bool SelectionDAG::calculateDivergence(SDNode *N) {
8809 if (TLI->isSDNodeAlwaysUniform(N)) {
8810 assert(!TLI->isSDNodeSourceOfDivergence(N, FLI, DA) &&
8811 "Conflicting divergence information!");
8812 return false;
8813 }
8814 if (TLI->isSDNodeSourceOfDivergence(N, FLI, DA))
8815 return true;
8816 for (auto &Op : N->ops()) {
8817 if (Op.Val.getValueType() != MVT::Other && Op.getNode()->isDivergent())
8818 return true;
8819 }
8820 return false;
8821 }
8822
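// Illustrative example (sketch; the divergence source is an assumption): on a
// GPU-like target where a thread/lane-id read is a source of divergence,
// switching one operand of N from a uniform value to such a value makes
// calculateDivergence(N) return true; updateDivergence() below then walks
// N's users with a worklist, re-marking each one and stopping along any path
// as soon as a node's divergence bit no longer changes.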
8823 void SelectionDAG::updateDivergence(SDNode *N) {
8824 SmallVector<SDNode *, 16> Worklist(1, N);
8825 do {
8826 N = Worklist.pop_back_val();
8827 bool IsDivergent = calculateDivergence(N);
8828 if (N->SDNodeBits.IsDivergent != IsDivergent) {
8829 N->SDNodeBits.IsDivergent = IsDivergent;
8830 llvm::append_range(Worklist, N->uses());
8831 }
8832 } while (!Worklist.empty());
8833 }
8834
8835 void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) {
8836 DenseMap<SDNode *, unsigned> Degree;
8837 Order.reserve(AllNodes.size());
8838 for (auto &N : allnodes()) {
8839 unsigned NOps = N.getNumOperands();
8840 Degree[&N] = NOps;
8841 if (0 == NOps)
8842 Order.push_back(&N);
8843 }
8844 for (size_t I = 0; I != Order.size(); ++I) {
8845 SDNode *N = Order[I];
8846 for (auto U : N->uses()) {
8847 unsigned &UnsortedOps = Degree[U];
8848 if (0 == --UnsortedOps)
8849 Order.push_back(U);
8850 }
8851 }
8852 }
8853
8854 #ifndef NDEBUG
8855 void SelectionDAG::VerifyDAGDiverence() {
8856 std::vector<SDNode *> TopoOrder;
8857 CreateTopologicalOrder(TopoOrder);
8858 for (auto *N : TopoOrder) {
8859 assert(calculateDivergence(N) == N->isDivergent() &&
8860 "Divergence bit inconsistency detected");
8861 }
8862 }
8863 #endif
8864
8865 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
8866 /// uses of other values produced by From.getNode() alone. The same value
8867 /// may appear in both the From and To list. The Deleted vector is
8868 /// handled the same way as for ReplaceAllUsesWith.
8869 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
8870 const SDValue *To,
8871 unsigned Num){
8872 // Handle the simple, trivial case efficiently.
8873 if (Num == 1)
8874 return ReplaceAllUsesOfValueWith(*From, *To);
8875
8876 transferDbgValues(*From, *To);
8877
8878 // Collect all the uses and record them up front. This helps
8879 // processing new uses that are introduced during the
8880 // replacement process.
8881 SmallVector<UseMemo, 4> Uses;
8882 for (unsigned i = 0; i != Num; ++i) {
8883 unsigned FromResNo = From[i].getResNo();
8884 SDNode *FromNode = From[i].getNode();
8885 for (SDNode::use_iterator UI = FromNode->use_begin(),
8886 E = FromNode->use_end(); UI != E; ++UI) {
8887 SDUse &Use = UI.getUse();
8888 if (Use.getResNo() == FromResNo) {
8889 UseMemo Memo = { *UI, i, &Use };
8890 Uses.push_back(Memo);
8891 }
8892 }
8893 }
8894
8895 // Sort the uses, so that all the uses from a given User are together.
8896 llvm::sort(Uses);
8897
8898 for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
8899 UseIndex != UseIndexEnd; ) {
8900 // We know that this user uses some value of From. If it is the right
8901 // value, update it.
8902 SDNode *User = Uses[UseIndex].User;
8903
8904 // This node is about to morph, remove its old self from the CSE maps.
8905 RemoveNodeFromCSEMaps(User);
8906
8907 // The Uses array is sorted, so all the uses for a given User
8908 // are next to each other in the list.
8909 // To help reduce the number of CSE recomputations, process all
8910 // the uses of this user that we can find this way.
8911 do {
8912 unsigned i = Uses[UseIndex].Index;
8913 SDUse &Use = *Uses[UseIndex].Use;
8914 ++UseIndex;
8915
8916 Use.set(To[i]);
8917 } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
8918
8919 // Now that we have modified User, add it back to the CSE maps. If it
8920 // already exists there, recursively merge the results together.
8921 AddModifiedNodeToCSEMaps(User);
8922 }
8923 }
8924
8925 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
8926 /// based on their topological order. It returns the maximum id and a vector
8927 /// of the SDNodes* in assigned order by reference.
8928 unsigned SelectionDAG::AssignTopologicalOrder() {
8929 unsigned DAGSize = 0;
8930
8931 // SortedPos tracks the progress of the algorithm. Nodes before it are
8932 // sorted, nodes after it are unsorted. When the algorithm completes
8933 // it is at the end of the list.
8934 allnodes_iterator SortedPos = allnodes_begin();
8935
8936 // Visit all the nodes. Move nodes with no operands to the front of
8937 // the list immediately. Annotate nodes that do have operands with their
8938 // operand count. Before we do this, the Node Id fields of the nodes
8939 // may contain arbitrary values. After, the Node Id fields for nodes
8940 // before SortedPos will contain the topological sort index, and the
8941 // Node Id fields for nodes at SortedPos and after will contain the
8942 // count of outstanding operands.
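  // Worked example (illustrative): for a tiny DAG where A uses EntryToken and
  // B uses both EntryToken and A, the first pass places EntryToken with id 0
  // and temporarily stores the operand counts 1 and 2 in A's and B's Node Id
  // fields; the second pass decrements those counts as producers are placed
  // and assigns the next id whenever a count reaches zero, yielding
  // EntryToken=0, A=1, B=2.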
8943 for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) {
8944 SDNode *N = &*I++;
8945 checkForCycles(N, this);
8946 unsigned Degree = N->getNumOperands();
8947 if (Degree == 0) {
8948 // A node with no operands: add it to the result array immediately.
8949 N->setNodeId(DAGSize++);
8950 allnodes_iterator Q(N);
8951 if (Q != SortedPos)
8952 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
8953 assert(SortedPos != AllNodes.end() && "Overran node list");
8954 ++SortedPos;
8955 } else {
8956 // Temporarily use the Node Id as scratch space for the degree count.
8957 N->setNodeId(Degree);
8958 }
8959 }
8960
8961 // Visit all the nodes. As we iterate, move nodes into sorted order,
8962 // such that by the time the end is reached all nodes will be sorted.
8963 for (SDNode &Node : allnodes()) {
8964 SDNode *N = &Node;
8965 checkForCycles(N, this);
8966 // N is in sorted position, so all its uses have one less operand
8967 // that needs to be sorted.
8968 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
8969 UI != UE; ++UI) {
8970 SDNode *P = *UI;
8971 unsigned Degree = P->getNodeId();
8972 assert(Degree != 0 && "Invalid node degree");
8973 --Degree;
8974 if (Degree == 0) {
8975 // All of P's operands are sorted, so P may be sorted now.
8976 P->setNodeId(DAGSize++);
8977 if (P->getIterator() != SortedPos)
8978 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
8979 assert(SortedPos != AllNodes.end() && "Overran node list");
8980 ++SortedPos;
8981 } else {
8982 // Update P's outstanding operand count.
8983 P->setNodeId(Degree);
8984 }
8985 }
8986 if (Node.getIterator() == SortedPos) {
8987 #ifndef NDEBUG
8988 allnodes_iterator I(N);
8989 SDNode *S = &*++I;
8990 dbgs() << "Overran sorted position:\n";
8991 S->dumprFull(this); dbgs() << "\n";
8992 dbgs() << "Checking if this is due to cycles\n";
8993 checkForCycles(this, true);
8994 #endif
8995 llvm_unreachable(nullptr);
8996 }
8997 }
8998
8999 assert(SortedPos == AllNodes.end() &&
9000 "Topological sort incomplete!");
9001 assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
9002 "First node in topological sort is not the entry token!");
9003 assert(AllNodes.front().getNodeId() == 0 &&
9004 "First node in topological sort has non-zero id!");
9005 assert(AllNodes.front().getNumOperands() == 0 &&
9006 "First node in topological sort has operands!");
9007 assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
9008 "Last node in topologic sort has unexpected id!");
9009 assert(AllNodes.back().use_empty() &&
9010 "Last node in topologic sort has users!");
9011 assert(DAGSize == allnodes_size() && "Node count mismatch!");
9012 return DAGSize;
9013 }
9014
9015 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
9016 /// value is produced by SD.
9017 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
9018 if (SD) {
9019 assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
9020 SD->setHasDebugValue(true);
9021 }
9022 DbgInfo->add(DB, SD, isParameter);
9023 }
9024
9025 void SelectionDAG::AddDbgLabel(SDDbgLabel *DB) {
9026 DbgInfo->add(DB);
9027 }
9028
9029 SDValue SelectionDAG::makeEquivalentMemoryOrdering(SDValue OldChain,
9030 SDValue NewMemOpChain) {
9031 assert(isa<MemSDNode>(NewMemOpChain) && "Expected a memop node");
9032 assert(NewMemOpChain.getValueType() == MVT::Other && "Expected a token VT");
9033 // The new memory operation must have the same position as the old load in
9034 // terms of memory dependency. Create a TokenFactor for the old load and new
9035 // memory operation and update uses of the old load's output chain to use that
9036 // TokenFactor.
9037 if (OldChain == NewMemOpChain || OldChain.use_empty())
9038 return NewMemOpChain;
9039
9040 SDValue TokenFactor = getNode(ISD::TokenFactor, SDLoc(OldChain), MVT::Other,
9041 OldChain, NewMemOpChain);
9042 ReplaceAllUsesOfValueWith(OldChain, TokenFactor);
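  // The RAUW above also rewrites the TokenFactor's own use of OldChain (making
  // it refer to itself), so restore its operands to (OldChain, NewMemOpChain).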
9043 UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewMemOpChain);
9044 return TokenFactor;
9045 }
9046
9047 SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad,
9048 SDValue NewMemOp) {
9049 assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");
9050 SDValue OldChain = SDValue(OldLoad, 1);
9051 SDValue NewMemOpChain = NewMemOp.getValue(1);
9052 return makeEquivalentMemoryOrdering(OldChain, NewMemOpChain);
9053 }
9054
9055 SDValue SelectionDAG::getSymbolFunctionGlobalAddress(SDValue Op,
9056 Function **OutFunction) {
9057 assert(isa<ExternalSymbolSDNode>(Op) && "Node should be an ExternalSymbol");
9058
9059 auto *Symbol = cast<ExternalSymbolSDNode>(Op)->getSymbol();
9060 auto *Module = MF->getFunction().getParent();
9061 auto *Function = Module->getFunction(Symbol);
9062
9063 if (OutFunction != nullptr)
9064 *OutFunction = Function;
9065
9066 if (Function != nullptr) {
9067 auto PtrTy = TLI->getPointerTy(getDataLayout(), Function->getAddressSpace());
9068 return getGlobalAddress(Function, SDLoc(Op), PtrTy);
9069 }
9070
9071 std::string ErrorStr;
9072 raw_string_ostream ErrorFormatter(ErrorStr);
9073
9074 ErrorFormatter << "Undefined external symbol ";
9075 ErrorFormatter << '"' << Symbol << '"';
9076 ErrorFormatter.flush();
9077
9078 report_fatal_error(ErrorStr);
9079 }
9080
9081 //===----------------------------------------------------------------------===//
9082 // SDNode Class
9083 //===----------------------------------------------------------------------===//
9084
9085 bool llvm::isNullConstant(SDValue V) {
9086 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
9087 return Const != nullptr && Const->isNullValue();
9088 }
9089
9090 bool llvm::isNullFPConstant(SDValue V) {
9091 ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V);
9092 return Const != nullptr && Const->isZero() && !Const->isNegative();
9093 }
9094
9095 bool llvm::isAllOnesConstant(SDValue V) {
9096 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
9097 return Const != nullptr && Const->isAllOnesValue();
9098 }
9099
9100 bool llvm::isOneConstant(SDValue V) {
9101 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
9102 return Const != nullptr && Const->isOne();
9103 }
9104
9105 SDValue llvm::peekThroughBitcasts(SDValue V) {
9106 while (V.getOpcode() == ISD::BITCAST)
9107 V = V.getOperand(0);
9108 return V;
9109 }
9110
9111 SDValue llvm::peekThroughOneUseBitcasts(SDValue V) {
9112 while (V.getOpcode() == ISD::BITCAST && V.getOperand(0).hasOneUse())
9113 V = V.getOperand(0);
9114 return V;
9115 }
9116
9117 SDValue llvm::peekThroughExtractSubvectors(SDValue V) {
9118 while (V.getOpcode() == ISD::EXTRACT_SUBVECTOR)
9119 V = V.getOperand(0);
9120 return V;
9121 }
9122
9123 bool llvm::isBitwiseNot(SDValue V, bool AllowUndefs) {
9124 if (V.getOpcode() != ISD::XOR)
9125 return false;
9126 V = peekThroughBitcasts(V.getOperand(1));
9127 unsigned NumBits = V.getScalarValueSizeInBits();
9128 ConstantSDNode *C =
9129 isConstOrConstSplat(V, AllowUndefs, /*AllowTruncation*/ true);
9130 return C && (C->getAPIntValue().countTrailingOnes() >= NumBits);
9131 }
9132
9133 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, bool AllowUndefs,
9134 bool AllowTruncation) {
9135 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
9136 return CN;
9137
9138 // SplatVectors can truncate their operands. Ignore that case here unless
9139 // AllowTruncation is set.
9140 if (N->getOpcode() == ISD::SPLAT_VECTOR) {
9141 EVT VecEltVT = N->getValueType(0).getVectorElementType();
9142 if (auto *CN = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
9143 EVT CVT = CN->getValueType(0);
9144 assert(CVT.bitsGE(VecEltVT) && "Illegal splat_vector element extension");
9145 if (AllowTruncation || CVT == VecEltVT)
9146 return CN;
9147 }
9148 }
9149
9150 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
9151 BitVector UndefElements;
9152 ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements);
9153
9154 // BuildVectors can truncate their operands. Ignore that case here unless
9155 // AllowTruncation is set.
9156 if (CN && (UndefElements.none() || AllowUndefs)) {
9157 EVT CVT = CN->getValueType(0);
9158 EVT NSVT = N.getValueType().getScalarType();
9159 assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension");
9160 if (AllowTruncation || (CVT == NSVT))
9161 return CN;
9162 }
9163 }
9164
9165 return nullptr;
9166 }
9167
9168 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, const APInt &DemandedElts,
9169 bool AllowUndefs,
9170 bool AllowTruncation) {
9171 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
9172 return CN;
9173
9174 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
9175 BitVector UndefElements;
9176 ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements);
9177
9178 // BuildVectors can truncate their operands. Ignore that case here unless
9179 // AllowTruncation is set.
9180 if (CN && (UndefElements.none() || AllowUndefs)) {
9181 EVT CVT = CN->getValueType(0);
9182 EVT NSVT = N.getValueType().getScalarType();
9183 assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension");
9184 if (AllowTruncation || (CVT == NSVT))
9185 return CN;
9186 }
9187 }
9188
9189 return nullptr;
9190 }
9191
9192 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, bool AllowUndefs) {
9193 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
9194 return CN;
9195
9196 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
9197 BitVector UndefElements;
9198 ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements);
9199 if (CN && (UndefElements.none() || AllowUndefs))
9200 return CN;
9201 }
9202
9203 if (N.getOpcode() == ISD::SPLAT_VECTOR)
9204 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N.getOperand(0)))
9205 return CN;
9206
9207 return nullptr;
9208 }
9209
9210 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N,
9211 const APInt &DemandedElts,
9212 bool AllowUndefs) {
9213 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
9214 return CN;
9215
9216 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
9217 BitVector UndefElements;
9218 ConstantFPSDNode *CN =
9219 BV->getConstantFPSplatNode(DemandedElts, &UndefElements);
9220 if (CN && (UndefElements.none() || AllowUndefs))
9221 return CN;
9222 }
9223
9224 return nullptr;
9225 }
9226
9227 bool llvm::isNullOrNullSplat(SDValue N, bool AllowUndefs) {
9228 // TODO: may want to use peekThroughBitcast() here.
9229 ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs);
9230 return C && C->isNullValue();
9231 }
9232
9233 bool llvm::isOneOrOneSplat(SDValue N) {
9234 // TODO: may want to use peekThroughBitcast() here.
9235 unsigned BitWidth = N.getScalarValueSizeInBits();
9236 ConstantSDNode *C = isConstOrConstSplat(N);
9237 return C && C->isOne() && C->getValueSizeInBits(0) == BitWidth;
9238 }
9239
9240 bool llvm::isAllOnesOrAllOnesSplat(SDValue N) {
9241 N = peekThroughBitcasts(N);
9242 unsigned BitWidth = N.getScalarValueSizeInBits();
9243 ConstantSDNode *C = isConstOrConstSplat(N);
9244 return C && C->isAllOnesValue() && C->getValueSizeInBits(0) == BitWidth;
9245 }
9246
9247 HandleSDNode::~HandleSDNode() {
9248 DropOperands();
9249 }
9250
9251 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
9252 const DebugLoc &DL,
9253 const GlobalValue *GA, EVT VT,
9254 int64_t o, unsigned TF)
9255 : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
9256 TheGlobal = GA;
9257 }
9258
9259 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl,
9260 EVT VT, unsigned SrcAS,
9261 unsigned DestAS)
9262 : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)),
9263 SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}
9264
9265 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
9266 SDVTList VTs, EVT memvt, MachineMemOperand *mmo)
9267 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
9268 MemSDNodeBits.IsVolatile = MMO->isVolatile();
9269 MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal();
9270 MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable();
9271 MemSDNodeBits.IsInvariant = MMO->isInvariant();
9272
9273 // We check here that the size of the memory operand fits within the size of
9274 // the MMO. This is because the MMO might indicate only a possible address
9275 // range instead of specifying the affected memory addresses precisely.
9276 // TODO: Make MachineMemOperands aware of scalable vectors.
9277 assert(memvt.getStoreSize().getKnownMinSize() <= MMO->getSize() &&
9278 "Size mismatch!");
9279 }
9280
9281 /// Profile - Gather unique data for the node.
9282 ///
9283 void SDNode::Profile(FoldingSetNodeID &ID) const {
9284 AddNodeIDNode(ID, this);
9285 }
9286
9287 namespace {
9288
9289 struct EVTArray {
9290 std::vector<EVT> VTs;
9291
9292   EVTArray() {
9293 VTs.reserve(MVT::LAST_VALUETYPE);
9294 for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
9295 VTs.push_back(MVT((MVT::SimpleValueType)i));
9296 }
9297 };
9298
9299 } // end anonymous namespace
9300
9301 static ManagedStatic<std::set<EVT, EVT::compareRawBits>> EVTs;
9302 static ManagedStatic<EVTArray> SimpleVTArray;
9303 static ManagedStatic<sys::SmartMutex<true>> VTMutex;
9304
9305 /// getValueTypeList - Return a pointer to the specified value type.
9306 ///
9307 const EVT *SDNode::getValueTypeList(EVT VT) {
9308 if (VT.isExtended()) {
9309 sys::SmartScopedLock<true> Lock(*VTMutex);
9310 return &(*EVTs->insert(VT).first);
9311 } else {
9312 assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
9313 "Value type out of range!");
9314 return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
9315 }
9316 }
9317
9318 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
9319 /// indicated value. This method ignores uses of other values defined by this
9320 /// operation.
9321 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
9322 assert(Value < getNumValues() && "Bad value!");
9323
9324 // TODO: Only iterate over uses of a given value of the node
9325 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
9326 if (UI.getUse().getResNo() == Value) {
9327 if (NUses == 0)
9328 return false;
9329 --NUses;
9330 }
9331 }
9332
9333 // Found exactly the right number of uses?
9334 return NUses == 0;
9335 }
9336
9337 /// hasAnyUseOfValue - Return true if there are any use of the indicated
9338 /// value. This method ignores uses of other values defined by this operation.
9339 bool SDNode::hasAnyUseOfValue(unsigned Value) const {
9340 assert(Value < getNumValues() && "Bad value!");
9341
9342 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
9343 if (UI.getUse().getResNo() == Value)
9344 return true;
9345
9346 return false;
9347 }
9348
9349 /// isOnlyUserOf - Return true if this node is the only use of N.
9350 bool SDNode::isOnlyUserOf(const SDNode *N) const {
9351 bool Seen = false;
9352 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
9353 SDNode *User = *I;
9354 if (User == this)
9355 Seen = true;
9356 else
9357 return false;
9358 }
9359
9360 return Seen;
9361 }
9362
9363 /// Return true if the only users of N are contained in Nodes.
9364 bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) {
9365 bool Seen = false;
9366 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
9367 SDNode *User = *I;
9368 if (llvm::is_contained(Nodes, User))
9369 Seen = true;
9370 else
9371 return false;
9372 }
9373
9374 return Seen;
9375 }
9376
9377 /// isOperand - Return true if this node is an operand of N.
9378 bool SDValue::isOperandOf(const SDNode *N) const {
9379 return is_contained(N->op_values(), *this);
9380 }
9381
9382 bool SDNode::isOperandOf(const SDNode *N) const {
9383 return any_of(N->op_values(),
9384 [this](SDValue Op) { return this == Op.getNode(); });
9385 }
9386
9387 /// reachesChainWithoutSideEffects - Return true if this operand (which must
9388 /// be a chain) reaches the specified operand without crossing any
9389 /// side-effecting instructions on any chain path. In practice, this looks
9390 /// through token factors and non-volatile loads. In order to remain efficient,
9391 /// this only looks a couple of nodes in; it does not do an exhaustive search.
9392 ///
9393 /// Note that we only need to examine chains when we're searching for
9394 /// side-effects; SelectionDAG requires that all side-effects are represented
9395 /// by chains, even if another operand would force a specific ordering. This
9396 /// constraint is necessary to allow transformations like splitting loads.
9397 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
9398 unsigned Depth) const {
9399 if (*this == Dest) return true;
9400
9401 // Don't search too deeply, we just want to be able to see through
9402 // TokenFactor's etc.
9403 if (Depth == 0) return false;
9404
9405 // If this is a token factor, all inputs to the TF happen in parallel.
9406 if (getOpcode() == ISD::TokenFactor) {
9407 // First, try a shallow search.
9408 if (is_contained((*this)->ops(), Dest)) {
9409 // We found the chain we want as an operand of this TokenFactor.
9410 // Essentially, we reach the chain without side-effects if we could
9411 // serialize the TokenFactor into a simple chain of operations with
9412 // Dest as the last operation. This is automatically true if the
9413 // chain has one use: there are no other ordering constraints.
9414 // If the chain has more than one use, we give up: some other
9415 // use of Dest might force a side-effect between Dest and the current
9416 // node.
9417 if (Dest.hasOneUse())
9418 return true;
9419 }
9420 // Next, try a deep search: check whether every operand of the TokenFactor
9421 // reaches Dest.
9422 return llvm::all_of((*this)->ops(), [=](SDValue Op) {
9423 return Op.reachesChainWithoutSideEffects(Dest, Depth - 1);
9424 });
9425 }
9426
9427 // Loads don't have side effects, look through them.
9428 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
9429 if (Ld->isUnordered())
9430 return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
9431 }
9432 return false;
9433 }
9434
9435 bool SDNode::hasPredecessor(const SDNode *N) const {
9436 SmallPtrSet<const SDNode *, 32> Visited;
9437 SmallVector<const SDNode *, 16> Worklist;
9438 Worklist.push_back(this);
9439 return hasPredecessorHelper(N, Visited, Worklist);
9440 }
9441
9442 void SDNode::intersectFlagsWith(const SDNodeFlags Flags) {
9443 this->Flags.intersectWith(Flags);
9444 }
9445
9446 SDValue
9447 SelectionDAG::matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp,
9448 ArrayRef<ISD::NodeType> CandidateBinOps,
9449 bool AllowPartials) {
9450 // The pattern must end in an extract from index 0.
9451 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9452 !isNullConstant(Extract->getOperand(1)))
9453 return SDValue();
9454
9455 // Match against one of the candidate binary ops.
9456 SDValue Op = Extract->getOperand(0);
9457 if (llvm::none_of(CandidateBinOps, [Op](ISD::NodeType BinOp) {
9458 return Op.getOpcode() == unsigned(BinOp);
9459 }))
9460 return SDValue();
9461
9462 // Floating-point reductions may require relaxed constraints on the final step
9463 // of the reduction because they may reorder intermediate operations.
9464 unsigned CandidateBinOp = Op.getOpcode();
9465 if (Op.getValueType().isFloatingPoint()) {
9466 SDNodeFlags Flags = Op->getFlags();
9467 switch (CandidateBinOp) {
9468 case ISD::FADD:
9469 if (!Flags.hasNoSignedZeros() || !Flags.hasAllowReassociation())
9470 return SDValue();
9471 break;
9472 default:
9473 llvm_unreachable("Unhandled FP opcode for binop reduction");
9474 }
9475 }
9476
9477 // Matching failed - attempt to see if we did enough stages that a partial
9478 // reduction from a subvector is possible.
9479 auto PartialReduction = [&](SDValue Op, unsigned NumSubElts) {
9480 if (!AllowPartials || !Op)
9481 return SDValue();
9482 EVT OpVT = Op.getValueType();
9483 EVT OpSVT = OpVT.getScalarType();
9484 EVT SubVT = EVT::getVectorVT(*getContext(), OpSVT, NumSubElts);
9485 if (!TLI->isExtractSubvectorCheap(SubVT, OpVT, 0))
9486 return SDValue();
9487 BinOp = (ISD::NodeType)CandidateBinOp;
9488 return getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Op), SubVT, Op,
9489 getVectorIdxConstant(0, SDLoc(Op)));
9490 };
9491
9492 // At each stage, we're looking for something that looks like:
9493 // %s = shufflevector <8 x i32> %op, <8 x i32> undef,
9494 // <8 x i32> <i32 2, i32 3, i32 undef, i32 undef,
9495 // i32 undef, i32 undef, i32 undef, i32 undef>
9496 // %a = binop <8 x i32> %op, %s
9497 // Where the mask changes according to the stage. E.g. for a 3-stage pyramid,
9498 // we expect something like:
9499 // <4,5,6,7,u,u,u,u>
9500 // <2,3,u,u,u,u,u,u>
9501 // <1,u,u,u,u,u,u,u>
9502 // While a partial reduction match would be:
9503 // <2,3,u,u,u,u,u,u>
9504 // <1,u,u,u,u,u,u,u>
9505 unsigned Stages = Log2_32(Op.getValueType().getVectorNumElements());
9506 SDValue PrevOp;
9507 for (unsigned i = 0; i < Stages; ++i) {
9508 unsigned MaskEnd = (1 << i);
9509
9510 if (Op.getOpcode() != CandidateBinOp)
9511 return PartialReduction(PrevOp, MaskEnd);
9512
9513 SDValue Op0 = Op.getOperand(0);
9514 SDValue Op1 = Op.getOperand(1);
9515
9516 ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(Op0);
9517 if (Shuffle) {
9518 Op = Op1;
9519 } else {
9520 Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1);
9521 Op = Op0;
9522 }
9523
9524 // The first operand of the shuffle should be the same as the other operand
9525 // of the binop.
9526 if (!Shuffle || Shuffle->getOperand(0) != Op)
9527 return PartialReduction(PrevOp, MaskEnd);
9528
9529 // Verify the shuffle has the expected (at this stage of the pyramid) mask.
9530 for (int Index = 0; Index < (int)MaskEnd; ++Index)
9531 if (Shuffle->getMaskElt(Index) != (int)(MaskEnd + Index))
9532 return PartialReduction(PrevOp, MaskEnd);
9533
9534 PrevOp = Op;
9535 }
9536
9537 // Handle subvector reductions, which tend to appear after the shuffle
9538 // reduction stages.
9539 while (Op.getOpcode() == CandidateBinOp) {
9540 unsigned NumElts = Op.getValueType().getVectorNumElements();
9541 SDValue Op0 = Op.getOperand(0);
9542 SDValue Op1 = Op.getOperand(1);
9543 if (Op0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
9544 Op1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
9545 Op0.getOperand(0) != Op1.getOperand(0))
9546 break;
9547 SDValue Src = Op0.getOperand(0);
9548 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
9549 if (NumSrcElts != (2 * NumElts))
9550 break;
9551 if (!(Op0.getConstantOperandAPInt(1) == 0 &&
9552 Op1.getConstantOperandAPInt(1) == NumElts) &&
9553 !(Op1.getConstantOperandAPInt(1) == 0 &&
9554 Op0.getConstantOperandAPInt(1) == NumElts))
9555 break;
9556 Op = Src;
9557 }
9558
9559 BinOp = (ISD::NodeType)CandidateBinOp;
9560 return Op;
9561 }
9562
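/// Illustrative example (not from the original source): with ResNE == 0,
/// unrolling (add v4i32 %a, %b) yields four pairs of EXTRACT_VECTOR_ELT
/// nodes feeding four scalar i32 ADDs, whose results are recombined by a
/// single BUILD_VECTOR of type v4i32.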
9563 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
9564 assert(N->getNumValues() == 1 &&
9565 "Can't unroll a vector with multiple results!");
9566
9567 EVT VT = N->getValueType(0);
9568 unsigned NE = VT.getVectorNumElements();
9569 EVT EltVT = VT.getVectorElementType();
9570 SDLoc dl(N);
9571
9572 SmallVector<SDValue, 8> Scalars;
9573 SmallVector<SDValue, 4> Operands(N->getNumOperands());
9574
9575 // If ResNE is 0, fully unroll the vector op.
9576 if (ResNE == 0)
9577 ResNE = NE;
9578 else if (NE > ResNE)
9579 NE = ResNE;
9580
9581 unsigned i;
9582 for (i= 0; i != NE; ++i) {
9583 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
9584 SDValue Operand = N->getOperand(j);
9585 EVT OperandVT = Operand.getValueType();
9586 if (OperandVT.isVector()) {
9587 // A vector operand; extract a single element.
9588 EVT OperandEltVT = OperandVT.getVectorElementType();
9589 Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT,
9590 Operand, getVectorIdxConstant(i, dl));
9591 } else {
9592 // A scalar operand; just use it as is.
9593 Operands[j] = Operand;
9594 }
9595 }
9596
9597 switch (N->getOpcode()) {
9598 default: {
9599 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands,
9600 N->getFlags()));
9601 break;
9602 }
9603 case ISD::VSELECT:
9604 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
9605 break;
9606 case ISD::SHL:
9607 case ISD::SRA:
9608 case ISD::SRL:
9609 case ISD::ROTL:
9610 case ISD::ROTR:
9611 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
9612 getShiftAmountOperand(Operands[0].getValueType(),
9613 Operands[1])));
9614 break;
9615 case ISD::SIGN_EXTEND_INREG: {
9616 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
9617 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
9618 Operands[0],
9619 getValueType(ExtVT)));
9620 }
9621 }
9622 }
9623
9624 for (; i < ResNE; ++i)
9625 Scalars.push_back(getUNDEF(EltVT));
9626
9627 EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE);
9628 return getBuildVector(VecVT, dl, Scalars);
9629 }
9630
9631 std::pair<SDValue, SDValue> SelectionDAG::UnrollVectorOverflowOp(
9632 SDNode *N, unsigned ResNE) {
9633 unsigned Opcode = N->getOpcode();
9634 assert((Opcode == ISD::UADDO || Opcode == ISD::SADDO ||
9635 Opcode == ISD::USUBO || Opcode == ISD::SSUBO ||
9636 Opcode == ISD::UMULO || Opcode == ISD::SMULO) &&
9637 "Expected an overflow opcode");
9638
9639 EVT ResVT = N->getValueType(0);
9640 EVT OvVT = N->getValueType(1);
9641 EVT ResEltVT = ResVT.getVectorElementType();
9642 EVT OvEltVT = OvVT.getVectorElementType();
9643 SDLoc dl(N);
9644
9645 // If ResNE is 0, fully unroll the vector op.
9646 unsigned NE = ResVT.getVectorNumElements();
9647 if (ResNE == 0)
9648 ResNE = NE;
9649 else if (NE > ResNE)
9650 NE = ResNE;
9651
9652 SmallVector<SDValue, 8> LHSScalars;
9653 SmallVector<SDValue, 8> RHSScalars;
9654 ExtractVectorElements(N->getOperand(0), LHSScalars, 0, NE);
9655 ExtractVectorElements(N->getOperand(1), RHSScalars, 0, NE);
9656
9657 EVT SVT = TLI->getSetCCResultType(getDataLayout(), *getContext(), ResEltVT);
9658 SDVTList VTs = getVTList(ResEltVT, SVT);
9659 SmallVector<SDValue, 8> ResScalars;
9660 SmallVector<SDValue, 8> OvScalars;
9661 for (unsigned i = 0; i < NE; ++i) {
9662 SDValue Res = getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]);
9663 SDValue Ov =
9664 getSelect(dl, OvEltVT, Res.getValue(1),
9665 getBoolConstant(true, dl, OvEltVT, ResVT),
9666 getConstant(0, dl, OvEltVT));
9667
9668 ResScalars.push_back(Res);
9669 OvScalars.push_back(Ov);
9670 }
9671
9672 ResScalars.append(ResNE - NE, getUNDEF(ResEltVT));
9673 OvScalars.append(ResNE - NE, getUNDEF(OvEltVT));
9674
9675 EVT NewResVT = EVT::getVectorVT(*getContext(), ResEltVT, ResNE);
9676 EVT NewOvVT = EVT::getVectorVT(*getContext(), OvEltVT, ResNE);
9677 return std::make_pair(getBuildVector(NewResVT, dl, ResScalars),
9678 getBuildVector(NewOvVT, dl, OvScalars));
9679 }
9680
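/// Example reading (sketch): both loads must be simple, non-indexed and on
/// the same chain, LD's access size must be exactly 'Bytes', and the two
/// decomposed addresses must share a base and differ by exactly Dist * Bytes.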
9681 bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD,
9682 LoadSDNode *Base,
9683 unsigned Bytes,
9684 int Dist) const {
9685 if (LD->isVolatile() || Base->isVolatile())
9686 return false;
9687 // TODO: probably too restrictive for atomics, revisit
9688 if (!LD->isSimple())
9689 return false;
9690 if (LD->isIndexed() || Base->isIndexed())
9691 return false;
9692 if (LD->getChain() != Base->getChain())
9693 return false;
9694 EVT VT = LD->getValueType(0);
9695 if (VT.getSizeInBits() / 8 != Bytes)
9696 return false;
9697
9698 auto BaseLocDecomp = BaseIndexOffset::match(Base, *this);
9699 auto LocDecomp = BaseIndexOffset::match(LD, *this);
9700
9701 int64_t Offset = 0;
9702 if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset))
9703 return (Dist * Bytes == Offset);
9704 return false;
9705 }
9706
9707 /// InferPtrAlign - Infer alignment of a load / store address. Return None
9708 /// if it cannot be inferred.
9709 MaybeAlign SelectionDAG::InferPtrAlign(SDValue Ptr) const {
9710 // If this is a GlobalAddress + cst, return the alignment.
9711 const GlobalValue *GV = nullptr;
9712 int64_t GVOffset = 0;
9713 if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
9714 unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
9715 KnownBits Known(PtrWidth);
9716 llvm::computeKnownBits(GV, Known, getDataLayout());
9717 unsigned AlignBits = Known.countMinTrailingZeros();
9718 if (AlignBits)
9719 return commonAlignment(Align(1ull << std::min(31U, AlignBits)), GVOffset);
9720 }
9721
9722 // If this is a direct reference to a stack slot, use information about the
9723 // stack slot's alignment.
9724 int FrameIdx = INT_MIN;
9725 int64_t FrameOffset = 0;
9726 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
9727 FrameIdx = FI->getIndex();
9728 } else if (isBaseWithConstantOffset(Ptr) &&
9729 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
9730 // Handle FI+Cst
9731 FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
9732 FrameOffset = Ptr.getConstantOperandVal(1);
9733 }
9734
9735 if (FrameIdx != INT_MIN) {
9736 const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
9737 return commonAlignment(MFI.getObjectAlign(FrameIdx), FrameOffset);
9738 }
9739
9740 return None;
9741 }
9742
9743 /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
9744 /// which is split (or expanded) into two not necessarily identical pieces.
9745 std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
9746 // Currently all types are split in half.
9747 EVT LoVT, HiVT;
9748 if (!VT.isVector())
9749 LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
9750 else
9751 LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext());
9752
9753 return std::make_pair(LoVT, HiVT);
9754 }
9755
9756 /// GetDependentSplitDestVTs - Compute the VTs needed for the low/hi parts of a
9757 /// type, dependent on an enveloping VT that has been split into two identical
9758 /// pieces. Sets the HiIsEmpty flag when hi type has zero storage size.
9759 std::pair<EVT, EVT>
9760 SelectionDAG::GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT,
9761 bool *HiIsEmpty) const {
9762 EVT EltTp = VT.getVectorElementType();
9763 // Examples:
9764 // custom VL=8 with enveloping VL=8/8 yields 8/0 (hi empty)
9765 // custom VL=9 with enveloping VL=8/8 yields 8/1
9766 // custom VL=10 with enveloping VL=8/8 yields 8/2
9767 // etc.
9768 ElementCount VTNumElts = VT.getVectorElementCount();
9769 ElementCount EnvNumElts = EnvVT.getVectorElementCount();
9770 assert(VTNumElts.isScalable() == EnvNumElts.isScalable() &&
9771 "Mixing fixed width and scalable vectors when enveloping a type");
9772 EVT LoVT, HiVT;
9773 if (VTNumElts.getKnownMinValue() > EnvNumElts.getKnownMinValue()) {
9774 LoVT = EnvVT;
9775 HiVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts - EnvNumElts);
9776 *HiIsEmpty = false;
9777 } else {
9778 // Flag that hi type has zero storage size, but return split envelop type
9779 // (this would be easier if vector types with zero elements were allowed).
9780 LoVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts);
9781 HiVT = EnvVT;
9782 *HiIsEmpty = true;
9783 }
9784 return std::make_pair(LoVT, HiVT);
9785 }
9786
9787 /// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
9788 /// low/high part.
9789 std::pair<SDValue, SDValue>
9790 SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
9791 const EVT &HiVT) {
9792 assert(LoVT.isScalableVector() == HiVT.isScalableVector() &&
9793 LoVT.isScalableVector() == N.getValueType().isScalableVector() &&
9794 "Splitting vector with an invalid mixture of fixed and scalable "
9795 "vector types");
9796 assert(LoVT.getVectorMinNumElements() + HiVT.getVectorMinNumElements() <=
9797 N.getValueType().getVectorMinNumElements() &&
9798 "More vector elements requested than available!");
9799 SDValue Lo, Hi;
9800 Lo =
9801 getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N, getVectorIdxConstant(0, DL));
9802 // For scalable vectors it is safe to use LoVT.getVectorMinNumElements()
9803 // (rather than having to use ElementCount), because EXTRACT_SUBVECTOR scales
9804 // IDX with the runtime scaling factor of the result vector type. For
9805 // fixed-width result vectors, that runtime scaling factor is 1.
9806 Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
9807 getVectorIdxConstant(LoVT.getVectorMinNumElements(), DL));
9808 return std::make_pair(Lo, Hi);
9809 }
9810
9811 /// Widen the vector up to the next power of two using INSERT_SUBVECTOR.
9812 SDValue SelectionDAG::WidenVector(const SDValue &N, const SDLoc &DL) {
9813 EVT VT = N.getValueType();
9814 EVT WideVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
9815 NextPowerOf2(VT.getVectorNumElements()));
9816 return getNode(ISD::INSERT_SUBVECTOR, DL, WideVT, getUNDEF(WideVT), N,
9817 getVectorIdxConstant(0, DL));
9818 }
9819
9820 void SelectionDAG::ExtractVectorElements(SDValue Op,
9821 SmallVectorImpl<SDValue> &Args,
9822 unsigned Start, unsigned Count,
9823 EVT EltVT) {
9824 EVT VT = Op.getValueType();
9825 if (Count == 0)
9826 Count = VT.getVectorNumElements();
9827 if (EltVT == EVT())
9828 EltVT = VT.getVectorElementType();
9829 SDLoc SL(Op);
9830 for (unsigned i = Start, e = Start + Count; i != e; ++i) {
9831 Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Op,
9832 getVectorIdxConstant(i, SL)));
9833 }
9834 }
9835
9836 // getAddressSpace - Return the address space this GlobalAddress belongs to.
9837 unsigned GlobalAddressSDNode::getAddressSpace() const {
9838 return getGlobal()->getType()->getAddressSpace();
9839 }
9840
9841 Type *ConstantPoolSDNode::getType() const {
9842 if (isMachineConstantPoolEntry())
9843 return Val.MachineCPVal->getType();
9844 return Val.ConstVal->getType();
9845 }
9846
9847 bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
9848 unsigned &SplatBitSize,
9849 bool &HasAnyUndefs,
9850 unsigned MinSplatBits,
9851 bool IsBigEndian) const {
9852 EVT VT = getValueType(0);
9853 assert(VT.isVector() && "Expected a vector type");
9854 unsigned VecWidth = VT.getSizeInBits();
9855 if (MinSplatBits > VecWidth)
9856 return false;
9857
9858 // FIXME: The widths are based on this node's type, but build vectors can
9859 // truncate their operands.
9860 SplatValue = APInt(VecWidth, 0);
9861 SplatUndef = APInt(VecWidth, 0);
9862
9863 // Get the bits. Bits with undefined values (when the corresponding element
9864 // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
9865 // in SplatValue. If any of the values are not constant, give up and return
9866 // false.
9867 unsigned int NumOps = getNumOperands();
9868 assert(NumOps > 0 && "isConstantSplat has 0-size build vector");
9869 unsigned EltWidth = VT.getScalarSizeInBits();
9870
9871 for (unsigned j = 0; j < NumOps; ++j) {
9872 unsigned i = IsBigEndian ? NumOps - 1 - j : j;
9873 SDValue OpVal = getOperand(i);
9874 unsigned BitPos = j * EltWidth;
9875
9876 if (OpVal.isUndef())
9877 SplatUndef.setBits(BitPos, BitPos + EltWidth);
9878 else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal))
9879 SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
9880 else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
9881 SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
9882 else
9883 return false;
9884 }
9885
9886 // The build_vector is all constants or undefs. Find the smallest element
9887 // size that splats the vector.
9888 HasAnyUndefs = (SplatUndef != 0);
9889
9890 // FIXME: This does not work for vectors with elements less than 8 bits.
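  // Worked example (illustrative): a v4i16 build_vector of 0x00AB starts with
  // the 64-bit pattern 0x00AB00AB00AB00AB; its 32-bit halves match, its
  // 16-bit halves match, but the 8-bit halves (0x00 vs 0xAB) differ, so the
  // loop below stops with SplatBitSize == 16.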
9891 while (VecWidth > 8) {
9892 unsigned HalfSize = VecWidth / 2;
9893 APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
9894 APInt LowValue = SplatValue.trunc(HalfSize);
9895 APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
9896 APInt LowUndef = SplatUndef.trunc(HalfSize);
9897
9898 // If the two halves do not match (ignoring undef bits), stop here.
9899 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
9900 MinSplatBits > HalfSize)
9901 break;
9902
9903 SplatValue = HighValue | LowValue;
9904 SplatUndef = HighUndef & LowUndef;
9905
9906 VecWidth = HalfSize;
9907 }
9908
9909 SplatBitSize = VecWidth;
9910 return true;
9911 }
9912
9913 SDValue BuildVectorSDNode::getSplatValue(const APInt &DemandedElts,
9914 BitVector *UndefElements) const {
9915 unsigned NumOps = getNumOperands();
9916 if (UndefElements) {
9917 UndefElements->clear();
9918 UndefElements->resize(NumOps);
9919 }
9920 assert(NumOps == DemandedElts.getBitWidth() && "Unexpected vector size");
9921 if (!DemandedElts)
9922 return SDValue();
9923 SDValue Splatted;
9924 for (unsigned i = 0; i != NumOps; ++i) {
9925 if (!DemandedElts[i])
9926 continue;
9927 SDValue Op = getOperand(i);
9928 if (Op.isUndef()) {
9929 if (UndefElements)
9930 (*UndefElements)[i] = true;
9931 } else if (!Splatted) {
9932 Splatted = Op;
9933 } else if (Splatted != Op) {
9934 return SDValue();
9935 }
9936 }
9937
9938 if (!Splatted) {
9939 unsigned FirstDemandedIdx = DemandedElts.countTrailingZeros();
9940 assert(getOperand(FirstDemandedIdx).isUndef() &&
9941 "Can only have a splat without a constant for all undefs.");
9942 return getOperand(FirstDemandedIdx);
9943 }
9944
9945 return Splatted;
9946 }
9947
9948 SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
9949 APInt DemandedElts = APInt::getAllOnesValue(getNumOperands());
9950 return getSplatValue(DemandedElts, UndefElements);
9951 }
9952
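/// Illustrative example: a BUILD_VECTOR of <a, b, a, b, a, b, a, b> yields
/// Sequence == {a, b}; undef elements act as wildcards and may match any
/// position, mirroring the treatment of undefs in getSplatValue above.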
9953 bool BuildVectorSDNode::getRepeatedSequence(const APInt &DemandedElts,
9954 SmallVectorImpl<SDValue> &Sequence,
9955 BitVector *UndefElements) const {
9956 unsigned NumOps = getNumOperands();
9957 Sequence.clear();
9958 if (UndefElements) {
9959 UndefElements->clear();
9960 UndefElements->resize(NumOps);
9961 }
9962 assert(NumOps == DemandedElts.getBitWidth() && "Unexpected vector size");
9963 if (!DemandedElts || NumOps < 2 || !isPowerOf2_32(NumOps))
9964 return false;
9965
9966 // Set the undefs even if we don't find a sequence (like getSplatValue).
9967 if (UndefElements)
9968 for (unsigned I = 0; I != NumOps; ++I)
9969 if (DemandedElts[I] && getOperand(I).isUndef())
9970 (*UndefElements)[I] = true;
9971
9972 // Iteratively widen the sequence length looking for repetitions.
9973 for (unsigned SeqLen = 1; SeqLen < NumOps; SeqLen *= 2) {
9974 Sequence.append(SeqLen, SDValue());
9975 for (unsigned I = 0; I != NumOps; ++I) {
9976 if (!DemandedElts[I])
9977 continue;
9978 SDValue &SeqOp = Sequence[I % SeqLen];
9979 SDValue Op = getOperand(I);
9980 if (Op.isUndef()) {
9981 if (!SeqOp)
9982 SeqOp = Op;
9983 continue;
9984 }
9985 if (SeqOp && !SeqOp.isUndef() && SeqOp != Op) {
9986 Sequence.clear();
9987 break;
9988 }
9989 SeqOp = Op;
9990 }
9991 if (!Sequence.empty())
9992 return true;
9993 }
9994
9995 assert(Sequence.empty() && "Failed to empty non-repeating sequence pattern");
9996 return false;
9997 }
9998
9999 bool BuildVectorSDNode::getRepeatedSequence(SmallVectorImpl<SDValue> &Sequence,
10000 BitVector *UndefElements) const {
10001 APInt DemandedElts = APInt::getAllOnesValue(getNumOperands());
10002 return getRepeatedSequence(DemandedElts, Sequence, UndefElements);
10003 }
10004
10005 ConstantSDNode *
10006 BuildVectorSDNode::getConstantSplatNode(const APInt &DemandedElts,
10007 BitVector *UndefElements) const {
10008 return dyn_cast_or_null<ConstantSDNode>(
10009 getSplatValue(DemandedElts, UndefElements));
10010 }
10011
10012 ConstantSDNode *
10013 BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
10014 return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
10015 }
10016
10017 ConstantFPSDNode *
10018 BuildVectorSDNode::getConstantFPSplatNode(const APInt &DemandedElts,
10019 BitVector *UndefElements) const {
10020 return dyn_cast_or_null<ConstantFPSDNode>(
10021 getSplatValue(DemandedElts, UndefElements));
10022 }
10023
10024 ConstantFPSDNode *
10025 BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
10026 return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
10027 }
10028
10029 int32_t
10030 BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
10031 uint32_t BitWidth) const {
10032 if (ConstantFPSDNode *CN =
10033 dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
10034 bool IsExact;
10035 APSInt IntVal(BitWidth);
10036 const APFloat &APF = CN->getValueAPF();
10037 if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) !=
10038 APFloat::opOK ||
10039 !IsExact)
10040 return -1;
10041
10042 return IntVal.exactLogBase2();
10043 }
10044 return -1;
10045 }
10046
10047 bool BuildVectorSDNode::isConstant() const {
10048 for (const SDValue &Op : op_values()) {
10049 unsigned Opc = Op.getOpcode();
10050 if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
10051 return false;
10052 }
10053 return true;
10054 }
10055
10056 bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
10057 // Find the first non-undef value in the shuffle mask.
10058 unsigned i, e;
10059 for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
10060 /* search */;
10061
10062 // If all elements are undefined, this shuffle can be considered a splat
10063 // (although it should eventually get simplified away completely).
10064 if (i == e)
10065 return true;
10066
10067 // Make sure all remaining elements are either undef or the same as the first
10068 // non-undef value.
10069 for (int Idx = Mask[i]; i != e; ++i)
10070 if (Mask[i] >= 0 && Mask[i] != Idx)
10071 return false;
10072 return true;
10073 }
10074
10075 // Returns the SDNode if it is a constant integer BuildVector
10076 // or constant integer.
10077 SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) const {
10078 if (isa<ConstantSDNode>(N))
10079 return N.getNode();
10080 if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
10081 return N.getNode();
10082 // Treat a GlobalAddress supporting constant offset folding as a
10083 // constant integer.
10084 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N))
10085 if (GA->getOpcode() == ISD::GlobalAddress &&
10086 TLI->isOffsetFoldingLegal(GA))
10087 return GA;
10088 if ((N.getOpcode() == ISD::SPLAT_VECTOR) &&
10089 isa<ConstantSDNode>(N.getOperand(0)))
10090 return N.getNode();
10091 return nullptr;
10092 }
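
// Illustrative consequence (sketch only): callers such as the
// canonicalization of commutative operands in getNode() treat anything
// recognized here as a constant, so e.g. (add C, X) becomes (add X, C)
// whether C is a ConstantSDNode, a constant BUILD_VECTOR, an offset-foldable
// GlobalAddress, or a constant SPLAT_VECTOR.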

// Returns the SDNode if it is a constant float BuildVector
// or constant float.
SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) const {
  if (isa<ConstantFPSDNode>(N))
    return N.getNode();

  if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
    return N.getNode();

  return nullptr;
}

void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
  assert(!Node->OperandList && "Node already has operands");
  assert(SDNode::getMaxNumOperands() >= Vals.size() &&
         "too many operands to fit into SDNode");
  SDUse *Ops = OperandRecycler.allocate(
      ArrayRecycler<SDUse>::Capacity::get(Vals.size()), OperandAllocator);

  bool IsDivergent = false;
  for (unsigned I = 0; I != Vals.size(); ++I) {
    Ops[I].setUser(Node);
    Ops[I].setInitial(Vals[I]);
    // Skip Chain operands; they do not carry divergence.
    if (Ops[I].Val.getValueType() != MVT::Other)
      IsDivergent |= Ops[I].getNode()->isDivergent();
  }
  Node->NumOperands = Vals.size();
  Node->OperandList = Ops;
  if (!TLI->isSDNodeAlwaysUniform(Node)) {
    IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, DA);
    Node->SDNodeBits.IsDivergent = IsDivergent;
  }
  checkForCycles(Node);
}
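
// Informal note on the divergence handling above: a freshly created node is
// marked divergent when any non-chain operand is divergent or when the target
// reports the node itself as a source of divergence (for example, a per-thread
// value on a GPU target), unless the target declares the node always uniform.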

SDValue SelectionDAG::getTokenFactor(const SDLoc &DL,
                                     SmallVectorImpl<SDValue> &Vals) {
  size_t Limit = SDNode::getMaxNumOperands();
  while (Vals.size() > Limit) {
    unsigned SliceIdx = Vals.size() - Limit;
    auto ExtractedTFs = ArrayRef<SDValue>(Vals).slice(SliceIdx, Limit);
    SDValue NewTF = getNode(ISD::TokenFactor, DL, MVT::Other, ExtractedTFs);
    Vals.erase(Vals.begin() + SliceIdx, Vals.end());
    Vals.emplace_back(NewTF);
  }
  return getNode(ISD::TokenFactor, DL, MVT::Other, Vals);
}
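
// Illustrative usage (a minimal sketch; DAG, dl and Chains are hypothetical):
// when lowering produces more chain edges than one node can hold, callers
// collect them in a SmallVector<SDValue, 8> and merge them in a single call:
//   SDValue Chain = DAG.getTokenFactor(dl, Chains);
// The loop above folds excess operands into nested TokenFactor nodes so the
// final node never exceeds SDNode::getMaxNumOperands() operands.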

SDValue SelectionDAG::getNeutralElement(unsigned Opcode, const SDLoc &DL,
                                        EVT VT, SDNodeFlags Flags) {
  switch (Opcode) {
  default:
    return SDValue();
  case ISD::ADD:
  case ISD::OR:
  case ISD::XOR:
  case ISD::UMAX:
    return getConstant(0, DL, VT);
  case ISD::MUL:
    return getConstant(1, DL, VT);
  case ISD::AND:
  case ISD::UMIN:
    return getAllOnesConstant(DL, VT);
  case ISD::SMAX:
    return getConstant(APInt::getSignedMinValue(VT.getSizeInBits()), DL, VT);
  case ISD::SMIN:
    return getConstant(APInt::getSignedMaxValue(VT.getSizeInBits()), DL, VT);
  case ISD::FADD:
    return getConstantFP(-0.0, DL, VT);
  case ISD::FMUL:
    return getConstantFP(1.0, DL, VT);
  case ISD::FMINNUM:
  case ISD::FMAXNUM: {
    // Neutral element for fminnum is NaN, Inf or FLT_MAX, depending on FMF.
    const fltSemantics &Semantics = EVTToAPFloatSemantics(VT);
    APFloat NeutralAF = !Flags.hasNoNaNs() ? APFloat::getQNaN(Semantics) :
                        !Flags.hasNoInfs() ? APFloat::getInf(Semantics) :
                        APFloat::getLargest(Semantics);
    if (Opcode == ISD::FMAXNUM)
      NeutralAF.changeSign();

    return getConstantFP(NeutralAF, DL, VT);
  }
  }
}
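
// Illustrative example (sketch only): the neutral element is the identity of
// the operation, e.g. getNeutralElement(ISD::ADD, ...) gives 0 and
// getNeutralElement(ISD::AND, ...) gives all-ones, so callers such as
// vector-reduction widening can fill extra lanes without changing the result.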

#ifndef NDEBUG
static void checkForCyclesHelper(const SDNode *N,
                                 SmallPtrSetImpl<const SDNode*> &Visited,
                                 SmallPtrSetImpl<const SDNode*> &Checked,
                                 const llvm::SelectionDAG *DAG) {
  // If this node has already been checked, don't check it again.
  if (Checked.count(N))
    return;

  // If a node has already been visited on this depth-first walk, reject it as
  // a cycle.
  if (!Visited.insert(N).second) {
    errs() << "Detected cycle in SelectionDAG\n";
    dbgs() << "Offending node:\n";
    N->dumprFull(DAG); dbgs() << "\n";
    abort();
  }

  for (const SDValue &Op : N->op_values())
    checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);

  Checked.insert(N);
  Visited.erase(N);
}
#endif

void llvm::checkForCycles(const llvm::SDNode *N,
                          const llvm::SelectionDAG *DAG,
                          bool force) {
#ifndef NDEBUG
  bool check = force;
#ifdef EXPENSIVE_CHECKS
  check = true;
#endif // EXPENSIVE_CHECKS
  if (check) {
    assert(N && "Checking nonexistent SDNode");
    SmallPtrSet<const SDNode*, 32> visited;
    SmallPtrSet<const SDNode*, 32> checked;
    checkForCyclesHelper(N, visited, checked, DAG);
  }
#endif // !NDEBUG
}

void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
  checkForCycles(DAG->getRoot().getNode(), DAG, force);
}