1 //===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This implements the SelectionDAG class.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "llvm/CodeGen/SelectionDAG.h"
14 #include "SDNodeDbgValue.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/APSInt.h"
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/BitVector.h"
20 #include "llvm/ADT/FoldingSet.h"
21 #include "llvm/ADT/None.h"
22 #include "llvm/ADT/STLExtras.h"
23 #include "llvm/ADT/SmallPtrSet.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/ADT/Triple.h"
26 #include "llvm/ADT/Twine.h"
27 #include "llvm/Analysis/BlockFrequencyInfo.h"
28 #include "llvm/Analysis/MemoryLocation.h"
29 #include "llvm/Analysis/ProfileSummaryInfo.h"
30 #include "llvm/Analysis/ValueTracking.h"
31 #include "llvm/CodeGen/FunctionLoweringInfo.h"
32 #include "llvm/CodeGen/ISDOpcodes.h"
33 #include "llvm/CodeGen/MachineBasicBlock.h"
34 #include "llvm/CodeGen/MachineConstantPool.h"
35 #include "llvm/CodeGen/MachineFrameInfo.h"
36 #include "llvm/CodeGen/MachineFunction.h"
37 #include "llvm/CodeGen/MachineMemOperand.h"
38 #include "llvm/CodeGen/RuntimeLibcalls.h"
39 #include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
40 #include "llvm/CodeGen/SelectionDAGNodes.h"
41 #include "llvm/CodeGen/SelectionDAGTargetInfo.h"
42 #include "llvm/CodeGen/TargetFrameLowering.h"
43 #include "llvm/CodeGen/TargetLowering.h"
44 #include "llvm/CodeGen/TargetRegisterInfo.h"
45 #include "llvm/CodeGen/TargetSubtargetInfo.h"
46 #include "llvm/CodeGen/ValueTypes.h"
47 #include "llvm/IR/Constant.h"
48 #include "llvm/IR/Constants.h"
49 #include "llvm/IR/DataLayout.h"
50 #include "llvm/IR/DebugInfoMetadata.h"
51 #include "llvm/IR/DebugLoc.h"
52 #include "llvm/IR/DerivedTypes.h"
53 #include "llvm/IR/Function.h"
54 #include "llvm/IR/GlobalValue.h"
55 #include "llvm/IR/Metadata.h"
56 #include "llvm/IR/Type.h"
57 #include "llvm/IR/Value.h"
58 #include "llvm/Support/Casting.h"
59 #include "llvm/Support/CodeGen.h"
60 #include "llvm/Support/Compiler.h"
61 #include "llvm/Support/Debug.h"
62 #include "llvm/Support/ErrorHandling.h"
63 #include "llvm/Support/KnownBits.h"
64 #include "llvm/Support/MachineValueType.h"
65 #include "llvm/Support/ManagedStatic.h"
66 #include "llvm/Support/MathExtras.h"
67 #include "llvm/Support/Mutex.h"
68 #include "llvm/Support/raw_ostream.h"
69 #include "llvm/Target/TargetMachine.h"
70 #include "llvm/Target/TargetOptions.h"
71 #include "llvm/Transforms/Utils/SizeOpts.h"
72 #include <algorithm>
73 #include <cassert>
74 #include <cstdint>
75 #include <cstdlib>
76 #include <limits>
77 #include <set>
78 #include <string>
79 #include <utility>
80 #include <vector>
81
82 using namespace llvm;
83
84 /// makeVTList - Return an instance of the SDVTList struct initialized with the
85 /// specified members.
makeVTList(const EVT * VTs,unsigned NumVTs)86 static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
87 SDVTList Res = {VTs, NumVTs};
88 return Res;
89 }
90
91 // Default null implementations of the callbacks.
NodeDeleted(SDNode *,SDNode *)92 void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
NodeUpdated(SDNode *)93 void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
NodeInserted(SDNode *)94 void SelectionDAG::DAGUpdateListener::NodeInserted(SDNode *) {}
95
anchor()96 void SelectionDAG::DAGNodeDeletedListener::anchor() {}
97
98 #define DEBUG_TYPE "selectiondag"
99
100 static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt",
101 cl::Hidden, cl::init(true),
102 cl::desc("Gang up loads and stores generated by inlining of memcpy"));
103
104 static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max",
105 cl::desc("Number limit for gluing ld/st of memcpy."),
106 cl::Hidden, cl::init(0));
107
NewSDValueDbgMsg(SDValue V,StringRef Msg,SelectionDAG * G)108 static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) {
109 LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G););
110 }
111
112 //===----------------------------------------------------------------------===//
113 // ConstantFPSDNode Class
114 //===----------------------------------------------------------------------===//
115
116 /// isExactlyValue - We don't rely on operator== working on double values, as
117 /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
118 /// As such, this method can be used to do an exact bit-for-bit comparison of
119 /// two floating point values.
isExactlyValue(const APFloat & V) const120 bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
121 return getValueAPF().bitwiseIsEqual(V);
122 }
123
isValueValidForType(EVT VT,const APFloat & Val)124 bool ConstantFPSDNode::isValueValidForType(EVT VT,
125 const APFloat& Val) {
126 assert(VT.isFloatingPoint() && "Can only convert between FP types");
127
128 // convert modifies in place, so make a copy.
129 APFloat Val2 = APFloat(Val);
130 bool losesInfo;
131 (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
132 APFloat::rmNearestTiesToEven,
133 &losesInfo);
134 return !losesInfo;
135 }
136
137 //===----------------------------------------------------------------------===//
138 // ISD Namespace
139 //===----------------------------------------------------------------------===//
140
isConstantSplatVector(const SDNode * N,APInt & SplatVal)141 bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
142 if (N->getOpcode() == ISD::SPLAT_VECTOR) {
143 unsigned EltSize =
144 N->getValueType(0).getVectorElementType().getSizeInBits();
145 if (auto *Op0 = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
146 SplatVal = Op0->getAPIntValue().truncOrSelf(EltSize);
147 return true;
148 }
149 if (auto *Op0 = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) {
150 SplatVal = Op0->getValueAPF().bitcastToAPInt().truncOrSelf(EltSize);
151 return true;
152 }
153 }
154
155 auto *BV = dyn_cast<BuildVectorSDNode>(N);
156 if (!BV)
157 return false;
158
159 APInt SplatUndef;
160 unsigned SplatBitSize;
161 bool HasUndefs;
162 unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
163 return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
164 EltSize) &&
165 EltSize == SplatBitSize;
166 }
167
168 // FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
169 // specializations of the more general isConstantSplatVector()?
170
isConstantSplatVectorAllOnes(const SDNode * N,bool BuildVectorOnly)171 bool ISD::isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly) {
172 // Look through a bit convert.
173 while (N->getOpcode() == ISD::BITCAST)
174 N = N->getOperand(0).getNode();
175
176 if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
177 APInt SplatVal;
178 return isConstantSplatVector(N, SplatVal) && SplatVal.isAllOnes();
179 }
180
181 if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
182
183 unsigned i = 0, e = N->getNumOperands();
184
185 // Skip over all of the undef values.
186 while (i != e && N->getOperand(i).isUndef())
187 ++i;
188
189 // Do not accept an all-undef vector.
190 if (i == e) return false;
191
192 // Do not accept build_vectors that aren't all constants or which have non-~0
193 // elements. We have to be a bit careful here, as the type of the constant
194 // may not be the same as the type of the vector elements due to type
195 // legalization (the elements are promoted to a legal type for the target and
196 // a vector of a type may be legal when the base element type is not).
197 // We only want to check enough bits to cover the vector elements, because
198 // we care if the resultant vector is all ones, not whether the individual
199 // constants are.
200 SDValue NotZero = N->getOperand(i);
201 unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
202 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
203 if (CN->getAPIntValue().countTrailingOnes() < EltSize)
204 return false;
205 } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
206 if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
207 return false;
208 } else
209 return false;
210
211 // Okay, we have at least one ~0 value, check to see if the rest match or are
212 // undefs. Even with the above element type twiddling, this should be OK, as
213 // the same type legalization should have applied to all the elements.
214 for (++i; i != e; ++i)
215 if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
216 return false;
217 return true;
218 }
219
isConstantSplatVectorAllZeros(const SDNode * N,bool BuildVectorOnly)220 bool ISD::isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly) {
221 // Look through a bit convert.
222 while (N->getOpcode() == ISD::BITCAST)
223 N = N->getOperand(0).getNode();
224
225 if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
226 APInt SplatVal;
227 return isConstantSplatVector(N, SplatVal) && SplatVal.isZero();
228 }
229
230 if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
231
232 bool IsAllUndef = true;
233 for (const SDValue &Op : N->op_values()) {
234 if (Op.isUndef())
235 continue;
236 IsAllUndef = false;
237 // Do not accept build_vectors that aren't all constants or which have non-0
238 // elements. We have to be a bit careful here, as the type of the constant
239 // may not be the same as the type of the vector elements due to type
240 // legalization (the elements are promoted to a legal type for the target
241 // and a vector of a type may be legal when the base element type is not).
242 // We only want to check enough bits to cover the vector elements, because
243 // we care if the resultant vector is all zeros, not whether the individual
244 // constants are.
245 unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
246 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
247 if (CN->getAPIntValue().countTrailingZeros() < EltSize)
248 return false;
249 } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
250 if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
251 return false;
252 } else
253 return false;
254 }
255
256 // Do not accept an all-undef vector.
257 if (IsAllUndef)
258 return false;
259 return true;
260 }
261
isBuildVectorAllOnes(const SDNode * N)262 bool ISD::isBuildVectorAllOnes(const SDNode *N) {
263 return isConstantSplatVectorAllOnes(N, /*BuildVectorOnly*/ true);
264 }
265
isBuildVectorAllZeros(const SDNode * N)266 bool ISD::isBuildVectorAllZeros(const SDNode *N) {
267 return isConstantSplatVectorAllZeros(N, /*BuildVectorOnly*/ true);
268 }
269
isBuildVectorOfConstantSDNodes(const SDNode * N)270 bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
271 if (N->getOpcode() != ISD::BUILD_VECTOR)
272 return false;
273
274 for (const SDValue &Op : N->op_values()) {
275 if (Op.isUndef())
276 continue;
277 if (!isa<ConstantSDNode>(Op))
278 return false;
279 }
280 return true;
281 }
282
isBuildVectorOfConstantFPSDNodes(const SDNode * N)283 bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
284 if (N->getOpcode() != ISD::BUILD_VECTOR)
285 return false;
286
287 for (const SDValue &Op : N->op_values()) {
288 if (Op.isUndef())
289 continue;
290 if (!isa<ConstantFPSDNode>(Op))
291 return false;
292 }
293 return true;
294 }
295
allOperandsUndef(const SDNode * N)296 bool ISD::allOperandsUndef(const SDNode *N) {
297 // Return false if the node has no operands.
298 // This is "logically inconsistent" with the definition of "all" but
299 // is probably the desired behavior.
300 if (N->getNumOperands() == 0)
301 return false;
302 return all_of(N->op_values(), [](SDValue Op) { return Op.isUndef(); });
303 }
304
matchUnaryPredicate(SDValue Op,std::function<bool (ConstantSDNode *)> Match,bool AllowUndefs)305 bool ISD::matchUnaryPredicate(SDValue Op,
306 std::function<bool(ConstantSDNode *)> Match,
307 bool AllowUndefs) {
308 // FIXME: Add support for scalar UNDEF cases?
309 if (auto *Cst = dyn_cast<ConstantSDNode>(Op))
310 return Match(Cst);
311
312 // FIXME: Add support for vector UNDEF cases?
313 if (ISD::BUILD_VECTOR != Op.getOpcode() &&
314 ISD::SPLAT_VECTOR != Op.getOpcode())
315 return false;
316
317 EVT SVT = Op.getValueType().getScalarType();
318 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
319 if (AllowUndefs && Op.getOperand(i).isUndef()) {
320 if (!Match(nullptr))
321 return false;
322 continue;
323 }
324
325 auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(i));
326 if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst))
327 return false;
328 }
329 return true;
330 }
331
matchBinaryPredicate(SDValue LHS,SDValue RHS,std::function<bool (ConstantSDNode *,ConstantSDNode *)> Match,bool AllowUndefs,bool AllowTypeMismatch)332 bool ISD::matchBinaryPredicate(
333 SDValue LHS, SDValue RHS,
334 std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
335 bool AllowUndefs, bool AllowTypeMismatch) {
336 if (!AllowTypeMismatch && LHS.getValueType() != RHS.getValueType())
337 return false;
338
339 // TODO: Add support for scalar UNDEF cases?
340 if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
341 if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
342 return Match(LHSCst, RHSCst);
343
344 // TODO: Add support for vector UNDEF cases?
345 if (LHS.getOpcode() != RHS.getOpcode() ||
346 (LHS.getOpcode() != ISD::BUILD_VECTOR &&
347 LHS.getOpcode() != ISD::SPLAT_VECTOR))
348 return false;
349
350 EVT SVT = LHS.getValueType().getScalarType();
351 for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
352 SDValue LHSOp = LHS.getOperand(i);
353 SDValue RHSOp = RHS.getOperand(i);
354 bool LHSUndef = AllowUndefs && LHSOp.isUndef();
355 bool RHSUndef = AllowUndefs && RHSOp.isUndef();
356 auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
357 auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
358 if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
359 return false;
360 if (!AllowTypeMismatch && (LHSOp.getValueType() != SVT ||
361 LHSOp.getValueType() != RHSOp.getValueType()))
362 return false;
363 if (!Match(LHSCst, RHSCst))
364 return false;
365 }
366 return true;
367 }
368
getVecReduceBaseOpcode(unsigned VecReduceOpcode)369 ISD::NodeType ISD::getVecReduceBaseOpcode(unsigned VecReduceOpcode) {
370 switch (VecReduceOpcode) {
371 default:
372 llvm_unreachable("Expected VECREDUCE opcode");
373 case ISD::VECREDUCE_FADD:
374 case ISD::VECREDUCE_SEQ_FADD:
375 return ISD::FADD;
376 case ISD::VECREDUCE_FMUL:
377 case ISD::VECREDUCE_SEQ_FMUL:
378 return ISD::FMUL;
379 case ISD::VECREDUCE_ADD:
380 return ISD::ADD;
381 case ISD::VECREDUCE_MUL:
382 return ISD::MUL;
383 case ISD::VECREDUCE_AND:
384 return ISD::AND;
385 case ISD::VECREDUCE_OR:
386 return ISD::OR;
387 case ISD::VECREDUCE_XOR:
388 return ISD::XOR;
389 case ISD::VECREDUCE_SMAX:
390 return ISD::SMAX;
391 case ISD::VECREDUCE_SMIN:
392 return ISD::SMIN;
393 case ISD::VECREDUCE_UMAX:
394 return ISD::UMAX;
395 case ISD::VECREDUCE_UMIN:
396 return ISD::UMIN;
397 case ISD::VECREDUCE_FMAX:
398 return ISD::FMAXNUM;
399 case ISD::VECREDUCE_FMIN:
400 return ISD::FMINNUM;
401 }
402 }
403
isVPOpcode(unsigned Opcode)404 bool ISD::isVPOpcode(unsigned Opcode) {
405 switch (Opcode) {
406 default:
407 return false;
408 #define BEGIN_REGISTER_VP_SDNODE(SDOPC, ...) \
409 case ISD::SDOPC: \
410 return true;
411 #include "llvm/IR/VPIntrinsics.def"
412 }
413 }
414
isVPBinaryOp(unsigned Opcode)415 bool ISD::isVPBinaryOp(unsigned Opcode) {
416 switch (Opcode) {
417 default:
418 return false;
419 #define PROPERTY_VP_BINARYOP_SDNODE(SDOPC) \
420 case ISD::SDOPC: \
421 return true;
422 #include "llvm/IR/VPIntrinsics.def"
423 }
424 }
425
isVPReduction(unsigned Opcode)426 bool ISD::isVPReduction(unsigned Opcode) {
427 switch (Opcode) {
428 default:
429 return false;
430 #define PROPERTY_VP_REDUCTION_SDNODE(SDOPC) \
431 case ISD::SDOPC: \
432 return true;
433 #include "llvm/IR/VPIntrinsics.def"
434 }
435 }
436
437 /// The operand position of the vector mask.
getVPMaskIdx(unsigned Opcode)438 Optional<unsigned> ISD::getVPMaskIdx(unsigned Opcode) {
439 switch (Opcode) {
440 default:
441 return None;
442 #define BEGIN_REGISTER_VP_SDNODE(SDOPC, LEGALPOS, TDNAME, MASKPOS, ...) \
443 case ISD::SDOPC: \
444 return MASKPOS;
445 #include "llvm/IR/VPIntrinsics.def"
446 }
447 }
448
449 /// The operand position of the explicit vector length parameter.
getVPExplicitVectorLengthIdx(unsigned Opcode)450 Optional<unsigned> ISD::getVPExplicitVectorLengthIdx(unsigned Opcode) {
451 switch (Opcode) {
452 default:
453 return None;
454 #define BEGIN_REGISTER_VP_SDNODE(SDOPC, LEGALPOS, TDNAME, MASKPOS, EVLPOS) \
455 case ISD::SDOPC: \
456 return EVLPOS;
457 #include "llvm/IR/VPIntrinsics.def"
458 }
459 }
460
getExtForLoadExtType(bool IsFP,ISD::LoadExtType ExtType)461 ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
462 switch (ExtType) {
463 case ISD::EXTLOAD:
464 return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
465 case ISD::SEXTLOAD:
466 return ISD::SIGN_EXTEND;
467 case ISD::ZEXTLOAD:
468 return ISD::ZERO_EXTEND;
469 default:
470 break;
471 }
472
473 llvm_unreachable("Invalid LoadExtType");
474 }
475
getSetCCSwappedOperands(ISD::CondCode Operation)476 ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
477 // To perform this operation, we just need to swap the L and G bits of the
478 // operation.
479 unsigned OldL = (Operation >> 2) & 1;
480 unsigned OldG = (Operation >> 1) & 1;
481 return ISD::CondCode((Operation & ~6) | // Keep the N, U, E bits
482 (OldL << 1) | // New G bit
483 (OldG << 2)); // New L bit.
484 }
485
getSetCCInverseImpl(ISD::CondCode Op,bool isIntegerLike)486 static ISD::CondCode getSetCCInverseImpl(ISD::CondCode Op, bool isIntegerLike) {
487 unsigned Operation = Op;
488 if (isIntegerLike)
489 Operation ^= 7; // Flip L, G, E bits, but not U.
490 else
491 Operation ^= 15; // Flip all of the condition bits.
492
493 if (Operation > ISD::SETTRUE2)
494 Operation &= ~8; // Don't let N and U bits get set.
495
496 return ISD::CondCode(Operation);
497 }
498
getSetCCInverse(ISD::CondCode Op,EVT Type)499 ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, EVT Type) {
500 return getSetCCInverseImpl(Op, Type.isInteger());
501 }
502
getSetCCInverse(ISD::CondCode Op,bool isIntegerLike)503 ISD::CondCode ISD::GlobalISel::getSetCCInverse(ISD::CondCode Op,
504 bool isIntegerLike) {
505 return getSetCCInverseImpl(Op, isIntegerLike);
506 }
507
508 /// For an integer comparison, return 1 if the comparison is a signed operation
509 /// and 2 if the result is an unsigned comparison. Return zero if the operation
510 /// does not depend on the sign of the input (setne and seteq).
isSignedOp(ISD::CondCode Opcode)511 static int isSignedOp(ISD::CondCode Opcode) {
512 switch (Opcode) {
513 default: llvm_unreachable("Illegal integer setcc operation!");
514 case ISD::SETEQ:
515 case ISD::SETNE: return 0;
516 case ISD::SETLT:
517 case ISD::SETLE:
518 case ISD::SETGT:
519 case ISD::SETGE: return 1;
520 case ISD::SETULT:
521 case ISD::SETULE:
522 case ISD::SETUGT:
523 case ISD::SETUGE: return 2;
524 }
525 }
526
getSetCCOrOperation(ISD::CondCode Op1,ISD::CondCode Op2,EVT Type)527 ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
528 EVT Type) {
529 bool IsInteger = Type.isInteger();
530 if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
531 // Cannot fold a signed integer setcc with an unsigned integer setcc.
532 return ISD::SETCC_INVALID;
533
534 unsigned Op = Op1 | Op2; // Combine all of the condition bits.
535
536 // If the N and U bits get set, then the resultant comparison DOES suddenly
537 // care about orderedness, and it is true when ordered.
538 if (Op > ISD::SETTRUE2)
539 Op &= ~16; // Clear the U bit if the N bit is set.
540
541 // Canonicalize illegal integer setcc's.
542 if (IsInteger && Op == ISD::SETUNE) // e.g. SETUGT | SETULT
543 Op = ISD::SETNE;
544
545 return ISD::CondCode(Op);
546 }
547
getSetCCAndOperation(ISD::CondCode Op1,ISD::CondCode Op2,EVT Type)548 ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
549 EVT Type) {
550 bool IsInteger = Type.isInteger();
551 if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
552 // Cannot fold a signed setcc with an unsigned setcc.
553 return ISD::SETCC_INVALID;
554
555 // Combine all of the condition bits.
556 ISD::CondCode Result = ISD::CondCode(Op1 & Op2);
557
558 // Canonicalize illegal integer setcc's.
559 if (IsInteger) {
560 switch (Result) {
561 default: break;
562 case ISD::SETUO : Result = ISD::SETFALSE; break; // SETUGT & SETULT
563 case ISD::SETOEQ: // SETEQ & SETU[LG]E
564 case ISD::SETUEQ: Result = ISD::SETEQ ; break; // SETUGE & SETULE
565 case ISD::SETOLT: Result = ISD::SETULT ; break; // SETULT & SETNE
566 case ISD::SETOGT: Result = ISD::SETUGT ; break; // SETUGT & SETNE
567 }
568 }
569
570 return Result;
571 }
572
573 //===----------------------------------------------------------------------===//
574 // SDNode Profile Support
575 //===----------------------------------------------------------------------===//
576
577 /// AddNodeIDOpcode - Add the node opcode to the NodeID data.
AddNodeIDOpcode(FoldingSetNodeID & ID,unsigned OpC)578 static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
579 ID.AddInteger(OpC);
580 }
581
582 /// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
583 /// solely with their pointer.
AddNodeIDValueTypes(FoldingSetNodeID & ID,SDVTList VTList)584 static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
585 ID.AddPointer(VTList.VTs);
586 }
587
588 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
AddNodeIDOperands(FoldingSetNodeID & ID,ArrayRef<SDValue> Ops)589 static void AddNodeIDOperands(FoldingSetNodeID &ID,
590 ArrayRef<SDValue> Ops) {
591 for (auto& Op : Ops) {
592 ID.AddPointer(Op.getNode());
593 ID.AddInteger(Op.getResNo());
594 }
595 }
596
597 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
AddNodeIDOperands(FoldingSetNodeID & ID,ArrayRef<SDUse> Ops)598 static void AddNodeIDOperands(FoldingSetNodeID &ID,
599 ArrayRef<SDUse> Ops) {
600 for (auto& Op : Ops) {
601 ID.AddPointer(Op.getNode());
602 ID.AddInteger(Op.getResNo());
603 }
604 }
605
AddNodeIDNode(FoldingSetNodeID & ID,unsigned short OpC,SDVTList VTList,ArrayRef<SDValue> OpList)606 static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
607 SDVTList VTList, ArrayRef<SDValue> OpList) {
608 AddNodeIDOpcode(ID, OpC);
609 AddNodeIDValueTypes(ID, VTList);
610 AddNodeIDOperands(ID, OpList);
611 }
612
613 /// If this is an SDNode with special info, add this info to the NodeID data.
AddNodeIDCustom(FoldingSetNodeID & ID,const SDNode * N)614 static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
615 switch (N->getOpcode()) {
616 case ISD::TargetExternalSymbol:
617 case ISD::ExternalSymbol:
618 case ISD::MCSymbol:
619 llvm_unreachable("Should only be used on nodes with operands");
620 default: break; // Normal nodes don't need extra info.
621 case ISD::TargetConstant:
622 case ISD::Constant: {
623 const ConstantSDNode *C = cast<ConstantSDNode>(N);
624 ID.AddPointer(C->getConstantIntValue());
625 ID.AddBoolean(C->isOpaque());
626 break;
627 }
628 case ISD::TargetConstantFP:
629 case ISD::ConstantFP:
630 ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
631 break;
632 case ISD::TargetGlobalAddress:
633 case ISD::GlobalAddress:
634 case ISD::TargetGlobalTLSAddress:
635 case ISD::GlobalTLSAddress: {
636 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
637 ID.AddPointer(GA->getGlobal());
638 ID.AddInteger(GA->getOffset());
639 ID.AddInteger(GA->getTargetFlags());
640 break;
641 }
642 case ISD::BasicBlock:
643 ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
644 break;
645 case ISD::Register:
646 ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
647 break;
648 case ISD::RegisterMask:
649 ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
650 break;
651 case ISD::SRCVALUE:
652 ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
653 break;
654 case ISD::FrameIndex:
655 case ISD::TargetFrameIndex:
656 ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
657 break;
658 case ISD::LIFETIME_START:
659 case ISD::LIFETIME_END:
660 if (cast<LifetimeSDNode>(N)->hasOffset()) {
661 ID.AddInteger(cast<LifetimeSDNode>(N)->getSize());
662 ID.AddInteger(cast<LifetimeSDNode>(N)->getOffset());
663 }
664 break;
665 case ISD::PSEUDO_PROBE:
666 ID.AddInteger(cast<PseudoProbeSDNode>(N)->getGuid());
667 ID.AddInteger(cast<PseudoProbeSDNode>(N)->getIndex());
668 ID.AddInteger(cast<PseudoProbeSDNode>(N)->getAttributes());
669 break;
670 case ISD::JumpTable:
671 case ISD::TargetJumpTable:
672 ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
673 ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
674 break;
675 case ISD::ConstantPool:
676 case ISD::TargetConstantPool: {
677 const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
678 ID.AddInteger(CP->getAlign().value());
679 ID.AddInteger(CP->getOffset());
680 if (CP->isMachineConstantPoolEntry())
681 CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
682 else
683 ID.AddPointer(CP->getConstVal());
684 ID.AddInteger(CP->getTargetFlags());
685 break;
686 }
687 case ISD::TargetIndex: {
688 const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
689 ID.AddInteger(TI->getIndex());
690 ID.AddInteger(TI->getOffset());
691 ID.AddInteger(TI->getTargetFlags());
692 break;
693 }
694 case ISD::LOAD: {
695 const LoadSDNode *LD = cast<LoadSDNode>(N);
696 ID.AddInteger(LD->getMemoryVT().getRawBits());
697 ID.AddInteger(LD->getRawSubclassData());
698 ID.AddInteger(LD->getPointerInfo().getAddrSpace());
699 break;
700 }
701 case ISD::STORE: {
702 const StoreSDNode *ST = cast<StoreSDNode>(N);
703 ID.AddInteger(ST->getMemoryVT().getRawBits());
704 ID.AddInteger(ST->getRawSubclassData());
705 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
706 break;
707 }
708 case ISD::VP_LOAD: {
709 const VPLoadSDNode *ELD = cast<VPLoadSDNode>(N);
710 ID.AddInteger(ELD->getMemoryVT().getRawBits());
711 ID.AddInteger(ELD->getRawSubclassData());
712 ID.AddInteger(ELD->getPointerInfo().getAddrSpace());
713 break;
714 }
715 case ISD::VP_STORE: {
716 const VPStoreSDNode *EST = cast<VPStoreSDNode>(N);
717 ID.AddInteger(EST->getMemoryVT().getRawBits());
718 ID.AddInteger(EST->getRawSubclassData());
719 ID.AddInteger(EST->getPointerInfo().getAddrSpace());
720 break;
721 }
722 case ISD::VP_GATHER: {
723 const VPGatherSDNode *EG = cast<VPGatherSDNode>(N);
724 ID.AddInteger(EG->getMemoryVT().getRawBits());
725 ID.AddInteger(EG->getRawSubclassData());
726 ID.AddInteger(EG->getPointerInfo().getAddrSpace());
727 break;
728 }
729 case ISD::VP_SCATTER: {
730 const VPScatterSDNode *ES = cast<VPScatterSDNode>(N);
731 ID.AddInteger(ES->getMemoryVT().getRawBits());
732 ID.AddInteger(ES->getRawSubclassData());
733 ID.AddInteger(ES->getPointerInfo().getAddrSpace());
734 break;
735 }
736 case ISD::MLOAD: {
737 const MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N);
738 ID.AddInteger(MLD->getMemoryVT().getRawBits());
739 ID.AddInteger(MLD->getRawSubclassData());
740 ID.AddInteger(MLD->getPointerInfo().getAddrSpace());
741 break;
742 }
743 case ISD::MSTORE: {
744 const MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
745 ID.AddInteger(MST->getMemoryVT().getRawBits());
746 ID.AddInteger(MST->getRawSubclassData());
747 ID.AddInteger(MST->getPointerInfo().getAddrSpace());
748 break;
749 }
750 case ISD::MGATHER: {
751 const MaskedGatherSDNode *MG = cast<MaskedGatherSDNode>(N);
752 ID.AddInteger(MG->getMemoryVT().getRawBits());
753 ID.AddInteger(MG->getRawSubclassData());
754 ID.AddInteger(MG->getPointerInfo().getAddrSpace());
755 break;
756 }
757 case ISD::MSCATTER: {
758 const MaskedScatterSDNode *MS = cast<MaskedScatterSDNode>(N);
759 ID.AddInteger(MS->getMemoryVT().getRawBits());
760 ID.AddInteger(MS->getRawSubclassData());
761 ID.AddInteger(MS->getPointerInfo().getAddrSpace());
762 break;
763 }
764 case ISD::ATOMIC_CMP_SWAP:
765 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
766 case ISD::ATOMIC_SWAP:
767 case ISD::ATOMIC_LOAD_ADD:
768 case ISD::ATOMIC_LOAD_SUB:
769 case ISD::ATOMIC_LOAD_AND:
770 case ISD::ATOMIC_LOAD_CLR:
771 case ISD::ATOMIC_LOAD_OR:
772 case ISD::ATOMIC_LOAD_XOR:
773 case ISD::ATOMIC_LOAD_NAND:
774 case ISD::ATOMIC_LOAD_MIN:
775 case ISD::ATOMIC_LOAD_MAX:
776 case ISD::ATOMIC_LOAD_UMIN:
777 case ISD::ATOMIC_LOAD_UMAX:
778 case ISD::ATOMIC_LOAD:
779 case ISD::ATOMIC_STORE: {
780 const AtomicSDNode *AT = cast<AtomicSDNode>(N);
781 ID.AddInteger(AT->getMemoryVT().getRawBits());
782 ID.AddInteger(AT->getRawSubclassData());
783 ID.AddInteger(AT->getPointerInfo().getAddrSpace());
784 break;
785 }
786 case ISD::PREFETCH: {
787 const MemSDNode *PF = cast<MemSDNode>(N);
788 ID.AddInteger(PF->getPointerInfo().getAddrSpace());
789 break;
790 }
791 case ISD::VECTOR_SHUFFLE: {
792 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
793 for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
794 i != e; ++i)
795 ID.AddInteger(SVN->getMaskElt(i));
796 break;
797 }
798 case ISD::TargetBlockAddress:
799 case ISD::BlockAddress: {
800 const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
801 ID.AddPointer(BA->getBlockAddress());
802 ID.AddInteger(BA->getOffset());
803 ID.AddInteger(BA->getTargetFlags());
804 break;
805 }
806 } // end switch (N->getOpcode())
807
808 // Target specific memory nodes could also have address spaces to check.
809 if (N->isTargetMemoryOpcode())
810 ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
811 }
812
813 /// AddNodeIDNode - Generic routine for adding a nodes info to the NodeID
814 /// data.
AddNodeIDNode(FoldingSetNodeID & ID,const SDNode * N)815 static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
816 AddNodeIDOpcode(ID, N->getOpcode());
817 // Add the return value info.
818 AddNodeIDValueTypes(ID, N->getVTList());
819 // Add the operand info.
820 AddNodeIDOperands(ID, N->ops());
821
822 // Handle SDNode leafs with special info.
823 AddNodeIDCustom(ID, N);
824 }
825
826 //===----------------------------------------------------------------------===//
827 // SelectionDAG Class
828 //===----------------------------------------------------------------------===//
829
830 /// doNotCSE - Return true if CSE should not be performed for this node.
doNotCSE(SDNode * N)831 static bool doNotCSE(SDNode *N) {
832 if (N->getValueType(0) == MVT::Glue)
833 return true; // Never CSE anything that produces a flag.
834
835 switch (N->getOpcode()) {
836 default: break;
837 case ISD::HANDLENODE:
838 case ISD::EH_LABEL:
839 return true; // Never CSE these nodes.
840 }
841
842 // Check that remaining values produced are not flags.
843 for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
844 if (N->getValueType(i) == MVT::Glue)
845 return true; // Never CSE anything that produces a flag.
846
847 return false;
848 }
849
850 /// RemoveDeadNodes - This method deletes all unreachable nodes in the
851 /// SelectionDAG.
RemoveDeadNodes()852 void SelectionDAG::RemoveDeadNodes() {
853 // Create a dummy node (which is not added to allnodes), that adds a reference
854 // to the root node, preventing it from being deleted.
855 HandleSDNode Dummy(getRoot());
856
857 SmallVector<SDNode*, 128> DeadNodes;
858
859 // Add all obviously-dead nodes to the DeadNodes worklist.
860 for (SDNode &Node : allnodes())
861 if (Node.use_empty())
862 DeadNodes.push_back(&Node);
863
864 RemoveDeadNodes(DeadNodes);
865
866 // If the root changed (e.g. it was a dead load, update the root).
867 setRoot(Dummy.getValue());
868 }
869
870 /// RemoveDeadNodes - This method deletes the unreachable nodes in the
871 /// given list, and any nodes that become unreachable as a result.
RemoveDeadNodes(SmallVectorImpl<SDNode * > & DeadNodes)872 void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {
873
874 // Process the worklist, deleting the nodes and adding their uses to the
875 // worklist.
876 while (!DeadNodes.empty()) {
877 SDNode *N = DeadNodes.pop_back_val();
878 // Skip to next node if we've already managed to delete the node. This could
879 // happen if replacing a node causes a node previously added to the node to
880 // be deleted.
881 if (N->getOpcode() == ISD::DELETED_NODE)
882 continue;
883
884 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
885 DUL->NodeDeleted(N, nullptr);
886
887 // Take the node out of the appropriate CSE map.
888 RemoveNodeFromCSEMaps(N);
889
890 // Next, brutally remove the operand list. This is safe to do, as there are
891 // no cycles in the graph.
892 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
893 SDUse &Use = *I++;
894 SDNode *Operand = Use.getNode();
895 Use.set(SDValue());
896
897 // Now that we removed this operand, see if there are no uses of it left.
898 if (Operand->use_empty())
899 DeadNodes.push_back(Operand);
900 }
901
902 DeallocateNode(N);
903 }
904 }
905
RemoveDeadNode(SDNode * N)906 void SelectionDAG::RemoveDeadNode(SDNode *N){
907 SmallVector<SDNode*, 16> DeadNodes(1, N);
908
909 // Create a dummy node that adds a reference to the root node, preventing
910 // it from being deleted. (This matters if the root is an operand of the
911 // dead node.)
912 HandleSDNode Dummy(getRoot());
913
914 RemoveDeadNodes(DeadNodes);
915 }
916
DeleteNode(SDNode * N)917 void SelectionDAG::DeleteNode(SDNode *N) {
918 // First take this out of the appropriate CSE map.
919 RemoveNodeFromCSEMaps(N);
920
921 // Finally, remove uses due to operands of this node, remove from the
922 // AllNodes list, and delete the node.
923 DeleteNodeNotInCSEMaps(N);
924 }
925
DeleteNodeNotInCSEMaps(SDNode * N)926 void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
927 assert(N->getIterator() != AllNodes.begin() &&
928 "Cannot delete the entry node!");
929 assert(N->use_empty() && "Cannot delete a node that is not dead!");
930
931 // Drop all of the operands and decrement used node's use counts.
932 N->DropOperands();
933
934 DeallocateNode(N);
935 }
936
add(SDDbgValue * V,bool isParameter)937 void SDDbgInfo::add(SDDbgValue *V, bool isParameter) {
938 assert(!(V->isVariadic() && isParameter));
939 if (isParameter)
940 ByvalParmDbgValues.push_back(V);
941 else
942 DbgValues.push_back(V);
943 for (const SDNode *Node : V->getSDNodes())
944 if (Node)
945 DbgValMap[Node].push_back(V);
946 }
947
erase(const SDNode * Node)948 void SDDbgInfo::erase(const SDNode *Node) {
949 DbgValMapType::iterator I = DbgValMap.find(Node);
950 if (I == DbgValMap.end())
951 return;
952 for (auto &Val: I->second)
953 Val->setIsInvalidated();
954 DbgValMap.erase(I);
955 }
956
DeallocateNode(SDNode * N)957 void SelectionDAG::DeallocateNode(SDNode *N) {
958 // If we have operands, deallocate them.
959 removeOperands(N);
960
961 NodeAllocator.Deallocate(AllNodes.remove(N));
962
963 // Set the opcode to DELETED_NODE to help catch bugs when node
964 // memory is reallocated.
965 // FIXME: There are places in SDag that have grown a dependency on the opcode
966 // value in the released node.
967 __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
968 N->NodeType = ISD::DELETED_NODE;
969
970 // If any of the SDDbgValue nodes refer to this SDNode, invalidate
971 // them and forget about that node.
972 DbgInfo->erase(N);
973 }
974
975 #ifndef NDEBUG
976 /// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
VerifySDNode(SDNode * N)977 static void VerifySDNode(SDNode *N) {
978 switch (N->getOpcode()) {
979 default:
980 break;
981 case ISD::BUILD_PAIR: {
982 EVT VT = N->getValueType(0);
983 assert(N->getNumValues() == 1 && "Too many results!");
984 assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
985 "Wrong return type!");
986 assert(N->getNumOperands() == 2 && "Wrong number of operands!");
987 assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
988 "Mismatched operand types!");
989 assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
990 "Wrong operand type!");
991 assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
992 "Wrong return type size");
993 break;
994 }
995 case ISD::BUILD_VECTOR: {
996 assert(N->getNumValues() == 1 && "Too many results!");
997 assert(N->getValueType(0).isVector() && "Wrong return type!");
998 assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
999 "Wrong number of operands!");
1000 EVT EltVT = N->getValueType(0).getVectorElementType();
1001 for (const SDUse &Op : N->ops()) {
1002 assert((Op.getValueType() == EltVT ||
1003 (EltVT.isInteger() && Op.getValueType().isInteger() &&
1004 EltVT.bitsLE(Op.getValueType()))) &&
1005 "Wrong operand type!");
1006 assert(Op.getValueType() == N->getOperand(0).getValueType() &&
1007 "Operands must all have the same type");
1008 }
1009 break;
1010 }
1011 }
1012 }
1013 #endif // NDEBUG
1014
1015 /// Insert a newly allocated node into the DAG.
1016 ///
1017 /// Handles insertion into the all nodes list and CSE map, as well as
1018 /// verification and other common operations when a new node is allocated.
InsertNode(SDNode * N)1019 void SelectionDAG::InsertNode(SDNode *N) {
1020 AllNodes.push_back(N);
1021 #ifndef NDEBUG
1022 N->PersistentId = NextPersistentId++;
1023 VerifySDNode(N);
1024 #endif
1025 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1026 DUL->NodeInserted(N);
1027 }
1028
1029 /// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
1030 /// correspond to it. This is useful when we're about to delete or repurpose
1031 /// the node. We don't want future request for structurally identical nodes
1032 /// to return N anymore.
RemoveNodeFromCSEMaps(SDNode * N)1033 bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
1034 bool Erased = false;
1035 switch (N->getOpcode()) {
1036 case ISD::HANDLENODE: return false; // noop.
1037 case ISD::CONDCODE:
1038 assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
1039 "Cond code doesn't exist!");
1040 Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
1041 CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
1042 break;
1043 case ISD::ExternalSymbol:
1044 Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
1045 break;
1046 case ISD::TargetExternalSymbol: {
1047 ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
1048 Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>(
1049 ESN->getSymbol(), ESN->getTargetFlags()));
1050 break;
1051 }
1052 case ISD::MCSymbol: {
1053 auto *MCSN = cast<MCSymbolSDNode>(N);
1054 Erased = MCSymbols.erase(MCSN->getMCSymbol());
1055 break;
1056 }
1057 case ISD::VALUETYPE: {
1058 EVT VT = cast<VTSDNode>(N)->getVT();
1059 if (VT.isExtended()) {
1060 Erased = ExtendedValueTypeNodes.erase(VT);
1061 } else {
1062 Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
1063 ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
1064 }
1065 break;
1066 }
1067 default:
1068 // Remove it from the CSE Map.
1069 assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
1070 assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
1071 Erased = CSEMap.RemoveNode(N);
1072 break;
1073 }
1074 #ifndef NDEBUG
1075 // Verify that the node was actually in one of the CSE maps, unless it has a
1076 // flag result (which cannot be CSE'd) or is one of the special cases that are
1077 // not subject to CSE.
1078 if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
1079 !N->isMachineOpcode() && !doNotCSE(N)) {
1080 N->dump(this);
1081 dbgs() << "\n";
1082 llvm_unreachable("Node is not in map!");
1083 }
1084 #endif
1085 return Erased;
1086 }
1087
1088 /// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
1089 /// maps and modified in place. Add it back to the CSE maps, unless an identical
1090 /// node already exists, in which case transfer all its users to the existing
1091 /// node. This transfer can potentially trigger recursive merging.
1092 void
AddModifiedNodeToCSEMaps(SDNode * N)1093 SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
1094 // For node types that aren't CSE'd, just act as if no identical node
1095 // already exists.
1096 if (!doNotCSE(N)) {
1097 SDNode *Existing = CSEMap.GetOrInsertNode(N);
1098 if (Existing != N) {
1099 // If there was already an existing matching node, use ReplaceAllUsesWith
1100 // to replace the dead one with the existing one. This can cause
1101 // recursive merging of other unrelated nodes down the line.
1102 ReplaceAllUsesWith(N, Existing);
1103
1104 // N is now dead. Inform the listeners and delete it.
1105 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1106 DUL->NodeDeleted(N, Existing);
1107 DeleteNodeNotInCSEMaps(N);
1108 return;
1109 }
1110 }
1111
1112 // If the node doesn't already exist, we updated it. Inform listeners.
1113 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
1114 DUL->NodeUpdated(N);
1115 }
1116
1117 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
1118 /// were replaced with those specified. If this node is never memoized,
1119 /// return null, otherwise return a pointer to the slot it would take. If a
1120 /// node already exists with these operands, the slot will be non-null.
FindModifiedNodeSlot(SDNode * N,SDValue Op,void * & InsertPos)1121 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
1122 void *&InsertPos) {
1123 if (doNotCSE(N))
1124 return nullptr;
1125
1126 SDValue Ops[] = { Op };
1127 FoldingSetNodeID ID;
1128 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
1129 AddNodeIDCustom(ID, N);
1130 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
1131 if (Node)
1132 Node->intersectFlagsWith(N->getFlags());
1133 return Node;
1134 }
1135
1136 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
1137 /// were replaced with those specified. If this node is never memoized,
1138 /// return null, otherwise return a pointer to the slot it would take. If a
1139 /// node already exists with these operands, the slot will be non-null.
FindModifiedNodeSlot(SDNode * N,SDValue Op1,SDValue Op2,void * & InsertPos)1140 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
1141 SDValue Op1, SDValue Op2,
1142 void *&InsertPos) {
1143 if (doNotCSE(N))
1144 return nullptr;
1145
1146 SDValue Ops[] = { Op1, Op2 };
1147 FoldingSetNodeID ID;
1148 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
1149 AddNodeIDCustom(ID, N);
1150 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
1151 if (Node)
1152 Node->intersectFlagsWith(N->getFlags());
1153 return Node;
1154 }
1155
1156 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
1157 /// were replaced with those specified. If this node is never memoized,
1158 /// return null, otherwise return a pointer to the slot it would take. If a
1159 /// node already exists with these operands, the slot will be non-null.
FindModifiedNodeSlot(SDNode * N,ArrayRef<SDValue> Ops,void * & InsertPos)1160 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
1161 void *&InsertPos) {
1162 if (doNotCSE(N))
1163 return nullptr;
1164
1165 FoldingSetNodeID ID;
1166 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
1167 AddNodeIDCustom(ID, N);
1168 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
1169 if (Node)
1170 Node->intersectFlagsWith(N->getFlags());
1171 return Node;
1172 }
1173
getEVTAlign(EVT VT) const1174 Align SelectionDAG::getEVTAlign(EVT VT) const {
1175 Type *Ty = VT == MVT::iPTR ?
1176 PointerType::get(Type::getInt8Ty(*getContext()), 0) :
1177 VT.getTypeForEVT(*getContext());
1178
1179 return getDataLayout().getABITypeAlign(Ty);
1180 }
1181
1182 // EntryNode could meaningfully have debug info if we can find it...
SelectionDAG(const TargetMachine & tm,CodeGenOpt::Level OL)1183 SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
1184 : TM(tm), OptLevel(OL),
1185 EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
1186 Root(getEntryNode()) {
1187 InsertNode(&EntryNode);
1188 DbgInfo = new SDDbgInfo();
1189 }
1190
init(MachineFunction & NewMF,OptimizationRemarkEmitter & NewORE,Pass * PassPtr,const TargetLibraryInfo * LibraryInfo,LegacyDivergenceAnalysis * Divergence,ProfileSummaryInfo * PSIin,BlockFrequencyInfo * BFIin)1191 void SelectionDAG::init(MachineFunction &NewMF,
1192 OptimizationRemarkEmitter &NewORE,
1193 Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
1194 LegacyDivergenceAnalysis * Divergence,
1195 ProfileSummaryInfo *PSIin,
1196 BlockFrequencyInfo *BFIin) {
1197 MF = &NewMF;
1198 SDAGISelPass = PassPtr;
1199 ORE = &NewORE;
1200 TLI = getSubtarget().getTargetLowering();
1201 TSI = getSubtarget().getSelectionDAGInfo();
1202 LibInfo = LibraryInfo;
1203 Context = &MF->getFunction().getContext();
1204 DA = Divergence;
1205 PSI = PSIin;
1206 BFI = BFIin;
1207 }
1208
~SelectionDAG()1209 SelectionDAG::~SelectionDAG() {
1210 assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
1211 allnodes_clear();
1212 OperandRecycler.clear(OperandAllocator);
1213 delete DbgInfo;
1214 }
1215
shouldOptForSize() const1216 bool SelectionDAG::shouldOptForSize() const {
1217 return MF->getFunction().hasOptSize() ||
1218 llvm::shouldOptimizeForSize(FLI->MBB->getBasicBlock(), PSI, BFI);
1219 }
1220
allnodes_clear()1221 void SelectionDAG::allnodes_clear() {
1222 assert(&*AllNodes.begin() == &EntryNode);
1223 AllNodes.remove(AllNodes.begin());
1224 while (!AllNodes.empty())
1225 DeallocateNode(&AllNodes.front());
1226 #ifndef NDEBUG
1227 NextPersistentId = 0;
1228 #endif
1229 }
1230
FindNodeOrInsertPos(const FoldingSetNodeID & ID,void * & InsertPos)1231 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
1232 void *&InsertPos) {
1233 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
1234 if (N) {
1235 switch (N->getOpcode()) {
1236 default: break;
1237 case ISD::Constant:
1238 case ISD::ConstantFP:
1239 llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
1240 "debug location. Use another overload.");
1241 }
1242 }
1243 return N;
1244 }
1245
FindNodeOrInsertPos(const FoldingSetNodeID & ID,const SDLoc & DL,void * & InsertPos)1246 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
1247 const SDLoc &DL, void *&InsertPos) {
1248 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
1249 if (N) {
1250 switch (N->getOpcode()) {
1251 case ISD::Constant:
1252 case ISD::ConstantFP:
1253 // Erase debug location from the node if the node is used at several
1254 // different places. Do not propagate one location to all uses as it
1255 // will cause a worse single stepping debugging experience.
1256 if (N->getDebugLoc() != DL.getDebugLoc())
1257 N->setDebugLoc(DebugLoc());
1258 break;
1259 default:
1260 // When the node's point of use is located earlier in the instruction
1261 // sequence than its prior point of use, update its debug info to the
1262 // earlier location.
1263 if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
1264 N->setDebugLoc(DL.getDebugLoc());
1265 break;
1266 }
1267 }
1268 return N;
1269 }
1270
clear()1271 void SelectionDAG::clear() {
1272 allnodes_clear();
1273 OperandRecycler.clear(OperandAllocator);
1274 OperandAllocator.Reset();
1275 CSEMap.clear();
1276
1277 ExtendedValueTypeNodes.clear();
1278 ExternalSymbols.clear();
1279 TargetExternalSymbols.clear();
1280 MCSymbols.clear();
1281 SDCallSiteDbgInfo.clear();
1282 std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
1283 static_cast<CondCodeSDNode*>(nullptr));
1284 std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
1285 static_cast<SDNode*>(nullptr));
1286
1287 EntryNode.UseList = nullptr;
1288 InsertNode(&EntryNode);
1289 Root = getEntryNode();
1290 DbgInfo->clear();
1291 }
1292
getFPExtendOrRound(SDValue Op,const SDLoc & DL,EVT VT)1293 SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) {
1294 return VT.bitsGT(Op.getValueType())
1295 ? getNode(ISD::FP_EXTEND, DL, VT, Op)
1296 : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL));
1297 }
1298
1299 std::pair<SDValue, SDValue>
getStrictFPExtendOrRound(SDValue Op,SDValue Chain,const SDLoc & DL,EVT VT)1300 SelectionDAG::getStrictFPExtendOrRound(SDValue Op, SDValue Chain,
1301 const SDLoc &DL, EVT VT) {
1302 assert(!VT.bitsEq(Op.getValueType()) &&
1303 "Strict no-op FP extend/round not allowed.");
1304 SDValue Res =
1305 VT.bitsGT(Op.getValueType())
1306 ? getNode(ISD::STRICT_FP_EXTEND, DL, {VT, MVT::Other}, {Chain, Op})
1307 : getNode(ISD::STRICT_FP_ROUND, DL, {VT, MVT::Other},
1308 {Chain, Op, getIntPtrConstant(0, DL)});
1309
1310 return std::pair<SDValue, SDValue>(Res, SDValue(Res.getNode(), 1));
1311 }
1312
getAnyExtOrTrunc(SDValue Op,const SDLoc & DL,EVT VT)1313 SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1314 return VT.bitsGT(Op.getValueType()) ?
1315 getNode(ISD::ANY_EXTEND, DL, VT, Op) :
1316 getNode(ISD::TRUNCATE, DL, VT, Op);
1317 }
1318
getSExtOrTrunc(SDValue Op,const SDLoc & DL,EVT VT)1319 SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1320 return VT.bitsGT(Op.getValueType()) ?
1321 getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
1322 getNode(ISD::TRUNCATE, DL, VT, Op);
1323 }
1324
getZExtOrTrunc(SDValue Op,const SDLoc & DL,EVT VT)1325 SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1326 return VT.bitsGT(Op.getValueType()) ?
1327 getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
1328 getNode(ISD::TRUNCATE, DL, VT, Op);
1329 }
1330
getBoolExtOrTrunc(SDValue Op,const SDLoc & SL,EVT VT,EVT OpVT)1331 SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
1332 EVT OpVT) {
1333 if (VT.bitsLE(Op.getValueType()))
1334 return getNode(ISD::TRUNCATE, SL, VT, Op);
1335
1336 TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
1337 return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
1338 }
1339
getZeroExtendInReg(SDValue Op,const SDLoc & DL,EVT VT)1340 SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
1341 EVT OpVT = Op.getValueType();
1342 assert(VT.isInteger() && OpVT.isInteger() &&
1343 "Cannot getZeroExtendInReg FP types");
1344 assert(VT.isVector() == OpVT.isVector() &&
1345 "getZeroExtendInReg type should be vector iff the operand "
1346 "type is vector!");
1347 assert((!VT.isVector() ||
1348 VT.getVectorElementCount() == OpVT.getVectorElementCount()) &&
1349 "Vector element counts must match in getZeroExtendInReg");
1350 assert(VT.bitsLE(OpVT) && "Not extending!");
1351 if (OpVT == VT)
1352 return Op;
1353 APInt Imm = APInt::getLowBitsSet(OpVT.getScalarSizeInBits(),
1354 VT.getScalarSizeInBits());
1355 return getNode(ISD::AND, DL, OpVT, Op, getConstant(Imm, DL, OpVT));
1356 }
1357
getPtrExtOrTrunc(SDValue Op,const SDLoc & DL,EVT VT)1358 SDValue SelectionDAG::getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1359 // Only unsigned pointer semantics are supported right now. In the future this
1360 // might delegate to TLI to check pointer signedness.
1361 return getZExtOrTrunc(Op, DL, VT);
1362 }
1363
getPtrExtendInReg(SDValue Op,const SDLoc & DL,EVT VT)1364 SDValue SelectionDAG::getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
1365 // Only unsigned pointer semantics are supported right now. In the future this
1366 // might delegate to TLI to check pointer signedness.
1367 return getZeroExtendInReg(Op, DL, VT);
1368 }
1369
1370 /// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
getNOT(const SDLoc & DL,SDValue Val,EVT VT)1371 SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
1372 return getNode(ISD::XOR, DL, VT, Val, getAllOnesConstant(DL, VT));
1373 }
1374
getLogicalNOT(const SDLoc & DL,SDValue Val,EVT VT)1375 SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
1376 SDValue TrueValue = getBoolConstant(true, DL, VT, VT);
1377 return getNode(ISD::XOR, DL, VT, Val, TrueValue);
1378 }
1379
getBoolConstant(bool V,const SDLoc & DL,EVT VT,EVT OpVT)1380 SDValue SelectionDAG::getBoolConstant(bool V, const SDLoc &DL, EVT VT,
1381 EVT OpVT) {
1382 if (!V)
1383 return getConstant(0, DL, VT);
1384
1385 switch (TLI->getBooleanContents(OpVT)) {
1386 case TargetLowering::ZeroOrOneBooleanContent:
1387 case TargetLowering::UndefinedBooleanContent:
1388 return getConstant(1, DL, VT);
1389 case TargetLowering::ZeroOrNegativeOneBooleanContent:
1390 return getAllOnesConstant(DL, VT);
1391 }
1392 llvm_unreachable("Unexpected boolean content enum!");
1393 }
1394
getConstant(uint64_t Val,const SDLoc & DL,EVT VT,bool isT,bool isO)1395 SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
1396 bool isT, bool isO) {
1397 EVT EltVT = VT.getScalarType();
1398 assert((EltVT.getSizeInBits() >= 64 ||
1399 (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
1400 "getConstant with a uint64_t value that doesn't fit in the type!");
1401 return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
1402 }
1403
getConstant(const APInt & Val,const SDLoc & DL,EVT VT,bool isT,bool isO)1404 SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
1405 bool isT, bool isO) {
1406 return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
1407 }
1408
getConstant(const ConstantInt & Val,const SDLoc & DL,EVT VT,bool isT,bool isO)1409 SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
1410 EVT VT, bool isT, bool isO) {
1411 assert(VT.isInteger() && "Cannot create FP integer constant!");
1412
1413 EVT EltVT = VT.getScalarType();
1414 const ConstantInt *Elt = &Val;
1415
1416 // In some cases the vector type is legal but the element type is illegal and
1417 // needs to be promoted, for example v8i8 on ARM. In this case, promote the
1418 // inserted value (the type does not need to match the vector element type).
1419 // Any extra bits introduced will be truncated away.
1420 if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
1421 TargetLowering::TypePromoteInteger) {
1422 EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1423 APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
1424 Elt = ConstantInt::get(*getContext(), NewVal);
1425 }
1426 // In other cases the element type is illegal and needs to be expanded, for
1427 // example v2i64 on MIPS32. In this case, find the nearest legal type, split
1428 // the value into n parts and use a vector type with n-times the elements.
1429 // Then bitcast to the type requested.
1430 // Legalizing constants too early makes the DAGCombiner's job harder so we
1431 // only legalize if the DAG tells us we must produce legal types.
1432 else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
1433 TLI->getTypeAction(*getContext(), EltVT) ==
1434 TargetLowering::TypeExpandInteger) {
1435 const APInt &NewVal = Elt->getValue();
1436 EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1437 unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
1438
1439 // For scalable vectors, try to use a SPLAT_VECTOR_PARTS node.
1440 if (VT.isScalableVector()) {
1441 assert(EltVT.getSizeInBits() % ViaEltSizeInBits == 0 &&
1442 "Can only handle an even split!");
1443 unsigned Parts = EltVT.getSizeInBits() / ViaEltSizeInBits;
1444
1445 SmallVector<SDValue, 2> ScalarParts;
1446 for (unsigned i = 0; i != Parts; ++i)
1447 ScalarParts.push_back(getConstant(
1448 NewVal.extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits), DL,
1449 ViaEltVT, isT, isO));
1450
1451 return getNode(ISD::SPLAT_VECTOR_PARTS, DL, VT, ScalarParts);
1452 }
1453
1454 unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
1455 EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);
1456
1457 // Check the temporary vector is the correct size. If this fails then
1458 // getTypeToTransformTo() probably returned a type whose size (in bits)
1459 // isn't a power-of-2 factor of the requested type size.
1460 assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());
1461
1462 SmallVector<SDValue, 2> EltParts;
1463 for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i)
1464 EltParts.push_back(getConstant(
1465 NewVal.extractBits(ViaEltSizeInBits, i * ViaEltSizeInBits), DL,
1466 ViaEltVT, isT, isO));
1467
1468 // EltParts is currently in little endian order. If we actually want
1469 // big-endian order then reverse it now.
1470 if (getDataLayout().isBigEndian())
1471 std::reverse(EltParts.begin(), EltParts.end());
1472
1473 // The elements must be reversed when the element order is different
1474 // to the endianness of the elements (because the BITCAST is itself a
1475 // vector shuffle in this situation). However, we do not need any code to
1476 // perform this reversal because getConstant() is producing a vector
1477 // splat.
1478 // This situation occurs in MIPS MSA.
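// As an illustrative sketch: splatting the 64-bit value 0x0000000100000002
// into v2i64 on a 32-bit little-endian target is built here as the v4i32
// BUILD_VECTOR <2, 1, 2, 1> and then bitcast back to v2i64.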
1479
1480 SmallVector<SDValue, 8> Ops;
1481 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
1482 llvm::append_range(Ops, EltParts);
1483
1484 SDValue V =
1485 getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
1486 return V;
1487 }
1488
1489 assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
1490 "APInt size does not match type size!");
1491 unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
1492 FoldingSetNodeID ID;
1493 AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1494 ID.AddPointer(Elt);
1495 ID.AddBoolean(isO);
1496 void *IP = nullptr;
1497 SDNode *N = nullptr;
1498 if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1499 if (!VT.isVector())
1500 return SDValue(N, 0);
1501
1502 if (!N) {
1503 N = newSDNode<ConstantSDNode>(isT, isO, Elt, EltVT);
1504 CSEMap.InsertNode(N, IP);
1505 InsertNode(N);
1506 NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this);
1507 }
1508
1509 SDValue Result(N, 0);
1510 if (VT.isScalableVector())
1511 Result = getSplatVector(VT, DL, Result);
1512 else if (VT.isVector())
1513 Result = getSplatBuildVector(VT, DL, Result);
1514
1515 return Result;
1516 }
1517
1518 SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
1519 bool isTarget) {
1520 return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
1521 }
1522
1523 SDValue SelectionDAG::getShiftAmountConstant(uint64_t Val, EVT VT,
1524 const SDLoc &DL, bool LegalTypes) {
1525 assert(VT.isInteger() && "Shift amount is not an integer type!");
1526 EVT ShiftVT = TLI->getShiftAmountTy(VT, getDataLayout(), LegalTypes);
1527 return getConstant(Val, DL, ShiftVT);
1528 }
1529
1530 SDValue SelectionDAG::getVectorIdxConstant(uint64_t Val, const SDLoc &DL,
1531 bool isTarget) {
1532 return getConstant(Val, DL, TLI->getVectorIdxTy(getDataLayout()), isTarget);
1533 }
1534
1535 SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
1536 bool isTarget) {
1537 return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
1538 }
1539
1540 SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
1541 EVT VT, bool isTarget) {
1542 assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");
1543
1544 EVT EltVT = VT.getScalarType();
1545
1546 // Do the map lookup using the actual bit pattern for the floating point
1547 // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
1548 // we don't have issues with SNANs.
1549 unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
1550 FoldingSetNodeID ID;
1551 AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1552 ID.AddPointer(&V);
1553 void *IP = nullptr;
1554 SDNode *N = nullptr;
1555 if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1556 if (!VT.isVector())
1557 return SDValue(N, 0);
1558
1559 if (!N) {
1560 N = newSDNode<ConstantFPSDNode>(isTarget, &V, EltVT);
1561 CSEMap.InsertNode(N, IP);
1562 InsertNode(N);
1563 }
1564
1565 SDValue Result(N, 0);
1566 if (VT.isScalableVector())
1567 Result = getSplatVector(VT, DL, Result);
1568 else if (VT.isVector())
1569 Result = getSplatBuildVector(VT, DL, Result);
1570 NewSDValueDbgMsg(Result, "Creating fp constant: ", this);
1571 return Result;
1572 }
1573
1574 SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
1575 bool isTarget) {
1576 EVT EltVT = VT.getScalarType();
1577 if (EltVT == MVT::f32)
1578 return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
1579 if (EltVT == MVT::f64)
1580 return getConstantFP(APFloat(Val), DL, VT, isTarget);
1581 if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
1582 EltVT == MVT::f16 || EltVT == MVT::bf16) {
1583 bool Ignored;
1584 APFloat APF = APFloat(Val);
1585 APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
1586 &Ignored);
1587 return getConstantFP(APF, DL, VT, isTarget);
1588 }
1589 llvm_unreachable("Unsupported type in getConstantFP");
1590 }
1591
1592 SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
1593 EVT VT, int64_t Offset, bool isTargetGA,
1594 unsigned TargetFlags) {
1595 assert((TargetFlags == 0 || isTargetGA) &&
1596 "Cannot set target flags on target-independent globals");
1597
1598 // Truncate (with sign-extension) the offset value to the pointer size.
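// For example, with 32-bit pointers an offset of 0xFFFFFFFF becomes -1.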
1599 unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
1600 if (BitWidth < 64)
1601 Offset = SignExtend64(Offset, BitWidth);
1602
1603 unsigned Opc;
1604 if (GV->isThreadLocal())
1605 Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
1606 else
1607 Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;
1608
1609 FoldingSetNodeID ID;
1610 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1611 ID.AddPointer(GV);
1612 ID.AddInteger(Offset);
1613 ID.AddInteger(TargetFlags);
1614 void *IP = nullptr;
1615 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
1616 return SDValue(E, 0);
1617
1618 auto *N = newSDNode<GlobalAddressSDNode>(
1619 Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
1620 CSEMap.InsertNode(N, IP);
1621 InsertNode(N);
1622 return SDValue(N, 0);
1623 }
1624
1625 SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
1626 unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
1627 FoldingSetNodeID ID;
1628 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1629 ID.AddInteger(FI);
1630 void *IP = nullptr;
1631 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1632 return SDValue(E, 0);
1633
1634 auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
1635 CSEMap.InsertNode(N, IP);
1636 InsertNode(N);
1637 return SDValue(N, 0);
1638 }
1639
1640 SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
1641 unsigned TargetFlags) {
1642 assert((TargetFlags == 0 || isTarget) &&
1643 "Cannot set target flags on target-independent jump tables");
1644 unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
1645 FoldingSetNodeID ID;
1646 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1647 ID.AddInteger(JTI);
1648 ID.AddInteger(TargetFlags);
1649 void *IP = nullptr;
1650 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1651 return SDValue(E, 0);
1652
1653 auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
1654 CSEMap.InsertNode(N, IP);
1655 InsertNode(N);
1656 return SDValue(N, 0);
1657 }
1658
1659 SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
1660 MaybeAlign Alignment, int Offset,
1661 bool isTarget, unsigned TargetFlags) {
1662 assert((TargetFlags == 0 || isTarget) &&
1663 "Cannot set target flags on target-independent globals");
1664 if (!Alignment)
1665 Alignment = shouldOptForSize()
1666 ? getDataLayout().getABITypeAlign(C->getType())
1667 : getDataLayout().getPrefTypeAlign(C->getType());
1668 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1669 FoldingSetNodeID ID;
1670 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1671 ID.AddInteger(Alignment->value());
1672 ID.AddInteger(Offset);
1673 ID.AddPointer(C);
1674 ID.AddInteger(TargetFlags);
1675 void *IP = nullptr;
1676 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1677 return SDValue(E, 0);
1678
1679 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment,
1680 TargetFlags);
1681 CSEMap.InsertNode(N, IP);
1682 InsertNode(N);
1683 SDValue V = SDValue(N, 0);
1684 NewSDValueDbgMsg(V, "Creating new constant pool: ", this);
1685 return V;
1686 }
1687
1688 SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
1689 MaybeAlign Alignment, int Offset,
1690 bool isTarget, unsigned TargetFlags) {
1691 assert((TargetFlags == 0 || isTarget) &&
1692 "Cannot set target flags on target-independent globals");
1693 if (!Alignment)
1694 Alignment = getDataLayout().getPrefTypeAlign(C->getType());
1695 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1696 FoldingSetNodeID ID;
1697 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1698 ID.AddInteger(Alignment->value());
1699 ID.AddInteger(Offset);
1700 C->addSelectionDAGCSEId(ID);
1701 ID.AddInteger(TargetFlags);
1702 void *IP = nullptr;
1703 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1704 return SDValue(E, 0);
1705
1706 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment,
1707 TargetFlags);
1708 CSEMap.InsertNode(N, IP);
1709 InsertNode(N);
1710 return SDValue(N, 0);
1711 }
1712
1713 SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
1714 unsigned TargetFlags) {
1715 FoldingSetNodeID ID;
1716 AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
1717 ID.AddInteger(Index);
1718 ID.AddInteger(Offset);
1719 ID.AddInteger(TargetFlags);
1720 void *IP = nullptr;
1721 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1722 return SDValue(E, 0);
1723
1724 auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
1725 CSEMap.InsertNode(N, IP);
1726 InsertNode(N);
1727 return SDValue(N, 0);
1728 }
1729
1730 SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
1731 FoldingSetNodeID ID;
1732 AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
1733 ID.AddPointer(MBB);
1734 void *IP = nullptr;
1735 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1736 return SDValue(E, 0);
1737
1738 auto *N = newSDNode<BasicBlockSDNode>(MBB);
1739 CSEMap.InsertNode(N, IP);
1740 InsertNode(N);
1741 return SDValue(N, 0);
1742 }
1743
1744 SDValue SelectionDAG::getValueType(EVT VT) {
1745 if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
1746 ValueTypeNodes.size())
1747 ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);
1748
1749 SDNode *&N = VT.isExtended() ?
1750 ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];
1751
1752 if (N) return SDValue(N, 0);
1753 N = newSDNode<VTSDNode>(VT);
1754 InsertNode(N);
1755 return SDValue(N, 0);
1756 }
1757
1758 SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
1759 SDNode *&N = ExternalSymbols[Sym];
1760 if (N) return SDValue(N, 0);
1761 N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
1762 InsertNode(N);
1763 return SDValue(N, 0);
1764 }
1765
1766 SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
1767 SDNode *&N = MCSymbols[Sym];
1768 if (N)
1769 return SDValue(N, 0);
1770 N = newSDNode<MCSymbolSDNode>(Sym, VT);
1771 InsertNode(N);
1772 return SDValue(N, 0);
1773 }
1774
1775 SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
1776 unsigned TargetFlags) {
1777 SDNode *&N =
1778 TargetExternalSymbols[std::pair<std::string, unsigned>(Sym, TargetFlags)];
1779 if (N) return SDValue(N, 0);
1780 N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
1781 InsertNode(N);
1782 return SDValue(N, 0);
1783 }
1784
1785 SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
1786 if ((unsigned)Cond >= CondCodeNodes.size())
1787 CondCodeNodes.resize(Cond+1);
1788
1789 if (!CondCodeNodes[Cond]) {
1790 auto *N = newSDNode<CondCodeSDNode>(Cond);
1791 CondCodeNodes[Cond] = N;
1792 InsertNode(N);
1793 }
1794
1795 return SDValue(CondCodeNodes[Cond], 0);
1796 }
1797
1798 SDValue SelectionDAG::getStepVector(const SDLoc &DL, EVT ResVT) {
1799 APInt One(ResVT.getScalarSizeInBits(), 1);
1800 return getStepVector(DL, ResVT, One);
1801 }
1802
1803 SDValue SelectionDAG::getStepVector(const SDLoc &DL, EVT ResVT, APInt StepVal) {
1804 assert(ResVT.getScalarSizeInBits() == StepVal.getBitWidth());
1805 if (ResVT.isScalableVector())
1806 return getNode(
1807 ISD::STEP_VECTOR, DL, ResVT,
1808 getTargetConstant(StepVal, DL, ResVT.getVectorElementType()));
1809
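// For fixed-length result types, materialize the sequence directly; e.g. a
// StepVal of 2 with a v4i32 result yields the BUILD_VECTOR <0, 2, 4, 6>.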
1810 SmallVector<SDValue, 16> OpsStepConstants;
1811 for (uint64_t i = 0; i < ResVT.getVectorNumElements(); i++)
1812 OpsStepConstants.push_back(
1813 getConstant(StepVal * i, DL, ResVT.getVectorElementType()));
1814 return getBuildVector(ResVT, DL, OpsStepConstants);
1815 }
1816
1817 /// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
1818 /// point at N1 to point at N2 and indices that point at N2 to point at N1.
1819 static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
1820 std::swap(N1, N2);
1821 ShuffleVectorSDNode::commuteMask(M);
1822 }
1823
1824 SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
1825 SDValue N2, ArrayRef<int> Mask) {
1826 assert(VT.getVectorNumElements() == Mask.size() &&
1827 "Must have the same number of vector elements as mask elements!");
1828 assert(VT == N1.getValueType() && VT == N2.getValueType() &&
1829 "Invalid VECTOR_SHUFFLE");
1830
1831 // Canonicalize shuffle undef, undef -> undef
1832 if (N1.isUndef() && N2.isUndef())
1833 return getUNDEF(VT);
1834
1835 // Validate that all indices in Mask are within the range of the elements
1836 // input to the shuffle.
1837 int NElts = Mask.size();
1838 assert(llvm::all_of(Mask,
1839 [&](int M) { return M < (NElts * 2) && M >= -1; }) &&
1840 "Index out of range");
1841
1842 // Copy the mask so we can do any needed cleanup.
1843 SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end());
1844
1845 // Canonicalize shuffle v, v -> v, undef
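// (E.g. a mask of <0, 5, 2, 7> over two copies of a 4-element vector becomes
// <0, 1, 2, 3> over <v, undef>.)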
1846 if (N1 == N2) {
1847 N2 = getUNDEF(VT);
1848 for (int i = 0; i != NElts; ++i)
1849 if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
1850 }
1851
1852 // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
1853 if (N1.isUndef())
1854 commuteShuffle(N1, N2, MaskVec);
1855
1856 if (TLI->hasVectorBlend()) {
1857 // If shuffling a splat, try to blend the splat instead. We do this here so
1858 // that even when this arises during lowering we don't have to re-handle it.
1859 auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
1860 BitVector UndefElements;
1861 SDValue Splat = BV->getSplatValue(&UndefElements);
1862 if (!Splat)
1863 return;
1864
1865 for (int i = 0; i < NElts; ++i) {
1866 if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
1867 continue;
1868
1869 // If this input comes from undef, mark it as such.
1870 if (UndefElements[MaskVec[i] - Offset]) {
1871 MaskVec[i] = -1;
1872 continue;
1873 }
1874
1875 // If we can blend a non-undef lane, use that instead.
1876 if (!UndefElements[i])
1877 MaskVec[i] = i + Offset;
1878 }
1879 };
1880 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
1881 BlendSplat(N1BV, 0);
1882 if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
1883 BlendSplat(N2BV, NElts);
1884 }
1885
1886 // Canonicalize all indices into lhs, -> shuffle lhs, undef
1887 // Canonicalize all indices into rhs, -> shuffle rhs, undef
1888 bool AllLHS = true, AllRHS = true;
1889 bool N2Undef = N2.isUndef();
1890 for (int i = 0; i != NElts; ++i) {
1891 if (MaskVec[i] >= NElts) {
1892 if (N2Undef)
1893 MaskVec[i] = -1;
1894 else
1895 AllLHS = false;
1896 } else if (MaskVec[i] >= 0) {
1897 AllRHS = false;
1898 }
1899 }
1900 if (AllLHS && AllRHS)
1901 return getUNDEF(VT);
1902 if (AllLHS && !N2Undef)
1903 N2 = getUNDEF(VT);
1904 if (AllRHS) {
1905 N1 = getUNDEF(VT);
1906 commuteShuffle(N1, N2, MaskVec);
1907 }
1908 // Reset our undef status after accounting for the mask.
1909 N2Undef = N2.isUndef();
1910 // Re-check whether both sides ended up undef.
1911 if (N1.isUndef() && N2Undef)
1912 return getUNDEF(VT);
1913
1914 // If this is an identity shuffle, return that node.
1915 bool Identity = true, AllSame = true;
1916 for (int i = 0; i != NElts; ++i) {
1917 if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
1918 if (MaskVec[i] != MaskVec[0]) AllSame = false;
1919 }
1920 if (Identity && NElts)
1921 return N1;
1922
1923 // Shuffling a constant splat doesn't change the result.
1924 if (N2Undef) {
1925 SDValue V = N1;
1926
1927 // Look through any bitcasts. We check that these don't change the number
1928 // (and size) of elements and just change their types.
1929 while (V.getOpcode() == ISD::BITCAST)
1930 V = V->getOperand(0);
1931
1932 // A splat should always show up as a build vector node.
1933 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
1934 BitVector UndefElements;
1935 SDValue Splat = BV->getSplatValue(&UndefElements);
1936 // If this is a splat of an undef, shuffling it is also undef.
1937 if (Splat && Splat.isUndef())
1938 return getUNDEF(VT);
1939
1940 bool SameNumElts =
1941 V.getValueType().getVectorNumElements() == VT.getVectorNumElements();
1942
1943 // We only have a splat which can skip shuffles if there is a splatted
1944 // value and no undef lanes rearranged by the shuffle.
1945 if (Splat && UndefElements.none()) {
1946 // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
1947 // number of elements matches or the splatted value is a zero constant.
1948 if (SameNumElts)
1949 return N1;
1950 if (auto *C = dyn_cast<ConstantSDNode>(Splat))
1951 if (C->isZero())
1952 return N1;
1953 }
1954
1955 // If the shuffle itself creates a splat, build the vector directly.
1956 if (AllSame && SameNumElts) {
1957 EVT BuildVT = BV->getValueType(0);
1958 const SDValue &Splatted = BV->getOperand(MaskVec[0]);
1959 SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);
1960
1961 // We may have jumped through bitcasts, so the type of the
1962 // BUILD_VECTOR may not match the type of the shuffle.
1963 if (BuildVT != VT)
1964 NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
1965 return NewBV;
1966 }
1967 }
1968 }
1969
1970 FoldingSetNodeID ID;
1971 SDValue Ops[2] = { N1, N2 };
1972 AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
1973 for (int i = 0; i != NElts; ++i)
1974 ID.AddInteger(MaskVec[i]);
1975
1976 void* IP = nullptr;
1977 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1978 return SDValue(E, 0);
1979
1980 // Allocate the mask array for the node out of the BumpPtrAllocator, since
1981 // SDNode doesn't have access to it. This memory will be "leaked" when
1982 // the node is deallocated, but recovered when the NodeAllocator is released.
1983 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
1984 llvm::copy(MaskVec, MaskAlloc);
1985
1986 auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(),
1987 dl.getDebugLoc(), MaskAlloc);
1988 createOperands(N, Ops);
1989
1990 CSEMap.InsertNode(N, IP);
1991 InsertNode(N);
1992 SDValue V = SDValue(N, 0);
1993 NewSDValueDbgMsg(V, "Creating new node: ", this);
1994 return V;
1995 }
1996
1997 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) {
1998 EVT VT = SV.getValueType(0);
1999 SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end());
2000 ShuffleVectorSDNode::commuteMask(MaskVec);
2001
2002 SDValue Op0 = SV.getOperand(0);
2003 SDValue Op1 = SV.getOperand(1);
2004 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec);
2005 }
2006
2007 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
2008 FoldingSetNodeID ID;
2009 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None);
2010 ID.AddInteger(RegNo);
2011 void *IP = nullptr;
2012 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2013 return SDValue(E, 0);
2014
2015 auto *N = newSDNode<RegisterSDNode>(RegNo, VT);
2016 N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA);
2017 CSEMap.InsertNode(N, IP);
2018 InsertNode(N);
2019 return SDValue(N, 0);
2020 }
2021
2022 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
2023 FoldingSetNodeID ID;
2024 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None);
2025 ID.AddPointer(RegMask);
2026 void *IP = nullptr;
2027 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2028 return SDValue(E, 0);
2029
2030 auto *N = newSDNode<RegisterMaskSDNode>(RegMask);
2031 CSEMap.InsertNode(N, IP);
2032 InsertNode(N);
2033 return SDValue(N, 0);
2034 }
2035
2036 SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root,
2037 MCSymbol *Label) {
2038 return getLabelNode(ISD::EH_LABEL, dl, Root, Label);
2039 }
2040
2041 SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl,
2042 SDValue Root, MCSymbol *Label) {
2043 FoldingSetNodeID ID;
2044 SDValue Ops[] = { Root };
2045 AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops);
2046 ID.AddPointer(Label);
2047 void *IP = nullptr;
2048 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2049 return SDValue(E, 0);
2050
2051 auto *N =
2052 newSDNode<LabelSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), Label);
2053 createOperands(N, Ops);
2054
2055 CSEMap.InsertNode(N, IP);
2056 InsertNode(N);
2057 return SDValue(N, 0);
2058 }
2059
2060 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
2061 int64_t Offset, bool isTarget,
2062 unsigned TargetFlags) {
2063 unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
2064
2065 FoldingSetNodeID ID;
2066 AddNodeIDNode(ID, Opc, getVTList(VT), None);
2067 ID.AddPointer(BA);
2068 ID.AddInteger(Offset);
2069 ID.AddInteger(TargetFlags);
2070 void *IP = nullptr;
2071 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2072 return SDValue(E, 0);
2073
2074 auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags);
2075 CSEMap.InsertNode(N, IP);
2076 InsertNode(N);
2077 return SDValue(N, 0);
2078 }
2079
2080 SDValue SelectionDAG::getSrcValue(const Value *V) {
2081 FoldingSetNodeID ID;
2082 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None);
2083 ID.AddPointer(V);
2084
2085 void *IP = nullptr;
2086 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2087 return SDValue(E, 0);
2088
2089 auto *N = newSDNode<SrcValueSDNode>(V);
2090 CSEMap.InsertNode(N, IP);
2091 InsertNode(N);
2092 return SDValue(N, 0);
2093 }
2094
2095 SDValue SelectionDAG::getMDNode(const MDNode *MD) {
2096 FoldingSetNodeID ID;
2097 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None);
2098 ID.AddPointer(MD);
2099
2100 void *IP = nullptr;
2101 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2102 return SDValue(E, 0);
2103
2104 auto *N = newSDNode<MDNodeSDNode>(MD);
2105 CSEMap.InsertNode(N, IP);
2106 InsertNode(N);
2107 return SDValue(N, 0);
2108 }
2109
2110 SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) {
2111 if (VT == V.getValueType())
2112 return V;
2113
2114 return getNode(ISD::BITCAST, SDLoc(V), VT, V);
2115 }
2116
2117 SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr,
2118 unsigned SrcAS, unsigned DestAS) {
2119 SDValue Ops[] = {Ptr};
2120 FoldingSetNodeID ID;
2121 AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops);
2122 ID.AddInteger(SrcAS);
2123 ID.AddInteger(DestAS);
2124
2125 void *IP = nullptr;
2126 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
2127 return SDValue(E, 0);
2128
2129 auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(),
2130 VT, SrcAS, DestAS);
2131 createOperands(N, Ops);
2132
2133 CSEMap.InsertNode(N, IP);
2134 InsertNode(N);
2135 return SDValue(N, 0);
2136 }
2137
2138 SDValue SelectionDAG::getFreeze(SDValue V) {
2139 return getNode(ISD::FREEZE, SDLoc(V), V.getValueType(), V);
2140 }
2141
2142 /// getShiftAmountOperand - Return the specified value cast to
2143 /// the target's desired shift amount type.
2144 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
2145 EVT OpTy = Op.getValueType();
2146 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout());
2147 if (OpTy == ShTy || OpTy.isVector()) return Op;
2148
2149 return getZExtOrTrunc(Op, SDLoc(Op), ShTy);
2150 }
2151
2152 SDValue SelectionDAG::expandVAArg(SDNode *Node) {
2153 SDLoc dl(Node);
2154 const TargetLowering &TLI = getTargetLoweringInfo();
2155 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2156 EVT VT = Node->getValueType(0);
2157 SDValue Tmp1 = Node->getOperand(0);
2158 SDValue Tmp2 = Node->getOperand(1);
2159 const MaybeAlign MA(Node->getConstantOperandVal(3));
2160
2161 SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1,
2162 Tmp2, MachinePointerInfo(V));
2163 SDValue VAList = VAListLoad;
2164
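// If the va_arg alignment exceeds the minimum stack argument alignment, round
// the list pointer up; e.g. for a 16-byte alignment the ADD/AND below compute
// (VAList + 15) & ~15.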
2165 if (MA && *MA > TLI.getMinStackArgumentAlignment()) {
2166 VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
2167 getConstant(MA->value() - 1, dl, VAList.getValueType()));
2168
2169 VAList =
2170 getNode(ISD::AND, dl, VAList.getValueType(), VAList,
2171 getConstant(-(int64_t)MA->value(), dl, VAList.getValueType()));
2172 }
2173
2174 // Increment the pointer, VAList, to the next vaarg
2175 Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
2176 getConstant(getDataLayout().getTypeAllocSize(
2177 VT.getTypeForEVT(*getContext())),
2178 dl, VAList.getValueType()));
2179 // Store the incremented VAList to the legalized pointer
2180 Tmp1 =
2181 getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V));
2182 // Load the actual argument out of the pointer VAList
2183 return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo());
2184 }
2185
2186 SDValue SelectionDAG::expandVACopy(SDNode *Node) {
2187 SDLoc dl(Node);
2188 const TargetLowering &TLI = getTargetLoweringInfo();
2189 // This defaults to loading a pointer from the input and storing it to the
2190 // output, returning the chain.
2191 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
2192 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
2193 SDValue Tmp1 =
2194 getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0),
2195 Node->getOperand(2), MachinePointerInfo(VS));
2196 return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
2197 MachinePointerInfo(VD));
2198 }
2199
2200 Align SelectionDAG::getReducedAlign(EVT VT, bool UseABI) {
2201 const DataLayout &DL = getDataLayout();
2202 Type *Ty = VT.getTypeForEVT(*getContext());
2203 Align RedAlign = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);
2204
2205 if (TLI->isTypeLegal(VT) || !VT.isVector())
2206 return RedAlign;
2207
2208 const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
2209 const Align StackAlign = TFI->getStackAlign();
2210
2211 // See if we can choose a smaller ABI alignment in cases where it's an
2212 // illegal vector type that will get broken down.
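// E.g. an illegal wide vector that breaks down into legal v2i64 parts may get
// by with the parts' (smaller) preferred alignment instead.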
2213 if (RedAlign > StackAlign) {
2214 EVT IntermediateVT;
2215 MVT RegisterVT;
2216 unsigned NumIntermediates;
2217 TLI->getVectorTypeBreakdown(*getContext(), VT, IntermediateVT,
2218 NumIntermediates, RegisterVT);
2219 Ty = IntermediateVT.getTypeForEVT(*getContext());
2220 Align RedAlign2 = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);
2221 if (RedAlign2 < RedAlign)
2222 RedAlign = RedAlign2;
2223 }
2224
2225 return RedAlign;
2226 }
2227
2228 SDValue SelectionDAG::CreateStackTemporary(TypeSize Bytes, Align Alignment) {
2229 MachineFrameInfo &MFI = MF->getFrameInfo();
2230 const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
2231 int StackID = 0;
2232 if (Bytes.isScalable())
2233 StackID = TFI->getStackIDForScalableVectors();
2234 // The stack id gives an indication of whether the object is scalable or
2235 // not, so it's safe to pass in the minimum size here.
2236 int FrameIdx = MFI.CreateStackObject(Bytes.getKnownMinSize(), Alignment,
2237 false, nullptr, StackID);
2238 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
2239 }
2240
2241 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
2242 Type *Ty = VT.getTypeForEVT(*getContext());
2243 Align StackAlign =
2244 std::max(getDataLayout().getPrefTypeAlign(Ty), Align(minAlign));
2245 return CreateStackTemporary(VT.getStoreSize(), StackAlign);
2246 }
2247
2248 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
2249 TypeSize VT1Size = VT1.getStoreSize();
2250 TypeSize VT2Size = VT2.getStoreSize();
2251 assert(VT1Size.isScalable() == VT2Size.isScalable() &&
2252 "Don't know how to choose the maximum size when creating a stack "
2253 "temporary");
2254 TypeSize Bytes =
2255 VT1Size.getKnownMinSize() > VT2Size.getKnownMinSize() ? VT1Size : VT2Size;
2256
2257 Type *Ty1 = VT1.getTypeForEVT(*getContext());
2258 Type *Ty2 = VT2.getTypeForEVT(*getContext());
2259 const DataLayout &DL = getDataLayout();
2260 Align Align = std::max(DL.getPrefTypeAlign(Ty1), DL.getPrefTypeAlign(Ty2));
2261 return CreateStackTemporary(Bytes, Align);
2262 }
2263
2264 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
2265 ISD::CondCode Cond, const SDLoc &dl) {
2266 EVT OpVT = N1.getValueType();
2267
2268 // These setcc operations always fold.
2269 switch (Cond) {
2270 default: break;
2271 case ISD::SETFALSE:
2272 case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT);
2273 case ISD::SETTRUE:
2274 case ISD::SETTRUE2: return getBoolConstant(true, dl, VT, OpVT);
2275
2276 case ISD::SETOEQ:
2277 case ISD::SETOGT:
2278 case ISD::SETOGE:
2279 case ISD::SETOLT:
2280 case ISD::SETOLE:
2281 case ISD::SETONE:
2282 case ISD::SETO:
2283 case ISD::SETUO:
2284 case ISD::SETUEQ:
2285 case ISD::SETUNE:
2286 assert(!OpVT.isInteger() && "Illegal setcc for integer!");
2287 break;
2288 }
2289
2290 if (OpVT.isInteger()) {
2291 // For EQ and NE, we can always pick a value for the undef to make the
2292 // predicate pass or fail, so we can return undef.
2293 // Matches behavior in llvm::ConstantFoldCompareInstruction.
2294 // icmp eq/ne X, undef -> undef.
2295 if ((N1.isUndef() || N2.isUndef()) &&
2296 (Cond == ISD::SETEQ || Cond == ISD::SETNE))
2297 return getUNDEF(VT);
2298
2299 // If both operands are undef, we can return undef for int comparison.
2300 // icmp undef, undef -> undef.
2301 if (N1.isUndef() && N2.isUndef())
2302 return getUNDEF(VT);
2303
2304 // icmp X, X -> true/false
2305 // icmp X, undef -> true/false because undef could be X.
2306 if (N1 == N2)
2307 return getBoolConstant(ISD::isTrueWhenEqual(Cond), dl, VT, OpVT);
2308 }
2309
2310 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) {
2311 const APInt &C2 = N2C->getAPIntValue();
2312 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
2313 const APInt &C1 = N1C->getAPIntValue();
2314
2315 switch (Cond) {
2316 default: llvm_unreachable("Unknown integer setcc!");
2317 case ISD::SETEQ: return getBoolConstant(C1 == C2, dl, VT, OpVT);
2318 case ISD::SETNE: return getBoolConstant(C1 != C2, dl, VT, OpVT);
2319 case ISD::SETULT: return getBoolConstant(C1.ult(C2), dl, VT, OpVT);
2320 case ISD::SETUGT: return getBoolConstant(C1.ugt(C2), dl, VT, OpVT);
2321 case ISD::SETULE: return getBoolConstant(C1.ule(C2), dl, VT, OpVT);
2322 case ISD::SETUGE: return getBoolConstant(C1.uge(C2), dl, VT, OpVT);
2323 case ISD::SETLT: return getBoolConstant(C1.slt(C2), dl, VT, OpVT);
2324 case ISD::SETGT: return getBoolConstant(C1.sgt(C2), dl, VT, OpVT);
2325 case ISD::SETLE: return getBoolConstant(C1.sle(C2), dl, VT, OpVT);
2326 case ISD::SETGE: return getBoolConstant(C1.sge(C2), dl, VT, OpVT);
2327 }
2328 }
2329 }
2330
2331 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
2332 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
2333
2334 if (N1CFP && N2CFP) {
2335 APFloat::cmpResult R = N1CFP->getValueAPF().compare(N2CFP->getValueAPF());
2336 switch (Cond) {
2337 default: break;
2338 case ISD::SETEQ: if (R==APFloat::cmpUnordered)
2339 return getUNDEF(VT);
2340 LLVM_FALLTHROUGH;
2341 case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT,
2342 OpVT);
2343 case ISD::SETNE: if (R==APFloat::cmpUnordered)
2344 return getUNDEF(VT);
2345 LLVM_FALLTHROUGH;
2346 case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2347 R==APFloat::cmpLessThan, dl, VT,
2348 OpVT);
2349 case ISD::SETLT: if (R==APFloat::cmpUnordered)
2350 return getUNDEF(VT);
2351 LLVM_FALLTHROUGH;
2352 case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT,
2353 OpVT);
2354 case ISD::SETGT: if (R==APFloat::cmpUnordered)
2355 return getUNDEF(VT);
2356 LLVM_FALLTHROUGH;
2357 case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl,
2358 VT, OpVT);
2359 case ISD::SETLE: if (R==APFloat::cmpUnordered)
2360 return getUNDEF(VT);
2361 LLVM_FALLTHROUGH;
2362 case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan ||
2363 R==APFloat::cmpEqual, dl, VT,
2364 OpVT);
2365 case ISD::SETGE: if (R==APFloat::cmpUnordered)
2366 return getUNDEF(VT);
2367 LLVM_FALLTHROUGH;
2368 case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2369 R==APFloat::cmpEqual, dl, VT, OpVT);
2370 case ISD::SETO: return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT,
2371 OpVT);
2372 case ISD::SETUO: return getBoolConstant(R==APFloat::cmpUnordered, dl, VT,
2373 OpVT);
2374 case ISD::SETUEQ: return getBoolConstant(R==APFloat::cmpUnordered ||
2375 R==APFloat::cmpEqual, dl, VT,
2376 OpVT);
2377 case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT,
2378 OpVT);
2379 case ISD::SETULT: return getBoolConstant(R==APFloat::cmpUnordered ||
2380 R==APFloat::cmpLessThan, dl, VT,
2381 OpVT);
2382 case ISD::SETUGT: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2383 R==APFloat::cmpUnordered, dl, VT,
2384 OpVT);
2385 case ISD::SETULE: return getBoolConstant(R!=APFloat::cmpGreaterThan, dl,
2386 VT, OpVT);
2387 case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT,
2388 OpVT);
2389 }
2390 } else if (N1CFP && OpVT.isSimple() && !N2.isUndef()) {
2391 // Ensure that the constant occurs on the RHS.
2392 ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
2393 if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT()))
2394 return SDValue();
2395 return getSetCC(dl, VT, N2, N1, SwappedCond);
2396 } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
2397 (OpVT.isFloatingPoint() && (N1.isUndef() || N2.isUndef()))) {
2398 // If an operand is known to be a nan (or undef that could be a nan), we can
2399 // fold it.
2400 // Choosing NaN for the undef will always make unordered comparison succeed
2401 // and ordered comparison fail.
2402 // Matches behavior in llvm::ConstantFoldCompareInstruction.
2403 switch (ISD::getUnorderedFlavor(Cond)) {
2404 default:
2405 llvm_unreachable("Unknown flavor!");
2406 case 0: // Known false.
2407 return getBoolConstant(false, dl, VT, OpVT);
2408 case 1: // Known true.
2409 return getBoolConstant(true, dl, VT, OpVT);
2410 case 2: // Undefined.
2411 return getUNDEF(VT);
2412 }
2413 }
2414
2415 // Could not fold it.
2416 return SDValue();
2417 }
2418
2419 /// See if the specified operand can be simplified with the knowledge that only
2420 /// the bits specified by DemandedBits are used.
2421 /// TODO: really we should be making this into the DAG equivalent of
2422 /// SimplifyMultipleUseDemandedBits and not generate any new nodes.
2423 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits) {
2424 EVT VT = V.getValueType();
2425
2426 if (VT.isScalableVector())
2427 return SDValue();
2428
2429 APInt DemandedElts = VT.isVector()
2430 ? APInt::getAllOnes(VT.getVectorNumElements())
2431 : APInt(1, 1);
2432 return GetDemandedBits(V, DemandedBits, DemandedElts);
2433 }
2434
2435 /// See if the specified operand can be simplified with the knowledge that only
2436 /// the bits specified by DemandedBits are used in the elements specified by
2437 /// DemandedElts.
2438 /// TODO: really we should be making this into the DAG equivalent of
2439 /// SimplifyMultipleUseDemandedBits and not generate any new nodes.
2440 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits,
2441 const APInt &DemandedElts) {
2442 switch (V.getOpcode()) {
2443 default:
2444 return TLI->SimplifyMultipleUseDemandedBits(V, DemandedBits, DemandedElts,
2445 *this, 0);
2446 case ISD::Constant: {
2447 const APInt &CVal = cast<ConstantSDNode>(V)->getAPIntValue();
2448 APInt NewVal = CVal & DemandedBits;
2449 if (NewVal != CVal)
2450 return getConstant(NewVal, SDLoc(V), V.getValueType());
2451 break;
2452 }
2453 case ISD::SRL:
2454 // Only look at single-use SRLs.
2455 if (!V.getNode()->hasOneUse())
2456 break;
2457 if (auto *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
2458 // See if we can recursively simplify the LHS.
2459 unsigned Amt = RHSC->getZExtValue();
2460
2461 // Watch out for shift count overflow though.
2462 if (Amt >= DemandedBits.getBitWidth())
2463 break;
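// E.g. if only the low 8 bits of (X >> 4) are demanded, then only bits 4..11
// of X are demanded from the source.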
2464 APInt SrcDemandedBits = DemandedBits << Amt;
2465 if (SDValue SimplifyLHS =
2466 GetDemandedBits(V.getOperand(0), SrcDemandedBits))
2467 return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS,
2468 V.getOperand(1));
2469 }
2470 break;
2471 }
2472 return SDValue();
2473 }
2474
2475 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
2476 /// use this predicate to simplify operations downstream.
2477 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
2478 unsigned BitWidth = Op.getScalarValueSizeInBits();
2479 return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth);
2480 }
2481
2482 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
2483 /// this predicate to simplify operations downstream. Mask is known to be zero
2484 /// for bits that V cannot have.
2485 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
2486 unsigned Depth) const {
2487 return Mask.isSubsetOf(computeKnownBits(V, Depth).Zero);
2488 }
2489
2490 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero in
2491 /// DemandedElts. We use this predicate to simplify operations downstream.
2492 /// Mask is known to be zero for bits that V cannot have.
2493 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
2494 const APInt &DemandedElts,
2495 unsigned Depth) const {
2496 return Mask.isSubsetOf(computeKnownBits(V, DemandedElts, Depth).Zero);
2497 }
2498
2499 /// MaskedValueIsAllOnes - Return true if '(Op & Mask) == Mask'.
2500 bool SelectionDAG::MaskedValueIsAllOnes(SDValue V, const APInt &Mask,
2501 unsigned Depth) const {
2502 return Mask.isSubsetOf(computeKnownBits(V, Depth).One);
2503 }
2504
2505 /// isSplatValue - Return true if the vector V has the same value
2506 /// across all DemandedElts. For scalable vectors it does not make
2507 /// sense to specify which elements are demanded or undefined, therefore
2508 /// they are simply ignored.
2509 bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts,
2510 APInt &UndefElts, unsigned Depth) {
2511 EVT VT = V.getValueType();
2512 assert(VT.isVector() && "Vector type expected");
2513
2514 if (!VT.isScalableVector() && !DemandedElts)
2515 return false; // No demanded elts, better to assume we don't know anything.
2516
2517 if (Depth >= MaxRecursionDepth)
2518 return false; // Limit search depth.
2519
2520 // Deal with some common cases here that work for both fixed and scalable
2521 // vector types.
2522 switch (V.getOpcode()) {
2523 case ISD::SPLAT_VECTOR:
2524 UndefElts = V.getOperand(0).isUndef()
2525 ? APInt::getAllOnes(DemandedElts.getBitWidth())
2526 : APInt(DemandedElts.getBitWidth(), 0);
2527 return true;
2528 case ISD::ADD:
2529 case ISD::SUB:
2530 case ISD::AND:
2531 case ISD::XOR:
2532 case ISD::OR: {
2533 APInt UndefLHS, UndefRHS;
2534 SDValue LHS = V.getOperand(0);
2535 SDValue RHS = V.getOperand(1);
2536 if (isSplatValue(LHS, DemandedElts, UndefLHS, Depth + 1) &&
2537 isSplatValue(RHS, DemandedElts, UndefRHS, Depth + 1)) {
2538 UndefElts = UndefLHS | UndefRHS;
2539 return true;
2540 }
2541 return false;
2542 }
2543 case ISD::ABS:
2544 case ISD::TRUNCATE:
2545 case ISD::SIGN_EXTEND:
2546 case ISD::ZERO_EXTEND:
2547 return isSplatValue(V.getOperand(0), DemandedElts, UndefElts, Depth + 1);
2548 }
2549
2550 // We don't support other cases than those above for scalable vectors at
2551 // the moment.
2552 if (VT.isScalableVector())
2553 return false;
2554
2555 unsigned NumElts = VT.getVectorNumElements();
2556 assert(NumElts == DemandedElts.getBitWidth() && "Vector size mismatch");
2557 UndefElts = APInt::getZero(NumElts);
2558
2559 switch (V.getOpcode()) {
2560 case ISD::BUILD_VECTOR: {
2561 SDValue Scl;
2562 for (unsigned i = 0; i != NumElts; ++i) {
2563 SDValue Op = V.getOperand(i);
2564 if (Op.isUndef()) {
2565 UndefElts.setBit(i);
2566 continue;
2567 }
2568 if (!DemandedElts[i])
2569 continue;
2570 if (Scl && Scl != Op)
2571 return false;
2572 Scl = Op;
2573 }
2574 return true;
2575 }
2576 case ISD::VECTOR_SHUFFLE: {
2577 // Check if this is a shuffle node doing a splat.
2578 // TODO: Do we need to handle shuffle(splat, undef, mask)?
2579 int SplatIndex = -1;
2580 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask();
2581 for (int i = 0; i != (int)NumElts; ++i) {
2582 int M = Mask[i];
2583 if (M < 0) {
2584 UndefElts.setBit(i);
2585 continue;
2586 }
2587 if (!DemandedElts[i])
2588 continue;
2589 if (0 <= SplatIndex && SplatIndex != M)
2590 return false;
2591 SplatIndex = M;
2592 }
2593 return true;
2594 }
2595 case ISD::EXTRACT_SUBVECTOR: {
2596 // Offset the demanded elts by the subvector index.
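// E.g. extracting the upper half (Idx == 2) of a v4i32 source maps demanded
// result lanes {0, 1} onto source lanes {2, 3}.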
2597 SDValue Src = V.getOperand(0);
2598 // We don't support scalable vectors at the moment.
2599 if (Src.getValueType().isScalableVector())
2600 return false;
2601 uint64_t Idx = V.getConstantOperandVal(1);
2602 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2603 APInt UndefSrcElts;
2604 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
2605 if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts, Depth + 1)) {
2606 UndefElts = UndefSrcElts.extractBits(NumElts, Idx);
2607 return true;
2608 }
2609 break;
2610 }
2611 }
2612
2613 return false;
2614 }
2615
2616 /// Helper wrapper to main isSplatValue function.
2617 bool SelectionDAG::isSplatValue(SDValue V, bool AllowUndefs) {
2618 EVT VT = V.getValueType();
2619 assert(VT.isVector() && "Vector type expected");
2620
2621 APInt UndefElts;
2622 APInt DemandedElts;
2623
2624 // For now we don't support this with scalable vectors.
2625 if (!VT.isScalableVector())
2626 DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
2627 return isSplatValue(V, DemandedElts, UndefElts) &&
2628 (AllowUndefs || !UndefElts);
2629 }
2630
2631 SDValue SelectionDAG::getSplatSourceVector(SDValue V, int &SplatIdx) {
2632 V = peekThroughExtractSubvectors(V);
2633
2634 EVT VT = V.getValueType();
2635 unsigned Opcode = V.getOpcode();
2636 switch (Opcode) {
2637 default: {
2638 APInt UndefElts;
2639 APInt DemandedElts;
2640
2641 if (!VT.isScalableVector())
2642 DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
2643
2644 if (isSplatValue(V, DemandedElts, UndefElts)) {
2645 if (VT.isScalableVector()) {
2646 // DemandedElts and UndefElts are ignored for scalable vectors, since
2647 // the only supported cases are SPLAT_VECTOR nodes.
2648 SplatIdx = 0;
2649 } else {
2650 // Handle case where all demanded elements are UNDEF.
2651 if (DemandedElts.isSubsetOf(UndefElts)) {
2652 SplatIdx = 0;
2653 return getUNDEF(VT);
2654 }
2655 SplatIdx = (UndefElts & DemandedElts).countTrailingOnes();
2656 }
2657 return V;
2658 }
2659 break;
2660 }
2661 case ISD::SPLAT_VECTOR:
2662 SplatIdx = 0;
2663 return V;
2664 case ISD::VECTOR_SHUFFLE: {
2665 if (VT.isScalableVector())
2666 return SDValue();
2667
2668 // Check if this is a shuffle node doing a splat.
2669 // TODO - remove this and rely purely on SelectionDAG::isSplatValue,
2670 // getTargetVShiftNode currently struggles without the splat source.
2671 auto *SVN = cast<ShuffleVectorSDNode>(V);
2672 if (!SVN->isSplat())
2673 break;
2674 int Idx = SVN->getSplatIndex();
2675 int NumElts = V.getValueType().getVectorNumElements();
2676 SplatIdx = Idx % NumElts;
2677 return V.getOperand(Idx / NumElts);
2678 }
2679 }
2680
2681 return SDValue();
2682 }
2683
2684 SDValue SelectionDAG::getSplatValue(SDValue V, bool LegalTypes) {
2685 int SplatIdx;
2686 if (SDValue SrcVector = getSplatSourceVector(V, SplatIdx)) {
2687 EVT SVT = SrcVector.getValueType().getScalarType();
2688 EVT LegalSVT = SVT;
2689 if (LegalTypes && !TLI->isTypeLegal(SVT)) {
2690 if (!SVT.isInteger())
2691 return SDValue();
2692 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
2693 if (LegalSVT.bitsLT(SVT))
2694 return SDValue();
2695 }
2696 return getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), LegalSVT, SrcVector,
2697 getVectorIdxConstant(SplatIdx, SDLoc(V)));
2698 }
2699 return SDValue();
2700 }
2701
2702 const APInt *
2703 SelectionDAG::getValidShiftAmountConstant(SDValue V,
2704 const APInt &DemandedElts) const {
2705 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
2706 V.getOpcode() == ISD::SRA) &&
2707 "Unknown shift node");
2708 unsigned BitWidth = V.getScalarValueSizeInBits();
2709 if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1), DemandedElts)) {
2710 // Shifting more than the bitwidth is not valid.
2711 const APInt &ShAmt = SA->getAPIntValue();
2712 if (ShAmt.ult(BitWidth))
2713 return &ShAmt;
2714 }
2715 return nullptr;
2716 }
2717
2718 const APInt *SelectionDAG::getValidMinimumShiftAmountConstant(
2719 SDValue V, const APInt &DemandedElts) const {
2720 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
2721 V.getOpcode() == ISD::SRA) &&
2722 "Unknown shift node");
2723 if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts))
2724 return ValidAmt;
2725 unsigned BitWidth = V.getScalarValueSizeInBits();
2726 auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1));
2727 if (!BV)
2728 return nullptr;
2729 const APInt *MinShAmt = nullptr;
2730 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
2731 if (!DemandedElts[i])
2732 continue;
2733 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
2734 if (!SA)
2735 return nullptr;
2736 // Shifting more than the bitwidth is not valid.
2737 const APInt &ShAmt = SA->getAPIntValue();
2738 if (ShAmt.uge(BitWidth))
2739 return nullptr;
2740 if (MinShAmt && MinShAmt->ule(ShAmt))
2741 continue;
2742 MinShAmt = &ShAmt;
2743 }
2744 return MinShAmt;
2745 }
2746
2747 const APInt *SelectionDAG::getValidMaximumShiftAmountConstant(
2748 SDValue V, const APInt &DemandedElts) const {
2749 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
2750 V.getOpcode() == ISD::SRA) &&
2751 "Unknown shift node");
2752 if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts))
2753 return ValidAmt;
2754 unsigned BitWidth = V.getScalarValueSizeInBits();
2755 auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1));
2756 if (!BV)
2757 return nullptr;
2758 const APInt *MaxShAmt = nullptr;
2759 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
2760 if (!DemandedElts[i])
2761 continue;
2762 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
2763 if (!SA)
2764 return nullptr;
2765 // Shifting more than the bitwidth is not valid.
2766 const APInt &ShAmt = SA->getAPIntValue();
2767 if (ShAmt.uge(BitWidth))
2768 return nullptr;
2769 if (MaxShAmt && MaxShAmt->uge(ShAmt))
2770 continue;
2771 MaxShAmt = &ShAmt;
2772 }
2773 return MaxShAmt;
2774 }
2775
2776 /// Determine which bits of Op are known to be either zero or one and return
2777 /// them in Known. For vectors, the known bits are those that are shared by
2778 /// every vector element.
2779 KnownBits SelectionDAG::computeKnownBits(SDValue Op, unsigned Depth) const {
2780 EVT VT = Op.getValueType();
2781
2782 // TODO: Until we have a plan for how to represent demanded elements for
2783 // scalable vectors, we can just bail out for now.
2784 if (Op.getValueType().isScalableVector()) {
2785 unsigned BitWidth = Op.getScalarValueSizeInBits();
2786 return KnownBits(BitWidth);
2787 }
2788
2789 APInt DemandedElts = VT.isVector()
2790 ? APInt::getAllOnes(VT.getVectorNumElements())
2791 : APInt(1, 1);
2792 return computeKnownBits(Op, DemandedElts, Depth);
2793 }
2794
2795 /// Determine which bits of Op are known to be either zero or one and return
2796 /// them in Known. The DemandedElts argument allows us to only collect the known
2797 /// bits that are shared by the requested vector elements.
2798 KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
2799 unsigned Depth) const {
2800 unsigned BitWidth = Op.getScalarValueSizeInBits();
2801
2802 KnownBits Known(BitWidth); // Don't know anything.
2803
2804 // TODO: Until we have a plan for how to represent demanded elements for
2805 // scalable vectors, we can just bail out for now.
2806 if (Op.getValueType().isScalableVector())
2807 return Known;
2808
2809 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
2810 // We know all of the bits for a constant!
2811 return KnownBits::makeConstant(C->getAPIntValue());
2812 }
2813 if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) {
2814 // We know all of the bits for a constant fp!
2815 return KnownBits::makeConstant(C->getValueAPF().bitcastToAPInt());
2816 }
2817
2818 if (Depth >= MaxRecursionDepth)
2819 return Known; // Limit search depth.
2820
2821 KnownBits Known2;
2822 unsigned NumElts = DemandedElts.getBitWidth();
2823 assert((!Op.getValueType().isVector() ||
2824 NumElts == Op.getValueType().getVectorNumElements()) &&
2825 "Unexpected vector size");
2826
2827 if (!DemandedElts)
2828 return Known; // No demanded elts, better to assume we don't know anything.
2829
2830 unsigned Opcode = Op.getOpcode();
2831 switch (Opcode) {
2832 case ISD::BUILD_VECTOR:
2833 // Collect the known bits that are shared by every demanded vector element.
2834 Known.Zero.setAllBits(); Known.One.setAllBits();
2835 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
2836 if (!DemandedElts[i])
2837 continue;
2838
2839 SDValue SrcOp = Op.getOperand(i);
2840 Known2 = computeKnownBits(SrcOp, Depth + 1);
2841
2842 // BUILD_VECTOR can implicitly truncate sources, we must handle this.
2843 if (SrcOp.getValueSizeInBits() != BitWidth) {
2844 assert(SrcOp.getValueSizeInBits() > BitWidth &&
2845 "Expected BUILD_VECTOR implicit truncation");
2846 Known2 = Known2.trunc(BitWidth);
2847 }
2848
2849 // Known bits are the values that are shared by every demanded element.
2850 Known = KnownBits::commonBits(Known, Known2);
2851
2852 // If we don't know any bits, early out.
2853 if (Known.isUnknown())
2854 break;
2855 }
2856 break;
2857 case ISD::VECTOR_SHUFFLE: {
2858 // Collect the known bits that are shared by every vector element referenced
2859 // by the shuffle.
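// E.g. for mask <0, 5, 2, 7> with all elements demanded, lanes {0, 2} of the
// LHS and lanes {1, 3} of the RHS contribute to the result.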
2860 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
2861 Known.Zero.setAllBits(); Known.One.setAllBits();
2862 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
2863 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
2864 for (unsigned i = 0; i != NumElts; ++i) {
2865 if (!DemandedElts[i])
2866 continue;
2867
2868 int M = SVN->getMaskElt(i);
2869 if (M < 0) {
2870 // For UNDEF elements, we don't know anything about the common state of
2871 // the shuffle result.
2872 Known.resetAll();
2873 DemandedLHS.clearAllBits();
2874 DemandedRHS.clearAllBits();
2875 break;
2876 }
2877
2878 if ((unsigned)M < NumElts)
2879 DemandedLHS.setBit((unsigned)M % NumElts);
2880 else
2881 DemandedRHS.setBit((unsigned)M % NumElts);
2882 }
2883 // Known bits are the values that are shared by every demanded element.
2884 if (!!DemandedLHS) {
2885 SDValue LHS = Op.getOperand(0);
2886 Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1);
2887 Known = KnownBits::commonBits(Known, Known2);
2888 }
2889 // If we don't know any bits, early out.
2890 if (Known.isUnknown())
2891 break;
2892 if (!!DemandedRHS) {
2893 SDValue RHS = Op.getOperand(1);
2894 Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1);
2895 Known = KnownBits::commonBits(Known, Known2);
2896 }
2897 break;
2898 }
2899 case ISD::CONCAT_VECTORS: {
2900 // Split DemandedElts and test each of the demanded subvectors.
2901 Known.Zero.setAllBits(); Known.One.setAllBits();
2902 EVT SubVectorVT = Op.getOperand(0).getValueType();
2903 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
2904 unsigned NumSubVectors = Op.getNumOperands();
2905 for (unsigned i = 0; i != NumSubVectors; ++i) {
2906 APInt DemandedSub =
2907 DemandedElts.extractBits(NumSubVectorElts, i * NumSubVectorElts);
2908 if (!!DemandedSub) {
2909 SDValue Sub = Op.getOperand(i);
2910 Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1);
2911 Known = KnownBits::commonBits(Known, Known2);
2912 }
2913 // If we don't know any bits, early out.
2914 if (Known.isUnknown())
2915 break;
2916 }
2917 break;
2918 }
2919 case ISD::INSERT_SUBVECTOR: {
2920 // Demand any elements from the subvector and the remainder from the src it
2921 // is inserted into.
2922 SDValue Src = Op.getOperand(0);
2923 SDValue Sub = Op.getOperand(1);
2924 uint64_t Idx = Op.getConstantOperandVal(2);
2925 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
2926 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
2927 APInt DemandedSrcElts = DemandedElts;
2928 DemandedSrcElts.insertBits(APInt::getZero(NumSubElts), Idx);
2929
2930 Known.One.setAllBits();
2931 Known.Zero.setAllBits();
2932 if (!!DemandedSubElts) {
2933 Known = computeKnownBits(Sub, DemandedSubElts, Depth + 1);
2934 if (Known.isUnknown())
2935 break; // early-out.
2936 }
2937 if (!!DemandedSrcElts) {
2938 Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
2939 Known = KnownBits::commonBits(Known, Known2);
2940 }
2941 break;
2942 }
2943 case ISD::EXTRACT_SUBVECTOR: {
2944 // Offset the demanded elts by the subvector index.
2945 SDValue Src = Op.getOperand(0);
2946 // Bail until we can represent demanded elements for scalable vectors.
2947 if (Src.getValueType().isScalableVector())
2948 break;
2949 uint64_t Idx = Op.getConstantOperandVal(1);
2950 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2951 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
2952 Known = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
2953 break;
2954 }
2955 case ISD::SCALAR_TO_VECTOR: {
2956 // We know as much about a scalar_to_vector as we know about its source,
2957 // which becomes the first element of an otherwise unknown vector.
2958 if (DemandedElts != 1)
2959 break;
2960
2961 SDValue N0 = Op.getOperand(0);
2962 Known = computeKnownBits(N0, Depth + 1);
2963 if (N0.getValueSizeInBits() != BitWidth)
2964 Known = Known.trunc(BitWidth);
2965
2966 break;
2967 }
2968 case ISD::BITCAST: {
2969 SDValue N0 = Op.getOperand(0);
2970 EVT SubVT = N0.getValueType();
2971 unsigned SubBitWidth = SubVT.getScalarSizeInBits();
2972
2973 // Ignore bitcasts from unsupported types.
2974 if (!(SubVT.isInteger() || SubVT.isFloatingPoint()))
2975 break;
2976
2977 // Fast handling of 'identity' bitcasts.
2978 if (BitWidth == SubBitWidth) {
2979 Known = computeKnownBits(N0, DemandedElts, Depth + 1);
2980 break;
2981 }
2982
2983 bool IsLE = getDataLayout().isLittleEndian();
2984
2985 // Bitcast 'small element' vector to 'large element' scalar/vector.
2986 if ((BitWidth % SubBitWidth) == 0) {
2987 assert(N0.getValueType().isVector() && "Expected bitcast from vector");
2988
2989 // Collect known bits for the (larger) output by collecting the known
2990 // bits from each set of sub elements and shift these into place.
2991 // We need to separately call computeKnownBits for each set of
2992 // sub elements as the knownbits for each is likely to be different.
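// e.g. for a v4i8 -> v2i16 bitcast, SubScale is 2: the bits known in common
// across the even-indexed i8 elements fill one byte of each i16 result and
// those of the odd-indexed elements fill the other, ordered by endianness.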
2993 unsigned SubScale = BitWidth / SubBitWidth;
2994 APInt SubDemandedElts(NumElts * SubScale, 0);
2995 for (unsigned i = 0; i != NumElts; ++i)
2996 if (DemandedElts[i])
2997 SubDemandedElts.setBit(i * SubScale);
2998
2999 for (unsigned i = 0; i != SubScale; ++i) {
3000 Known2 = computeKnownBits(N0, SubDemandedElts.shl(i),
3001 Depth + 1);
3002 unsigned Shifts = IsLE ? i : SubScale - 1 - i;
3003 Known.insertBits(Known2, SubBitWidth * Shifts);
3004 }
3005 }
3006
3007 // Bitcast 'large element' scalar/vector to 'small element' vector.
3008 if ((SubBitWidth % BitWidth) == 0) {
3009 assert(Op.getValueType().isVector() && "Expected bitcast to vector");
3010
3011 // Collect known bits for the (smaller) output by collecting the known
3012 // bits from the overlapping larger input elements and extracting the
3013 // sub sections we actually care about.
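// e.g. for a v2i16 -> v4i8 bitcast, SubScale is 2: each demanded i8 result
// takes the 8-bit slice at its (endian-adjusted) offset within the bits known
// in common across the demanded i16 source elements.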
3014 unsigned SubScale = SubBitWidth / BitWidth;
3015 APInt SubDemandedElts =
3016 APIntOps::ScaleBitMask(DemandedElts, NumElts / SubScale);
3017 Known2 = computeKnownBits(N0, SubDemandedElts, Depth + 1);
3018
3019 Known.Zero.setAllBits(); Known.One.setAllBits();
3020 for (unsigned i = 0; i != NumElts; ++i)
3021 if (DemandedElts[i]) {
3022 unsigned Shifts = IsLE ? i : NumElts - 1 - i;
3023 unsigned Offset = (Shifts % SubScale) * BitWidth;
3024 Known = KnownBits::commonBits(Known,
3025 Known2.extractBits(BitWidth, Offset));
3026 // If we don't know any bits, early out.
3027 if (Known.isUnknown())
3028 break;
3029 }
3030 }
3031 break;
3032 }
3033 case ISD::AND:
3034 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3035 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3036
3037 Known &= Known2;
3038 break;
3039 case ISD::OR:
3040 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3041 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3042
3043 Known |= Known2;
3044 break;
3045 case ISD::XOR:
3046 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3047 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3048
3049 Known ^= Known2;
3050 break;
3051 case ISD::MUL: {
3052 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3053 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3054 Known = KnownBits::mul(Known, Known2);
3055 break;
3056 }
3057 case ISD::MULHU: {
3058 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3059 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3060 Known = KnownBits::mulhu(Known, Known2);
3061 break;
3062 }
3063 case ISD::MULHS: {
3064 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3065 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3066 Known = KnownBits::mulhs(Known, Known2);
3067 break;
3068 }
3069 case ISD::UMUL_LOHI: {
3070 assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result");
3071 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3072 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3073 if (Op.getResNo() == 0)
3074 Known = KnownBits::mul(Known, Known2);
3075 else
3076 Known = KnownBits::mulhu(Known, Known2);
3077 break;
3078 }
3079 case ISD::SMUL_LOHI: {
3080 assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result");
3081 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3082 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3083 if (Op.getResNo() == 0)
3084 Known = KnownBits::mul(Known, Known2);
3085 else
3086 Known = KnownBits::mulhs(Known, Known2);
3087 break;
3088 }
3089 case ISD::UDIV: {
3090 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3091 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3092 Known = KnownBits::udiv(Known, Known2);
3093 break;
3094 }
3095 case ISD::SELECT:
3096 case ISD::VSELECT:
3097 Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
3098 // If we don't know any bits, early out.
3099 if (Known.isUnknown())
3100 break;
3101 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth+1);
3102
3103 // Only known if known in both the LHS and RHS.
3104 Known = KnownBits::commonBits(Known, Known2);
3105 break;
3106 case ISD::SELECT_CC:
3107 Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth+1);
3108 // If we don't know any bits, early out.
3109 if (Known.isUnknown())
3110 break;
3111 Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
3112
3113 // Only known if known in both the LHS and RHS.
3114 Known = KnownBits::commonBits(Known, Known2);
3115 break;
3116 case ISD::SMULO:
3117 case ISD::UMULO:
3118 if (Op.getResNo() != 1)
3119 break;
3120 // The boolean result conforms to getBooleanContents.
3121 // If we know the result of a setcc has the top bits zero, use this info.
3122 // We know that we have an integer-based boolean since these operations
3123 // are only available for integers.
3124 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
3125 TargetLowering::ZeroOrOneBooleanContent &&
3126 BitWidth > 1)
3127 Known.Zero.setBitsFrom(1);
3128 break;
3129 case ISD::SETCC:
3130 case ISD::STRICT_FSETCC:
3131 case ISD::STRICT_FSETCCS: {
3132 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
3133 // If we know the result of a setcc has the top bits zero, use this info.
3134 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
3135 TargetLowering::ZeroOrOneBooleanContent &&
3136 BitWidth > 1)
3137 Known.Zero.setBitsFrom(1);
3138 break;
3139 }
3140 case ISD::SHL:
3141 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3142 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3143 Known = KnownBits::shl(Known, Known2);
3144
3145 // Minimum shift low bits are known zero.
3146 if (const APInt *ShMinAmt =
3147 getValidMinimumShiftAmountConstant(Op, DemandedElts))
3148 Known.Zero.setLowBits(ShMinAmt->getZExtValue());
3149 break;
3150 case ISD::SRL:
3151 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3152 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3153 Known = KnownBits::lshr(Known, Known2);
3154
3155 // Minimum shift high bits are known zero.
3156 if (const APInt *ShMinAmt =
3157 getValidMinimumShiftAmountConstant(Op, DemandedElts))
3158 Known.Zero.setHighBits(ShMinAmt->getZExtValue());
3159 break;
3160 case ISD::SRA:
3161 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3162 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3163 Known = KnownBits::ashr(Known, Known2);
3164 // TODO: Add minimum shift high known sign bits.
3165 break;
3166 case ISD::FSHL:
3167 case ISD::FSHR:
3168 if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(2), DemandedElts)) {
3169 unsigned Amt = C->getAPIntValue().urem(BitWidth);
3170
3171 // For fshl, 0-shift returns the 1st arg.
3172 // For fshr, 0-shift returns the 2nd arg.
3173 if (Amt == 0) {
3174 Known = computeKnownBits(Op.getOperand(Opcode == ISD::FSHL ? 0 : 1),
3175 DemandedElts, Depth + 1);
3176 break;
3177 }
3178
3179 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
3180 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
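// e.g. with BitWidth = 8 and Amt = 3:
//   fshl(X, Y, 3) == (X << 3) | (Y >> 5)
//   fshr(X, Y, 3) == (X << 5) | (Y >> 3)
// so the known bits of both operands are shifted into place and merged below.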
3181 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3182 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3183 if (Opcode == ISD::FSHL) {
3184 Known.One <<= Amt;
3185 Known.Zero <<= Amt;
3186 Known2.One.lshrInPlace(BitWidth - Amt);
3187 Known2.Zero.lshrInPlace(BitWidth - Amt);
3188 } else {
3189 Known.One <<= BitWidth - Amt;
3190 Known.Zero <<= BitWidth - Amt;
3191 Known2.One.lshrInPlace(Amt);
3192 Known2.Zero.lshrInPlace(Amt);
3193 }
3194 Known.One |= Known2.One;
3195 Known.Zero |= Known2.Zero;
3196 }
3197 break;
3198 case ISD::SIGN_EXTEND_INREG: {
3199 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3200 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3201 Known = Known.sextInReg(EVT.getScalarSizeInBits());
3202 break;
3203 }
3204 case ISD::CTTZ:
3205 case ISD::CTTZ_ZERO_UNDEF: {
3206 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3207 // If we have a known 1, its position is our upper bound.
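// e.g. if the operand can have at most 7 trailing zeros, the cttz result is
// at most 7 and fits in 3 bits, so all higher result bits are known zero.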
3208 unsigned PossibleTZ = Known2.countMaxTrailingZeros();
3209 unsigned LowBits = Log2_32(PossibleTZ) + 1;
3210 Known.Zero.setBitsFrom(LowBits);
3211 break;
3212 }
3213 case ISD::CTLZ:
3214 case ISD::CTLZ_ZERO_UNDEF: {
3215 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3216 // If we have a known 1, its position is our upper bound.
3217 unsigned PossibleLZ = Known2.countMaxLeadingZeros();
3218 unsigned LowBits = Log2_32(PossibleLZ) + 1;
3219 Known.Zero.setBitsFrom(LowBits);
3220 break;
3221 }
3222 case ISD::CTPOP: {
3223 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3224 // If we know some of the bits are zero, they can't be one.
3225 unsigned PossibleOnes = Known2.countMaxPopulation();
3226 Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1);
3227 break;
3228 }
3229 case ISD::PARITY: {
3230 // Parity returns 0 everywhere but the LSB.
3231 Known.Zero.setBitsFrom(1);
3232 break;
3233 }
3234 case ISD::LOAD: {
3235 LoadSDNode *LD = cast<LoadSDNode>(Op);
3236 const Constant *Cst = TLI->getTargetConstantFromLoad(LD);
3237 if (ISD::isNON_EXTLoad(LD) && Cst) {
3238 // Determine any common known bits from the loaded constant pool value.
3239 Type *CstTy = Cst->getType();
3240 if ((NumElts * BitWidth) == CstTy->getPrimitiveSizeInBits()) {
3241 // If it's a vector splat, then we can (quickly) reuse the scalar path.
3242 // NOTE: We assume all elements match and none are UNDEF.
3243 if (CstTy->isVectorTy()) {
3244 if (const Constant *Splat = Cst->getSplatValue()) {
3245 Cst = Splat;
3246 CstTy = Cst->getType();
3247 }
3248 }
3249 // TODO - do we need to handle different bitwidths?
3250 if (CstTy->isVectorTy() && BitWidth == CstTy->getScalarSizeInBits()) {
3251 // Iterate across all vector elements finding common known bits.
3252 Known.One.setAllBits();
3253 Known.Zero.setAllBits();
3254 for (unsigned i = 0; i != NumElts; ++i) {
3255 if (!DemandedElts[i])
3256 continue;
3257 if (Constant *Elt = Cst->getAggregateElement(i)) {
3258 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
3259 const APInt &Value = CInt->getValue();
3260 Known.One &= Value;
3261 Known.Zero &= ~Value;
3262 continue;
3263 }
3264 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
3265 APInt Value = CFP->getValueAPF().bitcastToAPInt();
3266 Known.One &= Value;
3267 Known.Zero &= ~Value;
3268 continue;
3269 }
3270 }
3271 Known.One.clearAllBits();
3272 Known.Zero.clearAllBits();
3273 break;
3274 }
3275 } else if (BitWidth == CstTy->getPrimitiveSizeInBits()) {
3276 if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
3277 Known = KnownBits::makeConstant(CInt->getValue());
3278 } else if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
3279 Known =
3280 KnownBits::makeConstant(CFP->getValueAPF().bitcastToAPInt());
3281 }
3282 }
3283 }
3284 } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
3285 // If this is a ZEXTLoad and we are looking at the loaded value.
3286 EVT VT = LD->getMemoryVT();
3287 unsigned MemBits = VT.getScalarSizeInBits();
3288 Known.Zero.setBitsFrom(MemBits);
3289 } else if (const MDNode *Ranges = LD->getRanges()) {
3290 if (LD->getExtensionType() == ISD::NON_EXTLOAD)
3291 computeKnownBitsFromRangeMetadata(*Ranges, Known);
3292 }
3293 break;
3294 }
3295 case ISD::ZERO_EXTEND_VECTOR_INREG: {
3296 EVT InVT = Op.getOperand(0).getValueType();
3297 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
3298 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
3299 Known = Known.zext(BitWidth);
3300 break;
3301 }
3302 case ISD::ZERO_EXTEND: {
3303 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3304 Known = Known.zext(BitWidth);
3305 break;
3306 }
3307 case ISD::SIGN_EXTEND_VECTOR_INREG: {
3308 EVT InVT = Op.getOperand(0).getValueType();
3309 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
3310 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
3311 // If the sign bit is known to be zero or one, then sext will extend
3312 // it to the top bits, else it will just zext.
3313 Known = Known.sext(BitWidth);
3314 break;
3315 }
3316 case ISD::SIGN_EXTEND: {
3317 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3318 // If the sign bit is known to be zero or one, then sext will extend
3319 // it to the top bits, else it will just zext.
3320 Known = Known.sext(BitWidth);
3321 break;
3322 }
3323 case ISD::ANY_EXTEND_VECTOR_INREG: {
3324 EVT InVT = Op.getOperand(0).getValueType();
3325 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
3326 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
3327 Known = Known.anyext(BitWidth);
3328 break;
3329 }
3330 case ISD::ANY_EXTEND: {
3331 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3332 Known = Known.anyext(BitWidth);
3333 break;
3334 }
3335 case ISD::TRUNCATE: {
3336 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3337 Known = Known.trunc(BitWidth);
3338 break;
3339 }
3340 case ISD::AssertZext: {
3341 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3342 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
3343 Known = computeKnownBits(Op.getOperand(0), Depth+1);
3344 Known.Zero |= (~InMask);
3345 Known.One &= (~Known.Zero);
3346 break;
3347 }
3348 case ISD::AssertAlign: {
3349 unsigned LogOfAlign = Log2(cast<AssertAlignSDNode>(Op)->getAlign());
3350 assert(LogOfAlign != 0);
3351 // If a node is guaranteed to be aligned, set low zero bits accordingly as
3352 // well as clearing one bits.
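// e.g. an alignment of 8 (LogOfAlign == 3) makes the low 3 bits known zero.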
3353 Known.Zero.setLowBits(LogOfAlign);
3354 Known.One.clearLowBits(LogOfAlign);
3355 break;
3356 }
3357 case ISD::FGETSIGN:
3358 // All bits are zero except the low bit.
3359 Known.Zero.setBitsFrom(1);
3360 break;
3361 case ISD::USUBO:
3362 case ISD::SSUBO:
3363 if (Op.getResNo() == 1) {
3364 // If we know the result of a setcc has the top bits zero, use this info.
3365 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3366 TargetLowering::ZeroOrOneBooleanContent &&
3367 BitWidth > 1)
3368 Known.Zero.setBitsFrom(1);
3369 break;
3370 }
3371 LLVM_FALLTHROUGH;
3372 case ISD::SUB:
3373 case ISD::SUBC: {
3374 assert(Op.getResNo() == 0 &&
3375 "We only compute knownbits for the difference here.");
3376
3377 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3378 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3379 Known = KnownBits::computeForAddSub(/* Add */ false, /* NSW */ false,
3380 Known, Known2);
3381 break;
3382 }
3383 case ISD::UADDO:
3384 case ISD::SADDO:
3385 case ISD::ADDCARRY:
3386 if (Op.getResNo() == 1) {
3387 // If we know the result of a setcc has the top bits zero, use this info.
3388 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3389 TargetLowering::ZeroOrOneBooleanContent &&
3390 BitWidth > 1)
3391 Known.Zero.setBitsFrom(1);
3392 break;
3393 }
3394 LLVM_FALLTHROUGH;
3395 case ISD::ADD:
3396 case ISD::ADDC:
3397 case ISD::ADDE: {
3398 assert(Op.getResNo() == 0 && "We only compute knownbits for the sum here.");
3399
3400 // With ADDE and ADDCARRY, a carry bit may be added in.
3401 KnownBits Carry(1);
3402 if (Opcode == ISD::ADDE)
3403 // Can't track carry from glue, set carry to unknown.
3404 Carry.resetAll();
3405 else if (Opcode == ISD::ADDCARRY)
3406 // TODO: Compute known bits for the carry operand. Not sure if it is worth
3407 // the trouble (how often will we find a known carry bit). And I haven't
3408 // tested this very much yet, but something like this might work:
3409 // Carry = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
3410 // Carry = Carry.zextOrTrunc(1, false);
3411 Carry.resetAll();
3412 else
3413 Carry.setAllZero();
3414
3415 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3416 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3417 Known = KnownBits::computeForAddCarry(Known, Known2, Carry);
3418 break;
3419 }
3420 case ISD::SREM: {
3421 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3422 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3423 Known = KnownBits::srem(Known, Known2);
3424 break;
3425 }
3426 case ISD::UREM: {
3427 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3428 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3429 Known = KnownBits::urem(Known, Known2);
3430 break;
3431 }
3432 case ISD::EXTRACT_ELEMENT: {
3433 Known = computeKnownBits(Op.getOperand(0), Depth+1);
3434 const unsigned Index = Op.getConstantOperandVal(1);
3435 const unsigned EltBitWidth = Op.getValueSizeInBits();
3436
3437 // Remove low part of known bits mask
3438 Known.Zero = Known.Zero.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
3439 Known.One = Known.One.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
3440
3441 // Remove high part of known bits mask
3442 Known = Known.trunc(EltBitWidth);
3443 break;
3444 }
3445 case ISD::EXTRACT_VECTOR_ELT: {
3446 SDValue InVec = Op.getOperand(0);
3447 SDValue EltNo = Op.getOperand(1);
3448 EVT VecVT = InVec.getValueType();
3449 // computeKnownBits not yet implemented for scalable vectors.
3450 if (VecVT.isScalableVector())
3451 break;
3452 const unsigned EltBitWidth = VecVT.getScalarSizeInBits();
3453 const unsigned NumSrcElts = VecVT.getVectorNumElements();
3454
3455 // If BitWidth > EltBitWidth the value is any-extended, so we do not know
3456 // anything about the extended bits.
3457 if (BitWidth > EltBitWidth)
3458 Known = Known.trunc(EltBitWidth);
3459
3460 // If we know the element index, just demand that vector element, else for
3461 // an unknown element index, ignore DemandedElts and demand them all.
3462 APInt DemandedSrcElts = APInt::getAllOnes(NumSrcElts);
3463 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3464 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
3465 DemandedSrcElts =
3466 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
3467
3468 Known = computeKnownBits(InVec, DemandedSrcElts, Depth + 1);
3469 if (BitWidth > EltBitWidth)
3470 Known = Known.anyext(BitWidth);
3471 break;
3472 }
3473 case ISD::INSERT_VECTOR_ELT: {
3474 // If we know the element index, split the demand between the
3475 // source vector and the inserted element, otherwise assume we need
3476 // the original demanded vector elements and the value.
3477 SDValue InVec = Op.getOperand(0);
3478 SDValue InVal = Op.getOperand(1);
3479 SDValue EltNo = Op.getOperand(2);
3480 bool DemandedVal = true;
3481 APInt DemandedVecElts = DemandedElts;
3482 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
3483 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
3484 unsigned EltIdx = CEltNo->getZExtValue();
3485 DemandedVal = !!DemandedElts[EltIdx];
3486 DemandedVecElts.clearBit(EltIdx);
3487 }
3488 Known.One.setAllBits();
3489 Known.Zero.setAllBits();
3490 if (DemandedVal) {
3491 Known2 = computeKnownBits(InVal, Depth + 1);
3492 Known = KnownBits::commonBits(Known, Known2.zextOrTrunc(BitWidth));
3493 }
3494 if (!!DemandedVecElts) {
3495 Known2 = computeKnownBits(InVec, DemandedVecElts, Depth + 1);
3496 Known = KnownBits::commonBits(Known, Known2);
3497 }
3498 break;
3499 }
3500 case ISD::BITREVERSE: {
3501 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3502 Known = Known2.reverseBits();
3503 break;
3504 }
3505 case ISD::BSWAP: {
3506 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3507 Known = Known2.byteSwap();
3508 break;
3509 }
3510 case ISD::ABS: {
3511 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3512 Known = Known2.abs();
3513 break;
3514 }
3515 case ISD::USUBSAT: {
3516 // The result of usubsat will never be larger than the LHS.
3517 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3518 Known.Zero.setHighBits(Known2.countMinLeadingZeros());
3519 break;
3520 }
3521 case ISD::UMIN: {
3522 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3523 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3524 Known = KnownBits::umin(Known, Known2);
3525 break;
3526 }
3527 case ISD::UMAX: {
3528 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3529 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3530 Known = KnownBits::umax(Known, Known2);
3531 break;
3532 }
3533 case ISD::SMIN:
3534 case ISD::SMAX: {
3535 // If we have a clamp pattern, we know that the number of sign bits will be
3536 // the minimum of the clamp min/max range.
3537 bool IsMax = (Opcode == ISD::SMAX);
3538 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
3539 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
3540 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
3541 CstHigh =
3542 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
3543 if (CstLow && CstHigh) {
3544 if (!IsMax)
3545 std::swap(CstLow, CstHigh);
3546
3547 const APInt &ValueLow = CstLow->getAPIntValue();
3548 const APInt &ValueHigh = CstHigh->getAPIntValue();
3549 if (ValueLow.sle(ValueHigh)) {
3550 unsigned LowSignBits = ValueLow.getNumSignBits();
3551 unsigned HighSignBits = ValueHigh.getNumSignBits();
3552 unsigned MinSignBits = std::min(LowSignBits, HighSignBits);
3553 if (ValueLow.isNegative() && ValueHigh.isNegative()) {
3554 Known.One.setHighBits(MinSignBits);
3555 break;
3556 }
3557 if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) {
3558 Known.Zero.setHighBits(MinSignBits);
3559 break;
3560 }
3561 }
3562 }
3563
3564 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3565 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3566 if (IsMax)
3567 Known = KnownBits::smax(Known, Known2);
3568 else
3569 Known = KnownBits::smin(Known, Known2);
3570 break;
3571 }
3572 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
3573 if (Op.getResNo() == 1) {
3574 // The boolean result conforms to getBooleanContents.
3575 // If we know the result of a setcc has the top bits zero, use this info.
3576 // We know that we have an integer-based boolean since these operations
3577 // are only available for integers.
3578 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
3579 TargetLowering::ZeroOrOneBooleanContent &&
3580 BitWidth > 1)
3581 Known.Zero.setBitsFrom(1);
3582 break;
3583 }
3584 LLVM_FALLTHROUGH;
3585 case ISD::ATOMIC_CMP_SWAP:
3586 case ISD::ATOMIC_SWAP:
3587 case ISD::ATOMIC_LOAD_ADD:
3588 case ISD::ATOMIC_LOAD_SUB:
3589 case ISD::ATOMIC_LOAD_AND:
3590 case ISD::ATOMIC_LOAD_CLR:
3591 case ISD::ATOMIC_LOAD_OR:
3592 case ISD::ATOMIC_LOAD_XOR:
3593 case ISD::ATOMIC_LOAD_NAND:
3594 case ISD::ATOMIC_LOAD_MIN:
3595 case ISD::ATOMIC_LOAD_MAX:
3596 case ISD::ATOMIC_LOAD_UMIN:
3597 case ISD::ATOMIC_LOAD_UMAX:
3598 case ISD::ATOMIC_LOAD: {
3599 unsigned MemBits =
3600 cast<AtomicSDNode>(Op)->getMemoryVT().getScalarSizeInBits();
3601 // If we are looking at the loaded value.
3602 if (Op.getResNo() == 0) {
3603 if (TLI->getExtendForAtomicOps() == ISD::ZERO_EXTEND)
3604 Known.Zero.setBitsFrom(MemBits);
3605 }
3606 break;
3607 }
3608 case ISD::FrameIndex:
3609 case ISD::TargetFrameIndex:
3610 TLI->computeKnownBitsForFrameIndex(cast<FrameIndexSDNode>(Op)->getIndex(),
3611 Known, getMachineFunction());
3612 break;
3613
3614 default:
3615 if (Opcode < ISD::BUILTIN_OP_END)
3616 break;
3617 LLVM_FALLTHROUGH;
3618 case ISD::INTRINSIC_WO_CHAIN:
3619 case ISD::INTRINSIC_W_CHAIN:
3620 case ISD::INTRINSIC_VOID:
3621 // Allow the target to implement this method for its nodes.
3622 TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth);
3623 break;
3624 }
3625
3626 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
3627 return Known;
3628 }
3629
3630 SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0,
3631 SDValue N1) const {
3632 // X + 0 never overflows
3633 if (isNullConstant(N1))
3634 return OFK_Never;
3635
3636 KnownBits N1Known = computeKnownBits(N1);
3637 if (N1Known.Zero.getBoolValue()) {
3638 KnownBits N0Known = computeKnownBits(N0);
3639
3640 bool overflow;
3641 (void)N0Known.getMaxValue().uadd_ov(N1Known.getMaxValue(), overflow);
3642 if (!overflow)
3643 return OFK_Never;
3644 }
3645
3646 // mulhi + 1 never overflows
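// (the high half of an N-bit unsigned multiply is at most 2^N - 2, so adding
// a value whose known maximum is 1 cannot wrap)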
3647 if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 &&
3648 (N1Known.getMaxValue() & 0x01) == N1Known.getMaxValue())
3649 return OFK_Never;
3650
3651 if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) {
3652 KnownBits N0Known = computeKnownBits(N0);
3653
3654 if ((N0Known.getMaxValue() & 0x01) == N0Known.getMaxValue())
3655 return OFK_Never;
3656 }
3657
3658 return OFK_Sometime;
3659 }
3660
3661 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
3662 EVT OpVT = Val.getValueType();
3663 unsigned BitWidth = OpVT.getScalarSizeInBits();
3664
3665 // Is the constant a known power of 2?
3666 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val))
3667 return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3668
3669 // A left-shift of a constant one will have exactly one bit set because
3670 // shifting the bit off the end is undefined.
3671 if (Val.getOpcode() == ISD::SHL) {
3672 auto *C = isConstOrConstSplat(Val.getOperand(0));
3673 if (C && C->getAPIntValue() == 1)
3674 return true;
3675 }
3676
3677 // Similarly, a logical right-shift of a constant sign-bit will have exactly
3678 // one bit set.
3679 if (Val.getOpcode() == ISD::SRL) {
3680 auto *C = isConstOrConstSplat(Val.getOperand(0));
3681 if (C && C->getAPIntValue().isSignMask())
3682 return true;
3683 }
3684
3685 // Are all operands of a build vector constant powers of two?
3686 if (Val.getOpcode() == ISD::BUILD_VECTOR)
3687 if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) {
3688 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E))
3689 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3690 return false;
3691 }))
3692 return true;
3693
3694 // Is the operand of a splat vector a constant power of two?
3695 if (Val.getOpcode() == ISD::SPLAT_VECTOR)
3696 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val->getOperand(0)))
3697 if (C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2())
3698 return true;
3699
3700 // More could be done here, though the above checks are enough
3701 // to handle some common cases.
3702
3703 // Fall back to computeKnownBits to catch other known cases.
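// A value is a power of two iff exactly one bit is set: if the known bits
// force both the maximum and minimum population count to 1, that must hold.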
3704 KnownBits Known = computeKnownBits(Val);
3705 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
3706 }
3707
3708 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
3709 EVT VT = Op.getValueType();
3710
3711 // TODO: Assume we don't know anything for now.
3712 if (VT.isScalableVector())
3713 return 1;
3714
3715 APInt DemandedElts = VT.isVector()
3716 ? APInt::getAllOnes(VT.getVectorNumElements())
3717 : APInt(1, 1);
3718 return ComputeNumSignBits(Op, DemandedElts, Depth);
3719 }
3720
3721 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
3722 unsigned Depth) const {
3723 EVT VT = Op.getValueType();
3724 assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!");
3725 unsigned VTBits = VT.getScalarSizeInBits();
3726 unsigned NumElts = DemandedElts.getBitWidth();
3727 unsigned Tmp, Tmp2;
3728 unsigned FirstAnswer = 1;
3729
3730 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
3731 const APInt &Val = C->getAPIntValue();
3732 return Val.getNumSignBits();
3733 }
3734
3735 if (Depth >= MaxRecursionDepth)
3736 return 1; // Limit search depth.
3737
3738 if (!DemandedElts || VT.isScalableVector())
3739 return 1; // No demanded elts, better to assume we don't know anything.
3740
3741 unsigned Opcode = Op.getOpcode();
3742 switch (Opcode) {
3743 default: break;
3744 case ISD::AssertSext:
3745 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
3746 return VTBits-Tmp+1;
3747 case ISD::AssertZext:
3748 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
3749 return VTBits-Tmp;
3750
3751 case ISD::BUILD_VECTOR:
3752 Tmp = VTBits;
3753 for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) {
3754 if (!DemandedElts[i])
3755 continue;
3756
3757 SDValue SrcOp = Op.getOperand(i);
3758 Tmp2 = ComputeNumSignBits(SrcOp, Depth + 1);
3759
3760 // BUILD_VECTOR can implicitly truncate sources, we must handle this.
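// e.g. an i32 operand with 27 sign bits implicitly truncated to an i8
// element keeps 27 - 24 = 3 sign bits; with 24 or fewer it keeps only the
// trivial minimum of 1.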
3761 if (SrcOp.getValueSizeInBits() != VTBits) {
3762 assert(SrcOp.getValueSizeInBits() > VTBits &&
3763 "Expected BUILD_VECTOR implicit truncation");
3764 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits;
3765 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1);
3766 }
3767 Tmp = std::min(Tmp, Tmp2);
3768 }
3769 return Tmp;
3770
3771 case ISD::VECTOR_SHUFFLE: {
3772 // Collect the minimum number of sign bits that are shared by every vector
3773 // element referenced by the shuffle.
3774 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
3775 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
3776 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
3777 for (unsigned i = 0; i != NumElts; ++i) {
3778 int M = SVN->getMaskElt(i);
3779 if (!DemandedElts[i])
3780 continue;
3781 // For UNDEF elements, we don't know anything about the common state of
3782 // the shuffle result.
3783 if (M < 0)
3784 return 1;
3785 if ((unsigned)M < NumElts)
3786 DemandedLHS.setBit((unsigned)M % NumElts);
3787 else
3788 DemandedRHS.setBit((unsigned)M % NumElts);
3789 }
3790 Tmp = std::numeric_limits<unsigned>::max();
3791 if (!!DemandedLHS)
3792 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
3793 if (!!DemandedRHS) {
3794 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
3795 Tmp = std::min(Tmp, Tmp2);
3796 }
3797 // If we don't know anything, early out and try computeKnownBits fall-back.
3798 if (Tmp == 1)
3799 break;
3800 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3801 return Tmp;
3802 }
3803
3804 case ISD::BITCAST: {
3805 SDValue N0 = Op.getOperand(0);
3806 EVT SrcVT = N0.getValueType();
3807 unsigned SrcBits = SrcVT.getScalarSizeInBits();
3808
3809 // Ignore bitcasts from unsupported types.
3810 if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint()))
3811 break;
3812
3813 // Fast handling of 'identity' bitcasts.
3814 if (VTBits == SrcBits)
3815 return ComputeNumSignBits(N0, DemandedElts, Depth + 1);
3816
3817 bool IsLE = getDataLayout().isLittleEndian();
3818
3819 // Bitcast 'large element' scalar/vector to 'small element' vector.
3820 if ((SrcBits % VTBits) == 0) {
3821 assert(VT.isVector() && "Expected bitcast to vector");
3822
3823 unsigned Scale = SrcBits / VTBits;
3824 APInt SrcDemandedElts =
3825 APIntOps::ScaleBitMask(DemandedElts, NumElts / Scale);
3826
3827 // Fast case - sign splat can be simply split across the small elements.
3828 Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1);
3829 if (Tmp == SrcBits)
3830 return VTBits;
3831
3832 // Slow case - determine how far the sign extends into each sub-element.
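// e.g. bitcasting an i32 with 26 sign bits to v4i8: the three most
// significant bytes are entirely sign bits, but the lowest byte (if demanded)
// only receives 26 - 24 = 2 of them, bounding the common result.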
3833 Tmp2 = VTBits;
3834 for (unsigned i = 0; i != NumElts; ++i)
3835 if (DemandedElts[i]) {
3836 unsigned SubOffset = i % Scale;
3837 SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset);
3838 SubOffset = SubOffset * VTBits;
3839 if (Tmp <= SubOffset)
3840 return 1;
3841 Tmp2 = std::min(Tmp2, Tmp - SubOffset);
3842 }
3843 return Tmp2;
3844 }
3845 break;
3846 }
3847
3848 case ISD::SIGN_EXTEND:
3849 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits();
3850 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp;
3851 case ISD::SIGN_EXTEND_INREG:
3852 // Max of the input and what this extends.
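// e.g. sign_extend_inreg of an i32 from i8 guarantees at least
// 32 - 8 + 1 = 25 sign bits, but the input may already provide more.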
3853 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
3854 Tmp = VTBits-Tmp+1;
3855 Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3856 return std::max(Tmp, Tmp2);
3857 case ISD::SIGN_EXTEND_VECTOR_INREG: {
3858 SDValue Src = Op.getOperand(0);
3859 EVT SrcVT = Src.getValueType();
3860 APInt DemandedSrcElts = DemandedElts.zextOrSelf(SrcVT.getVectorNumElements());
3861 Tmp = VTBits - SrcVT.getScalarSizeInBits();
3862 return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp;
3863 }
3864 case ISD::SRA:
3865 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3866 // SRA X, C -> adds C sign bits.
3867 if (const APInt *ShAmt =
3868 getValidMinimumShiftAmountConstant(Op, DemandedElts))
3869 Tmp = std::min<uint64_t>(Tmp + ShAmt->getZExtValue(), VTBits);
3870 return Tmp;
3871 case ISD::SHL:
3872 if (const APInt *ShAmt =
3873 getValidMaximumShiftAmountConstant(Op, DemandedElts)) {
3874 // shl destroys sign bits, ensure it doesn't shift out all sign bits.
3875 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3876 if (ShAmt->ult(Tmp))
3877 return Tmp - ShAmt->getZExtValue();
3878 }
3879 break;
3880 case ISD::AND:
3881 case ISD::OR:
3882 case ISD::XOR: // NOT is handled here.
3883 // Logical binary ops preserve the number of sign bits at the worst.
3884 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3885 if (Tmp != 1) {
3886 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
3887 FirstAnswer = std::min(Tmp, Tmp2);
3888 // We computed what we know about the sign bits as our first
3889 // answer. Now proceed to the generic code that uses
3890 // computeKnownBits, and pick whichever answer is better.
3891 }
3892 break;
3893
3894 case ISD::SELECT:
3895 case ISD::VSELECT:
3896 Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
3897 if (Tmp == 1) return 1; // Early out.
3898 Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
3899 return std::min(Tmp, Tmp2);
3900 case ISD::SELECT_CC:
3901 Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
3902 if (Tmp == 1) return 1; // Early out.
3903 Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1);
3904 return std::min(Tmp, Tmp2);
3905
3906 case ISD::SMIN:
3907 case ISD::SMAX: {
3908 // If we have a clamp pattern, we know that the number of sign bits will be
3909 // the minimum of the clamp min/max range.
3910 bool IsMax = (Opcode == ISD::SMAX);
3911 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
3912 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
3913 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
3914 CstHigh =
3915 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
3916 if (CstLow && CstHigh) {
3917 if (!IsMax)
3918 std::swap(CstLow, CstHigh);
3919 if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) {
3920 Tmp = CstLow->getAPIntValue().getNumSignBits();
3921 Tmp2 = CstHigh->getAPIntValue().getNumSignBits();
3922 return std::min(Tmp, Tmp2);
3923 }
3924 }
3925
3926 // Fallback - just get the minimum number of sign bits of the operands.
3927 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3928 if (Tmp == 1)
3929 return 1; // Early out.
3930 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
3931 return std::min(Tmp, Tmp2);
3932 }
3933 case ISD::UMIN:
3934 case ISD::UMAX:
3935 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3936 if (Tmp == 1)
3937 return 1; // Early out.
3938 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
3939 return std::min(Tmp, Tmp2);
3940 case ISD::SADDO:
3941 case ISD::UADDO:
3942 case ISD::SSUBO:
3943 case ISD::USUBO:
3944 case ISD::SMULO:
3945 case ISD::UMULO:
3946 if (Op.getResNo() != 1)
3947 break;
3948 // The boolean result conforms to getBooleanContents. Fall through.
3949 // If setcc returns 0/-1, all bits are sign bits.
3950 // We know that we have an integer-based boolean since these operations
3951 // are only available for integers.
3952 if (TLI->getBooleanContents(VT.isVector(), false) ==
3953 TargetLowering::ZeroOrNegativeOneBooleanContent)
3954 return VTBits;
3955 break;
3956 case ISD::SETCC:
3957 case ISD::STRICT_FSETCC:
3958 case ISD::STRICT_FSETCCS: {
3959 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
3960 // If setcc returns 0/-1, all bits are sign bits.
3961 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
3962 TargetLowering::ZeroOrNegativeOneBooleanContent)
3963 return VTBits;
3964 break;
3965 }
3966 case ISD::ROTL:
3967 case ISD::ROTR:
3968 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3969
3970 // If we're rotating a 0/-1 value, then it stays a 0/-1 value.
3971 if (Tmp == VTBits)
3972 return VTBits;
3973
3974 if (ConstantSDNode *C =
3975 isConstOrConstSplat(Op.getOperand(1), DemandedElts)) {
3976 unsigned RotAmt = C->getAPIntValue().urem(VTBits);
3977
3978 // Handle rotate right by N like a rotate left by 32-N.
3979 if (Opcode == ISD::ROTR)
3980 RotAmt = (VTBits - RotAmt) % VTBits;
3981
3982 // If we aren't rotating out all of the known-in sign bits, return the
3983 // number that are left. This handles rotl(sext(x), 1) for example.
3984 if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt);
3985 }
3986 break;
3987 case ISD::ADD:
3988 case ISD::ADDC:
3989 // Add can have at most one carry bit. Thus we know that the output
3990 // is, at worst, one more bit than the inputs.
3991 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3992 if (Tmp == 1) return 1; // Early out.
3993
3994 // Special case decrementing a value (ADD X, -1):
3995 if (ConstantSDNode *CRHS =
3996 isConstOrConstSplat(Op.getOperand(1), DemandedElts))
3997 if (CRHS->isAllOnes()) {
3998 KnownBits Known =
3999 computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
4000
4001 // If the input is known to be 0 or 1, the output is 0/-1, which is all
4002 // sign bits set.
4003 if ((Known.Zero | 1).isAllOnes())
4004 return VTBits;
4005
4006 // If we are subtracting one from a positive number, there is no carry
4007 // out of the result.
4008 if (Known.isNonNegative())
4009 return Tmp;
4010 }
4011
4012 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
4013 if (Tmp2 == 1) return 1; // Early out.
4014 return std::min(Tmp, Tmp2) - 1;
4015 case ISD::SUB:
4016 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
4017 if (Tmp2 == 1) return 1; // Early out.
4018
4019 // Handle NEG.
4020 if (ConstantSDNode *CLHS =
4021 isConstOrConstSplat(Op.getOperand(0), DemandedElts))
4022 if (CLHS->isZero()) {
4023 KnownBits Known =
4024 computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
4025 // If the input is known to be 0 or 1, the output is 0/-1, which is all
4026 // sign bits set.
4027 if ((Known.Zero | 1).isAllOnes())
4028 return VTBits;
4029
4030 // If the input is known to be positive (the sign bit is known clear),
4031 // the output of the NEG has the same number of sign bits as the input.
4032 if (Known.isNonNegative())
4033 return Tmp2;
4034
4035 // Otherwise, we treat this like a SUB.
4036 }
4037
4038 // Sub can have at most one carry bit. Thus we know that the output
4039 // is, at worst, one more bit than the inputs.
4040 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
4041 if (Tmp == 1) return 1; // Early out.
4042 return std::min(Tmp, Tmp2) - 1;
4043 case ISD::MUL: {
4044 // The output of the Mul can be at most twice the valid bits in the inputs.
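// e.g. multiplying two i32 values that each have 24 sign bits (9 valid bits)
// needs at most 18 valid bits, leaving 32 - 18 + 1 = 15 sign bits.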
4045 unsigned SignBitsOp0 = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
4046 if (SignBitsOp0 == 1)
4047 break;
4048 unsigned SignBitsOp1 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
4049 if (SignBitsOp1 == 1)
4050 break;
4051 unsigned OutValidBits =
4052 (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1);
4053 return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1;
4054 }
4055 case ISD::SREM:
4056 // The sign bit is the LHS's sign bit, except when the result of the
4057 // remainder is zero. The magnitude of the result should be less than or
4058 // equal to the magnitude of the LHS. Therefore, the result should have
4059 // at least as many sign bits as the left hand side.
4060 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
4061 case ISD::TRUNCATE: {
4062 // Check if the sign bits of source go down as far as the truncated value.
4063 unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
4064 unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
4065 if (NumSrcSignBits > (NumSrcBits - VTBits))
4066 return NumSrcSignBits - (NumSrcBits - VTBits);
4067 break;
4068 }
4069 case ISD::EXTRACT_ELEMENT: {
4070 const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
4071 const int BitWidth = Op.getValueSizeInBits();
4072 const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;
4073
4074 // Get reverse index (starting from 1), Op1 value indexes elements from
4075 // little end. Sign starts at big end.
4076 const int rIndex = Items - 1 - Op.getConstantOperandVal(1);
4077
4078 // If the sign portion ends in our element, the subtraction gives the correct
4079 // result. Otherwise it gives a negative or greater-than-bitwidth result.
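// e.g. splitting an i64 with KnownSign sign bits into i32 halves: the high
// half (element index 1, rIndex 0) keeps min(KnownSign, 32) sign bits, while
// the low half (rIndex 1) keeps only max(KnownSign - 32, 0).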
4080 return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
4081 }
4082 case ISD::INSERT_VECTOR_ELT: {
4083 // If we know the element index, split the demand between the
4084 // source vector and the inserted element, otherwise assume we need
4085 // the original demanded vector elements and the value.
4086 SDValue InVec = Op.getOperand(0);
4087 SDValue InVal = Op.getOperand(1);
4088 SDValue EltNo = Op.getOperand(2);
4089 bool DemandedVal = true;
4090 APInt DemandedVecElts = DemandedElts;
4091 auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
4092 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
4093 unsigned EltIdx = CEltNo->getZExtValue();
4094 DemandedVal = !!DemandedElts[EltIdx];
4095 DemandedVecElts.clearBit(EltIdx);
4096 }
4097 Tmp = std::numeric_limits<unsigned>::max();
4098 if (DemandedVal) {
4099 // TODO - handle implicit truncation of inserted elements.
4100 if (InVal.getScalarValueSizeInBits() != VTBits)
4101 break;
4102 Tmp2 = ComputeNumSignBits(InVal, Depth + 1);
4103 Tmp = std::min(Tmp, Tmp2);
4104 }
4105 if (!!DemandedVecElts) {
4106 Tmp2 = ComputeNumSignBits(InVec, DemandedVecElts, Depth + 1);
4107 Tmp = std::min(Tmp, Tmp2);
4108 }
4109 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
4110 return Tmp;
4111 }
4112 case ISD::EXTRACT_VECTOR_ELT: {
4113 SDValue InVec = Op.getOperand(0);
4114 SDValue EltNo = Op.getOperand(1);
4115 EVT VecVT = InVec.getValueType();
4116 // ComputeNumSignBits not yet implemented for scalable vectors.
4117 if (VecVT.isScalableVector())
4118 break;
4119 const unsigned BitWidth = Op.getValueSizeInBits();
4120 const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
4121 const unsigned NumSrcElts = VecVT.getVectorNumElements();
4122
4123 // If BitWidth > EltBitWidth the value is any-extended, and we do not know
4124 // anything about sign bits. But if the sizes match we can derive knowledge
4125 // about sign bits from the vector operand.
4126 if (BitWidth != EltBitWidth)
4127 break;
4128
4129 // If we know the element index, just demand that vector element, else for
4130 // an unknown element index, ignore DemandedElts and demand them all.
4131 APInt DemandedSrcElts = APInt::getAllOnes(NumSrcElts);
4132 auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
4133 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
4134 DemandedSrcElts =
4135 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
4136
4137 return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1);
4138 }
4139 case ISD::EXTRACT_SUBVECTOR: {
4140 // Offset the demanded elts by the subvector index.
4141 SDValue Src = Op.getOperand(0);
4142 // Bail until we can represent demanded elements for scalable vectors.
4143 if (Src.getValueType().isScalableVector())
4144 break;
4145 uint64_t Idx = Op.getConstantOperandVal(1);
4146 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
4147 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
4148 return ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
4149 }
4150 case ISD::CONCAT_VECTORS: {
4151 // Determine the minimum number of sign bits across all demanded
4152 // elts of the input vectors. Early out if the result is already 1.
4153 Tmp = std::numeric_limits<unsigned>::max();
4154 EVT SubVectorVT = Op.getOperand(0).getValueType();
4155 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
4156 unsigned NumSubVectors = Op.getNumOperands();
4157 for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
4158 APInt DemandedSub =
4159 DemandedElts.extractBits(NumSubVectorElts, i * NumSubVectorElts);
4160 if (!DemandedSub)
4161 continue;
4162 Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1);
4163 Tmp = std::min(Tmp, Tmp2);
4164 }
4165 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
4166 return Tmp;
4167 }
4168 case ISD::INSERT_SUBVECTOR: {
4169 // Demand any elements from the subvector and the remainder from the src it's
4170 // inserted into.
4171 SDValue Src = Op.getOperand(0);
4172 SDValue Sub = Op.getOperand(1);
4173 uint64_t Idx = Op.getConstantOperandVal(2);
4174 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
4175 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
4176 APInt DemandedSrcElts = DemandedElts;
4177 DemandedSrcElts.insertBits(APInt::getZero(NumSubElts), Idx);
4178
4179 Tmp = std::numeric_limits<unsigned>::max();
4180 if (!!DemandedSubElts) {
4181 Tmp = ComputeNumSignBits(Sub, DemandedSubElts, Depth + 1);
4182 if (Tmp == 1)
4183 return 1; // early-out
4184 }
4185 if (!!DemandedSrcElts) {
4186 Tmp2 = ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
4187 Tmp = std::min(Tmp, Tmp2);
4188 }
4189 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
4190 return Tmp;
4191 }
4192 case ISD::ATOMIC_CMP_SWAP:
4193 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
4194 case ISD::ATOMIC_SWAP:
4195 case ISD::ATOMIC_LOAD_ADD:
4196 case ISD::ATOMIC_LOAD_SUB:
4197 case ISD::ATOMIC_LOAD_AND:
4198 case ISD::ATOMIC_LOAD_CLR:
4199 case ISD::ATOMIC_LOAD_OR:
4200 case ISD::ATOMIC_LOAD_XOR:
4201 case ISD::ATOMIC_LOAD_NAND:
4202 case ISD::ATOMIC_LOAD_MIN:
4203 case ISD::ATOMIC_LOAD_MAX:
4204 case ISD::ATOMIC_LOAD_UMIN:
4205 case ISD::ATOMIC_LOAD_UMAX:
4206 case ISD::ATOMIC_LOAD: {
4207 Tmp = cast<AtomicSDNode>(Op)->getMemoryVT().getScalarSizeInBits();
4208 // If we are looking at the loaded value.
4209 if (Op.getResNo() == 0) {
4210 if (Tmp == VTBits)
4211 return 1; // early-out
4212 if (TLI->getExtendForAtomicOps() == ISD::SIGN_EXTEND)
4213 return VTBits - Tmp + 1;
4214 if (TLI->getExtendForAtomicOps() == ISD::ZERO_EXTEND)
4215 return VTBits - Tmp;
4216 }
4217 break;
4218 }
4219 }
4220
4221 // If we are looking at the loaded value of the SDNode.
4222 if (Op.getResNo() == 0) {
4223 // Handle LOADX separately here. EXTLOAD case will fallthrough.
4224 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
4225 unsigned ExtType = LD->getExtensionType();
4226 switch (ExtType) {
4227 default: break;
4228 case ISD::SEXTLOAD: // e.g. i16->i32 = '17' bits known.
4229 Tmp = LD->getMemoryVT().getScalarSizeInBits();
4230 return VTBits - Tmp + 1;
4231 case ISD::ZEXTLOAD: // e.g. i16->i32 = '16' bits known.
4232 Tmp = LD->getMemoryVT().getScalarSizeInBits();
4233 return VTBits - Tmp;
4234 case ISD::NON_EXTLOAD:
4235 if (const Constant *Cst = TLI->getTargetConstantFromLoad(LD)) {
4236 // We only need to handle vectors - computeKnownBits should handle
4237 // scalar cases.
4238 Type *CstTy = Cst->getType();
4239 if (CstTy->isVectorTy() &&
4240 (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits()) {
4241 Tmp = VTBits;
4242 for (unsigned i = 0; i != NumElts; ++i) {
4243 if (!DemandedElts[i])
4244 continue;
4245 if (Constant *Elt = Cst->getAggregateElement(i)) {
4246 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
4247 const APInt &Value = CInt->getValue();
4248 Tmp = std::min(Tmp, Value.getNumSignBits());
4249 continue;
4250 }
4251 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
4252 APInt Value = CFP->getValueAPF().bitcastToAPInt();
4253 Tmp = std::min(Tmp, Value.getNumSignBits());
4254 continue;
4255 }
4256 }
4257 // Unknown type. Conservatively assume no bits match sign bit.
4258 return 1;
4259 }
4260 return Tmp;
4261 }
4262 }
4263 break;
4264 }
4265 }
4266 }
4267
4268 // Allow the target to implement this method for its nodes.
4269 if (Opcode >= ISD::BUILTIN_OP_END ||
4270 Opcode == ISD::INTRINSIC_WO_CHAIN ||
4271 Opcode == ISD::INTRINSIC_W_CHAIN ||
4272 Opcode == ISD::INTRINSIC_VOID) {
4273 unsigned NumBits =
4274 TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth);
4275 if (NumBits > 1)
4276 FirstAnswer = std::max(FirstAnswer, NumBits);
4277 }
4278
4279 // Finally, if we can prove that the top bits of the result are 0's or 1's,
4280 // use this information.
4281 KnownBits Known = computeKnownBits(Op, DemandedElts, Depth);
4282
4283 APInt Mask;
4284 if (Known.isNonNegative()) { // sign bit is 0
4285 Mask = Known.Zero;
4286 } else if (Known.isNegative()) { // sign bit is 1;
4287 Mask = Known.One;
4288 } else {
4289 // Nothing known.
4290 return FirstAnswer;
4291 }
4292
4293 // Okay, we know that the sign bit in Mask is set. Use CLO to determine
4294 // the number of identical bits in the top of the input value.
4295 Mask <<= Mask.getBitWidth()-VTBits;
4296 return std::max(FirstAnswer, Mask.countLeadingOnes());
4297 }
4298
4299 bool SelectionDAG::isGuaranteedNotToBeUndefOrPoison(SDValue Op, bool PoisonOnly,
4300 unsigned Depth) const {
4301 // Early out for FREEZE.
4302 if (Op.getOpcode() == ISD::FREEZE)
4303 return true;
4304
4305 // TODO: Assume we don't know anything for now.
4306 EVT VT = Op.getValueType();
4307 if (VT.isScalableVector())
4308 return false;
4309
4310 APInt DemandedElts = VT.isVector()
4311 ? APInt::getAllOnes(VT.getVectorNumElements())
4312 : APInt(1, 1);
4313 return isGuaranteedNotToBeUndefOrPoison(Op, DemandedElts, PoisonOnly, Depth);
4314 }
4315
4316 bool SelectionDAG::isGuaranteedNotToBeUndefOrPoison(SDValue Op,
4317 const APInt &DemandedElts,
4318 bool PoisonOnly,
4319 unsigned Depth) const {
4320 unsigned Opcode = Op.getOpcode();
4321
4322 // Early out for FREEZE.
4323 if (Opcode == ISD::FREEZE)
4324 return true;
4325
4326 if (Depth >= MaxRecursionDepth)
4327 return false; // Limit search depth.
4328
4329 if (isIntOrFPConstant(Op))
4330 return true;
4331
4332 switch (Opcode) {
4333 case ISD::UNDEF:
4334 return PoisonOnly;
4335
4336 case ISD::BUILD_VECTOR:
4337 // NOTE: BUILD_VECTOR has implicit truncation of wider scalar elements -
4338 // this shouldn't affect the result.
4339 for (unsigned i = 0, e = Op.getNumOperands(); i < e; ++i) {
4340 if (!DemandedElts[i])
4341 continue;
4342 if (!isGuaranteedNotToBeUndefOrPoison(Op.getOperand(i), PoisonOnly,
4343 Depth + 1))
4344 return false;
4345 }
4346 return true;
4347
4348 // TODO: Search for noundef attributes from library functions.
4349
4350 // TODO: Pointers dereferenced by ISD::LOAD/STORE ops are noundef.
4351
4352 default:
4353 // Allow the target to implement this method for its nodes.
4354 if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::INTRINSIC_WO_CHAIN ||
4355 Opcode == ISD::INTRINSIC_W_CHAIN || Opcode == ISD::INTRINSIC_VOID)
4356 return TLI->isGuaranteedNotToBeUndefOrPoisonForTargetNode(
4357 Op, DemandedElts, *this, PoisonOnly, Depth);
4358 break;
4359 }
4360
4361 return false;
4362 }
4363
4364 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
4365 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
4366 !isa<ConstantSDNode>(Op.getOperand(1)))
4367 return false;
4368
4369 if (Op.getOpcode() == ISD::OR &&
4370 !MaskedValueIsZero(Op.getOperand(0), Op.getConstantOperandAPInt(1)))
4371 return false;
4372
4373 return true;
4374 }
4375
4376 bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN, unsigned Depth) const {
4377 // If we're told that NaNs won't happen, assume they won't.
4378 if (getTarget().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs())
4379 return true;
4380
4381 if (Depth >= MaxRecursionDepth)
4382 return false; // Limit search depth.
4383
4384 // TODO: Handle vectors.
4385 // If the value is a constant, we can obviously see if it is a NaN or not.
4386 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) {
4387 return !C->getValueAPF().isNaN() ||
4388 (SNaN && !C->getValueAPF().isSignaling());
4389 }
4390
4391 unsigned Opcode = Op.getOpcode();
4392 switch (Opcode) {
4393 case ISD::FADD:
4394 case ISD::FSUB:
4395 case ISD::FMUL:
4396 case ISD::FDIV:
4397 case ISD::FREM:
4398 case ISD::FSIN:
4399 case ISD::FCOS: {
4400 if (SNaN)
4401 return true;
4402 // TODO: Need isKnownNeverInfinity
4403 return false;
4404 }
4405 case ISD::FCANONICALIZE:
4406 case ISD::FEXP:
4407 case ISD::FEXP2:
4408 case ISD::FTRUNC:
4409 case ISD::FFLOOR:
4410 case ISD::FCEIL:
4411 case ISD::FROUND:
4412 case ISD::FROUNDEVEN:
4413 case ISD::FRINT:
4414 case ISD::FNEARBYINT: {
4415 if (SNaN)
4416 return true;
4417 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4418 }
4419 case ISD::FABS:
4420 case ISD::FNEG:
4421 case ISD::FCOPYSIGN: {
4422 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4423 }
4424 case ISD::SELECT:
4425 return isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4426 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4427 case ISD::FP_EXTEND:
4428 case ISD::FP_ROUND: {
4429 if (SNaN)
4430 return true;
4431 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4432 }
4433 case ISD::SINT_TO_FP:
4434 case ISD::UINT_TO_FP:
4435 return true;
4436 case ISD::FMA:
4437 case ISD::FMAD: {
4438 if (SNaN)
4439 return true;
4440 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4441 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4442 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4443 }
4444 case ISD::FSQRT: // Needs the operand to be known positive
4445 case ISD::FLOG:
4446 case ISD::FLOG2:
4447 case ISD::FLOG10:
4448 case ISD::FPOWI:
4449 case ISD::FPOW: {
4450 if (SNaN)
4451 return true;
4452 // TODO: Refine on operand
4453 return false;
4454 }
4455 case ISD::FMINNUM:
4456 case ISD::FMAXNUM: {
4457 // Only one needs to be known not-nan, since it will be returned if the
4458 // other ends up being one.
4459 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) ||
4460 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4461 }
4462 case ISD::FMINNUM_IEEE:
4463 case ISD::FMAXNUM_IEEE: {
4464 if (SNaN)
4465 return true;
4466 // This can return a NaN if either operand is an sNaN, or if both operands
4467 // are NaN.
4468 return (isKnownNeverNaN(Op.getOperand(0), false, Depth + 1) &&
4469 isKnownNeverSNaN(Op.getOperand(1), Depth + 1)) ||
4470 (isKnownNeverNaN(Op.getOperand(1), false, Depth + 1) &&
4471 isKnownNeverSNaN(Op.getOperand(0), Depth + 1));
4472 }
4473 case ISD::FMINIMUM:
4474 case ISD::FMAXIMUM: {
4475 // TODO: Does this quiet or return the original NaN as-is?
4476 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4477 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4478 }
4479 case ISD::EXTRACT_VECTOR_ELT: {
4480 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4481 }
4482 default:
4483 if (Opcode >= ISD::BUILTIN_OP_END ||
4484 Opcode == ISD::INTRINSIC_WO_CHAIN ||
4485 Opcode == ISD::INTRINSIC_W_CHAIN ||
4486 Opcode == ISD::INTRINSIC_VOID) {
4487 return TLI->isKnownNeverNaNForTargetNode(Op, *this, SNaN, Depth);
4488 }
4489
4490 return false;
4491 }
4492 }
4493
4494 bool SelectionDAG::isKnownNeverZeroFloat(SDValue Op) const {
4495 assert(Op.getValueType().isFloatingPoint() &&
4496 "Floating point type expected");
4497
4498 // If the value is a constant, we can obviously see if it is a zero or not.
4499 // TODO: Add BuildVector support.
4500 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
4501 return !C->isZero();
4502 return false;
4503 }
4504
4505 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
4506 assert(!Op.getValueType().isFloatingPoint() &&
4507 "Floating point types unsupported - use isKnownNeverZeroFloat");
4508
4509 // If the value is a constant, we can obviously see if it is a zero or not.
4510 if (ISD::matchUnaryPredicate(Op,
4511 [](ConstantSDNode *C) { return !C->isZero(); }))
4512 return true;
4513
4514 // TODO: Recognize more cases here.
4515 switch (Op.getOpcode()) {
4516 default: break;
4517 case ISD::OR:
4518 if (isKnownNeverZero(Op.getOperand(1)) ||
4519 isKnownNeverZero(Op.getOperand(0)))
4520 return true;
4521 break;
4522 }
4523
4524 return false;
4525 }
4526
4527 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
4528 // Check the obvious case.
4529 if (A == B) return true;
4530
4531 // Check for negative and positive zero.
4532 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
4533 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
4534 if (CA->isZero() && CB->isZero()) return true;
4535
4536 // Otherwise they may not be equal.
4537 return false;
4538 }
4539
4540 // FIXME: unify with llvm::haveNoCommonBitsSet.
4541 // FIXME: could also handle masked merge pattern (X & ~M) op (Y & M)
4542 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
4543 assert(A.getValueType() == B.getValueType() &&
4544 "Values must have the same type");
4545 return KnownBits::haveNoCommonBitsSet(computeKnownBits(A),
4546 computeKnownBits(B));
4547 }
4548
4549 static SDValue FoldSTEP_VECTOR(const SDLoc &DL, EVT VT, SDValue Step,
4550 SelectionDAG &DAG) {
4551 if (cast<ConstantSDNode>(Step)->isZero())
4552 return DAG.getConstant(0, DL, VT);
4553
4554 return SDValue();
4555 }
4556
4557 static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT,
4558 ArrayRef<SDValue> Ops,
4559 SelectionDAG &DAG) {
4560 int NumOps = Ops.size();
4561 assert(NumOps != 0 && "Can't build an empty vector!");
4562 assert(!VT.isScalableVector() &&
4563 "BUILD_VECTOR cannot be used with scalable types");
4564 assert(VT.getVectorNumElements() == (unsigned)NumOps &&
4565 "Incorrect element count in BUILD_VECTOR!");
4566
4567 // BUILD_VECTOR of UNDEFs is UNDEF.
4568 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4569 return DAG.getUNDEF(VT);
4570
4571 // A BUILD_VECTOR of sequential extracts from the same vector of the same type is the identity (that vector).
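// e.g. (build_vector (extract_elt V, 0), (extract_elt V, 1), ...) -> V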
4572 SDValue IdentitySrc;
4573 bool IsIdentity = true;
4574 for (int i = 0; i != NumOps; ++i) {
4575 if (Ops[i].getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
4576 Ops[i].getOperand(0).getValueType() != VT ||
4577 (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) ||
4578 !isa<ConstantSDNode>(Ops[i].getOperand(1)) ||
4579 cast<ConstantSDNode>(Ops[i].getOperand(1))->getAPIntValue() != i) {
4580 IsIdentity = false;
4581 break;
4582 }
4583 IdentitySrc = Ops[i].getOperand(0);
4584 }
4585 if (IsIdentity)
4586 return IdentitySrc;
4587
4588 return SDValue();
4589 }
4590
4591 /// Try to simplify vector concatenation to an input value, undef, or build
4592 /// vector.
4593 static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
4594 ArrayRef<SDValue> Ops,
4595 SelectionDAG &DAG) {
4596 assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
4597 assert(llvm::all_of(Ops,
4598 [Ops](SDValue Op) {
4599 return Ops[0].getValueType() == Op.getValueType();
4600 }) &&
4601 "Concatenation of vectors with inconsistent value types!");
4602 assert((Ops[0].getValueType().getVectorElementCount() * Ops.size()) ==
4603 VT.getVectorElementCount() &&
4604 "Incorrect element count in vector concatenation!");
4605
4606 if (Ops.size() == 1)
4607 return Ops[0];
4608
4609 // Concat of UNDEFs is UNDEF.
4610 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4611 return DAG.getUNDEF(VT);
4612
4613 // Scan the operands and look for extract operations from a single source
4614 // that correspond to insertion at the same location via this concatenation:
4615 // concat (extract X, 0*subvec_elts), (extract X, 1*subvec_elts), ...
4616 SDValue IdentitySrc;
4617 bool IsIdentity = true;
4618 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
4619 SDValue Op = Ops[i];
4620 unsigned IdentityIndex = i * Op.getValueType().getVectorMinNumElements();
4621 if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
4622 Op.getOperand(0).getValueType() != VT ||
4623 (IdentitySrc && Op.getOperand(0) != IdentitySrc) ||
4624 Op.getConstantOperandVal(1) != IdentityIndex) {
4625 IsIdentity = false;
4626 break;
4627 }
4628 assert((!IdentitySrc || IdentitySrc == Op.getOperand(0)) &&
4629 "Unexpected identity source vector for concat of extracts");
4630 IdentitySrc = Op.getOperand(0);
4631 }
4632 if (IsIdentity) {
4633 assert(IdentitySrc && "Failed to set source vector of extracts");
4634 return IdentitySrc;
4635 }
4636
4637 // The code below this point is only designed to work for fixed width
4638 // vectors, so we bail out for now.
4639 if (VT.isScalableVector())
4640 return SDValue();
4641
4642 // A CONCAT_VECTOR with all UNDEF/BUILD_VECTOR operands can be
4643 // simplified to one big BUILD_VECTOR.
4644 // FIXME: Add support for SCALAR_TO_VECTOR as well.
4645 EVT SVT = VT.getScalarType();
4646 SmallVector<SDValue, 16> Elts;
4647 for (SDValue Op : Ops) {
4648 EVT OpVT = Op.getValueType();
4649 if (Op.isUndef())
4650 Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
4651 else if (Op.getOpcode() == ISD::BUILD_VECTOR)
4652 Elts.append(Op->op_begin(), Op->op_end());
4653 else
4654 return SDValue();
4655 }
4656
4657 // BUILD_VECTOR requires all inputs to be of the same type, find the
4658 // maximum type and extend them all.
4659 for (SDValue Op : Elts)
4660 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
4661
4662 if (SVT.bitsGT(VT.getScalarType())) {
4663 for (SDValue &Op : Elts) {
4664 if (Op.isUndef())
4665 Op = DAG.getUNDEF(SVT);
4666 else
4667 Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
4668 ? DAG.getZExtOrTrunc(Op, DL, SVT)
4669 : DAG.getSExtOrTrunc(Op, DL, SVT);
4670 }
4671 }
4672
4673 SDValue V = DAG.getBuildVector(VT, DL, Elts);
4674 NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG);
4675 return V;
4676 }
4677
4678 /// Gets or creates the specified node.
4679 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
4680 FoldingSetNodeID ID;
4681 AddNodeIDNode(ID, Opcode, getVTList(VT), None);
4682 void *IP = nullptr;
4683 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
4684 return SDValue(E, 0);
4685
4686 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4687 getVTList(VT));
4688 CSEMap.InsertNode(N, IP);
4689
4690 InsertNode(N);
4691 SDValue V = SDValue(N, 0);
4692 NewSDValueDbgMsg(V, "Creating new node: ", this);
4693 return V;
4694 }
4695
4696 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4697 SDValue Operand) {
4698 SDNodeFlags Flags;
4699 if (Inserter)
4700 Flags = Inserter->getFlags();
4701 return getNode(Opcode, DL, VT, Operand, Flags);
4702 }
4703
4704 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4705 SDValue Operand, const SDNodeFlags Flags) {
4706 assert(Operand.getOpcode() != ISD::DELETED_NODE &&
4707 "Operand is DELETED_NODE!");
4708 // Constant fold unary operations with an integer constant operand. Even
4709 // opaque constant will be folded, because the folding of unary operations
4710 // doesn't create new constants with different values. Nevertheless, the
4711 // opaque flag is preserved during folding to prevent future folding with
4712 // other constants.
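// For example, (sext (opaque i8 -1) to i32) folds to an opaque i32 -1: the
// value is unchanged and the opaque flag is carried over.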
4713 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) {
4714 const APInt &Val = C->getAPIntValue();
4715 switch (Opcode) {
4716 default: break;
4717 case ISD::SIGN_EXTEND:
4718 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
4719 C->isTargetOpcode(), C->isOpaque());
4720 case ISD::TRUNCATE:
4721 if (C->isOpaque())
4722 break;
4723 LLVM_FALLTHROUGH;
4724 case ISD::ZERO_EXTEND:
4725 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
4726 C->isTargetOpcode(), C->isOpaque());
4727 case ISD::ANY_EXTEND:
4728 // Some targets, such as RISC-V, prefer to sign extend some types.
4729 if (TLI->isSExtCheaperThanZExt(Operand.getValueType(), VT))
4730 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
4731 C->isTargetOpcode(), C->isOpaque());
4732 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
4733 C->isTargetOpcode(), C->isOpaque());
4734 case ISD::UINT_TO_FP:
4735 case ISD::SINT_TO_FP: {
4736 APFloat apf(EVTToAPFloatSemantics(VT),
4737 APInt::getZero(VT.getSizeInBits()));
4738 (void)apf.convertFromAPInt(Val,
4739 Opcode==ISD::SINT_TO_FP,
4740 APFloat::rmNearestTiesToEven);
4741 return getConstantFP(apf, DL, VT);
4742 }
4743 case ISD::BITCAST:
4744 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16)
4745 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT);
4746 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
4747 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT);
4748 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
4749 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT);
4750 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128)
4751 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT);
4752 break;
4753 case ISD::ABS:
4754 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(),
4755 C->isOpaque());
4756 case ISD::BITREVERSE:
4757 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(),
4758 C->isOpaque());
4759 case ISD::BSWAP:
4760 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(),
4761 C->isOpaque());
4762 case ISD::CTPOP:
4763 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(),
4764 C->isOpaque());
4765 case ISD::CTLZ:
4766 case ISD::CTLZ_ZERO_UNDEF:
4767 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(),
4768 C->isOpaque());
4769 case ISD::CTTZ:
4770 case ISD::CTTZ_ZERO_UNDEF:
4771 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(),
4772 C->isOpaque());
4773 case ISD::FP16_TO_FP: {
4774 bool Ignored;
4775 APFloat FPV(APFloat::IEEEhalf(),
4776 (Val.getBitWidth() == 16) ? Val : Val.trunc(16));
4777
4778 // This can return overflow, underflow, or inexact; we don't care.
4779 // FIXME need to be more flexible about rounding mode.
4780 (void)FPV.convert(EVTToAPFloatSemantics(VT),
4781 APFloat::rmNearestTiesToEven, &Ignored);
4782 return getConstantFP(FPV, DL, VT);
4783 }
4784 case ISD::STEP_VECTOR: {
4785 if (SDValue V = FoldSTEP_VECTOR(DL, VT, Operand, *this))
4786 return V;
4787 break;
4788 }
4789 }
4790 }
4791
4792 // Constant fold unary operations with a floating point constant operand.
4793 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) {
4794 APFloat V = C->getValueAPF(); // make copy
4795 switch (Opcode) {
4796 case ISD::FNEG:
4797 V.changeSign();
4798 return getConstantFP(V, DL, VT);
4799 case ISD::FABS:
4800 V.clearSign();
4801 return getConstantFP(V, DL, VT);
4802 case ISD::FCEIL: {
4803 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
4804 if (fs == APFloat::opOK || fs == APFloat::opInexact)
4805 return getConstantFP(V, DL, VT);
4806 break;
4807 }
4808 case ISD::FTRUNC: {
4809 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
4810 if (fs == APFloat::opOK || fs == APFloat::opInexact)
4811 return getConstantFP(V, DL, VT);
4812 break;
4813 }
4814 case ISD::FFLOOR: {
4815 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
4816 if (fs == APFloat::opOK || fs == APFloat::opInexact)
4817 return getConstantFP(V, DL, VT);
4818 break;
4819 }
4820 case ISD::FP_EXTEND: {
4821 bool ignored;
4822 // This can return overflow, underflow, or inexact; we don't care.
4823 // FIXME need to be more flexible about rounding mode.
4824 (void)V.convert(EVTToAPFloatSemantics(VT),
4825 APFloat::rmNearestTiesToEven, &ignored);
4826 return getConstantFP(V, DL, VT);
4827 }
4828 case ISD::FP_TO_SINT:
4829 case ISD::FP_TO_UINT: {
4830 bool ignored;
4831 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT);
4832 // FIXME need to be more flexible about rounding mode.
4833 APFloat::opStatus s =
4834 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored);
4835 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual
4836 break;
4837 return getConstant(IntVal, DL, VT);
4838 }
4839 case ISD::BITCAST:
4840 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16)
4841 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
4842 if (VT == MVT::i16 && C->getValueType(0) == MVT::bf16)
4843 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
4844 if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
4845 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
4846 if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
4847 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
4848 break;
4849 case ISD::FP_TO_FP16: {
4850 bool Ignored;
4851 // This can return overflow, underflow, or inexact; we don't care.
4852 // FIXME need to be more flexible about rounding mode.
4853 (void)V.convert(APFloat::IEEEhalf(),
4854 APFloat::rmNearestTiesToEven, &Ignored);
4855 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
4856 }
4857 }
4858 }
4859
4860 // Constant fold unary operations with a vector integer or float operand.
4861 switch (Opcode) {
4862 default:
4863 // FIXME: Entirely reasonable to perform folding of other unary
4864 // operations here as the need arises.
4865 break;
4866 case ISD::FNEG:
4867 case ISD::FABS:
4868 case ISD::FCEIL:
4869 case ISD::FTRUNC:
4870 case ISD::FFLOOR:
4871 case ISD::FP_EXTEND:
4872 case ISD::FP_TO_SINT:
4873 case ISD::FP_TO_UINT:
4874 case ISD::TRUNCATE:
4875 case ISD::ANY_EXTEND:
4876 case ISD::ZERO_EXTEND:
4877 case ISD::SIGN_EXTEND:
4878 case ISD::UINT_TO_FP:
4879 case ISD::SINT_TO_FP:
4880 case ISD::ABS:
4881 case ISD::BITREVERSE:
4882 case ISD::BSWAP:
4883 case ISD::CTLZ:
4884 case ISD::CTLZ_ZERO_UNDEF:
4885 case ISD::CTTZ:
4886 case ISD::CTTZ_ZERO_UNDEF:
4887 case ISD::CTPOP: {
4888 SDValue Ops = {Operand};
4889 if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
4890 return Fold;
4891 }
4892 }
4893
4894 unsigned OpOpcode = Operand.getNode()->getOpcode();
4895 switch (Opcode) {
4896 case ISD::STEP_VECTOR:
4897 assert(VT.isScalableVector() &&
4898 "STEP_VECTOR can only be used with scalable types");
4899 assert(OpOpcode == ISD::TargetConstant &&
4900 VT.getVectorElementType() == Operand.getValueType() &&
4901 "Unexpected step operand");
4902 break;
4903 case ISD::FREEZE:
4904 assert(VT == Operand.getValueType() && "Unexpected VT!");
4905 break;
4906 case ISD::TokenFactor:
4907 case ISD::MERGE_VALUES:
4908 case ISD::CONCAT_VECTORS:
4909 return Operand; // Factor, merge or concat of one node? No need.
4910 case ISD::BUILD_VECTOR: {
4911 // Attempt to simplify BUILD_VECTOR.
4912 SDValue Ops[] = {Operand};
4913 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
4914 return V;
4915 break;
4916 }
4917 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
4918 case ISD::FP_EXTEND:
4919 assert(VT.isFloatingPoint() &&
4920 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
4921 if (Operand.getValueType() == VT) return Operand; // noop conversion.
4922 assert((!VT.isVector() ||
4923 VT.getVectorElementCount() ==
4924 Operand.getValueType().getVectorElementCount()) &&
4925 "Vector element count mismatch!");
4926 assert(Operand.getValueType().bitsLT(VT) &&
4927 "Invalid fpext node, dst < src!");
4928 if (Operand.isUndef())
4929 return getUNDEF(VT);
4930 break;
4931 case ISD::FP_TO_SINT:
4932 case ISD::FP_TO_UINT:
4933 if (Operand.isUndef())
4934 return getUNDEF(VT);
4935 break;
4936 case ISD::SINT_TO_FP:
4937 case ISD::UINT_TO_FP:
4938 // [us]itofp(undef) = 0, because the result value is bounded.
4939 if (Operand.isUndef())
4940 return getConstantFP(0.0, DL, VT);
4941 break;
4942 case ISD::SIGN_EXTEND:
4943 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4944 "Invalid SIGN_EXTEND!");
4945 assert(VT.isVector() == Operand.getValueType().isVector() &&
4946 "SIGN_EXTEND result type type should be vector iff the operand "
4947 "type is vector!");
4948 if (Operand.getValueType() == VT) return Operand; // noop extension
4949 assert((!VT.isVector() ||
4950 VT.getVectorElementCount() ==
4951 Operand.getValueType().getVectorElementCount()) &&
4952 "Vector element count mismatch!");
4953 assert(Operand.getValueType().bitsLT(VT) &&
4954 "Invalid sext node, dst < src!");
4955 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
4956 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4957 if (OpOpcode == ISD::UNDEF)
4958 // sext(undef) = 0, because the top bits will all be the same.
4959 return getConstant(0, DL, VT);
4960 break;
4961 case ISD::ZERO_EXTEND:
4962 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4963 "Invalid ZERO_EXTEND!");
4964 assert(VT.isVector() == Operand.getValueType().isVector() &&
4965 "ZERO_EXTEND result type type should be vector iff the operand "
4966 "type is vector!");
4967 if (Operand.getValueType() == VT) return Operand; // noop extension
4968 assert((!VT.isVector() ||
4969 VT.getVectorElementCount() ==
4970 Operand.getValueType().getVectorElementCount()) &&
4971 "Vector element count mismatch!");
4972 assert(Operand.getValueType().bitsLT(VT) &&
4973 "Invalid zext node, dst < src!");
4974 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x)
4975 return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0));
4976 if (OpOpcode == ISD::UNDEF)
4977 // zext(undef) = 0, because the top bits will be zero.
4978 return getConstant(0, DL, VT);
4979 break;
4980 case ISD::ANY_EXTEND:
4981 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4982 "Invalid ANY_EXTEND!");
4983 assert(VT.isVector() == Operand.getValueType().isVector() &&
4984 "ANY_EXTEND result type type should be vector iff the operand "
4985 "type is vector!");
4986 if (Operand.getValueType() == VT) return Operand; // noop extension
4987 assert((!VT.isVector() ||
4988 VT.getVectorElementCount() ==
4989 Operand.getValueType().getVectorElementCount()) &&
4990 "Vector element count mismatch!");
4991 assert(Operand.getValueType().bitsLT(VT) &&
4992 "Invalid anyext node, dst < src!");
4993
4994 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4995 OpOpcode == ISD::ANY_EXTEND)
4996 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
4997 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4998 if (OpOpcode == ISD::UNDEF)
4999 return getUNDEF(VT);
5000
5001 // (ext (trunc x)) -> x
5002 if (OpOpcode == ISD::TRUNCATE) {
5003 SDValue OpOp = Operand.getOperand(0);
5004 if (OpOp.getValueType() == VT) {
5005 transferDbgValues(Operand, OpOp);
5006 return OpOp;
5007 }
5008 }
5009 break;
5010 case ISD::TRUNCATE:
5011 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
5012 "Invalid TRUNCATE!");
5013 assert(VT.isVector() == Operand.getValueType().isVector() &&
5014 "TRUNCATE result type type should be vector iff the operand "
5015 "type is vector!");
5016 if (Operand.getValueType() == VT) return Operand; // noop truncate
5017 assert((!VT.isVector() ||
5018 VT.getVectorElementCount() ==
5019 Operand.getValueType().getVectorElementCount()) &&
5020 "Vector element count mismatch!");
5021 assert(Operand.getValueType().bitsGT(VT) &&
5022 "Invalid truncate node, src < dst!");
5023 if (OpOpcode == ISD::TRUNCATE)
5024 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
5025 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
5026 OpOpcode == ISD::ANY_EXTEND) {
5027 // If the source is smaller than the dest, we still need an extend.
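// e.g. (trunc (zext i8 X to i64) to i16) -> (zext i8 X to i16)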
5028 if (Operand.getOperand(0).getValueType().getScalarType()
5029 .bitsLT(VT.getScalarType()))
5030 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
5031 if (Operand.getOperand(0).getValueType().bitsGT(VT))
5032 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
5033 return Operand.getOperand(0);
5034 }
5035 if (OpOpcode == ISD::UNDEF)
5036 return getUNDEF(VT);
5037 break;
5038 case ISD::ANY_EXTEND_VECTOR_INREG:
5039 case ISD::ZERO_EXTEND_VECTOR_INREG:
5040 case ISD::SIGN_EXTEND_VECTOR_INREG:
5041 assert(VT.isVector() && "This DAG node is restricted to vector types.");
5042 assert(Operand.getValueType().bitsLE(VT) &&
5043 "The input must be the same size or smaller than the result.");
5044 assert(VT.getVectorMinNumElements() <
5045 Operand.getValueType().getVectorMinNumElements() &&
5046 "The destination vector type must have fewer lanes than the input.");
5047 break;
5048 case ISD::ABS:
5049 assert(VT.isInteger() && VT == Operand.getValueType() &&
5050 "Invalid ABS!");
5051 if (OpOpcode == ISD::UNDEF)
5052 return getUNDEF(VT);
5053 break;
5054 case ISD::BSWAP:
5055 assert(VT.isInteger() && VT == Operand.getValueType() &&
5056 "Invalid BSWAP!");
5057 assert((VT.getScalarSizeInBits() % 16 == 0) &&
5058 "BSWAP types must be a multiple of 16 bits!");
5059 if (OpOpcode == ISD::UNDEF)
5060 return getUNDEF(VT);
5061 break;
5062 case ISD::BITREVERSE:
5063 assert(VT.isInteger() && VT == Operand.getValueType() &&
5064 "Invalid BITREVERSE!");
5065 if (OpOpcode == ISD::UNDEF)
5066 return getUNDEF(VT);
5067 break;
5068 case ISD::BITCAST:
5069 // Basic sanity checking.
5070 assert(VT.getSizeInBits() == Operand.getValueSizeInBits() &&
5071 "Cannot BITCAST between types of different sizes!");
5072 if (VT == Operand.getValueType()) return Operand; // noop conversion.
5073 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
5074 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
5075 if (OpOpcode == ISD::UNDEF)
5076 return getUNDEF(VT);
5077 break;
5078 case ISD::SCALAR_TO_VECTOR:
5079 assert(VT.isVector() && !Operand.getValueType().isVector() &&
5080 (VT.getVectorElementType() == Operand.getValueType() ||
5081 (VT.getVectorElementType().isInteger() &&
5082 Operand.getValueType().isInteger() &&
5083 VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
5084 "Illegal SCALAR_TO_VECTOR node!");
5085 if (OpOpcode == ISD::UNDEF)
5086 return getUNDEF(VT);
5087 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
5088 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
5089 isa<ConstantSDNode>(Operand.getOperand(1)) &&
5090 Operand.getConstantOperandVal(1) == 0 &&
5091 Operand.getOperand(0).getValueType() == VT)
5092 return Operand.getOperand(0);
5093 break;
5094 case ISD::FNEG:
5095 // Negation of an unknown bag of bits is still completely undefined.
5096 if (OpOpcode == ISD::UNDEF)
5097 return getUNDEF(VT);
5098
5099 if (OpOpcode == ISD::FNEG) // --X -> X
5100 return Operand.getOperand(0);
5101 break;
5102 case ISD::FABS:
5103 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X)
5104 return getNode(ISD::FABS, DL, VT, Operand.getOperand(0));
5105 break;
5106 case ISD::VSCALE:
5107 assert(VT == Operand.getValueType() && "Unexpected VT!");
5108 break;
5109 case ISD::CTPOP:
5110 if (Operand.getValueType().getScalarType() == MVT::i1)
5111 return Operand;
5112 break;
5113 case ISD::CTLZ:
5114 case ISD::CTTZ:
5115 if (Operand.getValueType().getScalarType() == MVT::i1)
5116 return getNOT(DL, Operand, Operand.getValueType());
5117 break;
5118 case ISD::VECREDUCE_SMIN:
5119 case ISD::VECREDUCE_UMAX:
5120 if (Operand.getValueType().getScalarType() == MVT::i1)
5121 return getNode(ISD::VECREDUCE_OR, DL, VT, Operand);
5122 break;
5123 case ISD::VECREDUCE_SMAX:
5124 case ISD::VECREDUCE_UMIN:
5125 if (Operand.getValueType().getScalarType() == MVT::i1)
5126 return getNode(ISD::VECREDUCE_AND, DL, VT, Operand);
5127 break;
5128 }
5129
5130 SDNode *N;
5131 SDVTList VTs = getVTList(VT);
5132 SDValue Ops[] = {Operand};
5133 if (VT != MVT::Glue) { // Don't CSE flag producing nodes
5134 FoldingSetNodeID ID;
5135 AddNodeIDNode(ID, Opcode, VTs, Ops);
5136 void *IP = nullptr;
5137 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
5138 E->intersectFlagsWith(Flags);
5139 return SDValue(E, 0);
5140 }
5141
5142 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5143 N->setFlags(Flags);
5144 createOperands(N, Ops);
5145 CSEMap.InsertNode(N, IP);
5146 } else {
5147 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5148 createOperands(N, Ops);
5149 }
5150
5151 InsertNode(N);
5152 SDValue V = SDValue(N, 0);
5153 NewSDValueDbgMsg(V, "Creating new node: ", this);
5154 return V;
5155 }
5156
5157 static llvm::Optional<APInt> FoldValue(unsigned Opcode, const APInt &C1,
5158 const APInt &C2) {
5159 switch (Opcode) {
5160 case ISD::ADD: return C1 + C2;
5161 case ISD::SUB: return C1 - C2;
5162 case ISD::MUL: return C1 * C2;
5163 case ISD::AND: return C1 & C2;
5164 case ISD::OR: return C1 | C2;
5165 case ISD::XOR: return C1 ^ C2;
5166 case ISD::SHL: return C1 << C2;
5167 case ISD::SRL: return C1.lshr(C2);
5168 case ISD::SRA: return C1.ashr(C2);
5169 case ISD::ROTL: return C1.rotl(C2);
5170 case ISD::ROTR: return C1.rotr(C2);
5171 case ISD::SMIN: return C1.sle(C2) ? C1 : C2;
5172 case ISD::SMAX: return C1.sge(C2) ? C1 : C2;
5173 case ISD::UMIN: return C1.ule(C2) ? C1 : C2;
5174 case ISD::UMAX: return C1.uge(C2) ? C1 : C2;
5175 case ISD::SADDSAT: return C1.sadd_sat(C2);
5176 case ISD::UADDSAT: return C1.uadd_sat(C2);
5177 case ISD::SSUBSAT: return C1.ssub_sat(C2);
5178 case ISD::USUBSAT: return C1.usub_sat(C2);
5179 case ISD::UDIV:
5180 if (!C2.getBoolValue())
5181 break;
5182 return C1.udiv(C2);
5183 case ISD::UREM:
5184 if (!C2.getBoolValue())
5185 break;
5186 return C1.urem(C2);
5187 case ISD::SDIV:
5188 if (!C2.getBoolValue())
5189 break;
5190 return C1.sdiv(C2);
5191 case ISD::SREM:
5192 if (!C2.getBoolValue())
5193 break;
5194 return C1.srem(C2);
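// MULHS/MULHU fold by multiplying in twice the bit width and returning the
// high half, e.g. for i8, MULHU of 0xff and 0xff is 0xfe01, giving 0xfe.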
5195 case ISD::MULHS: {
5196 unsigned FullWidth = C1.getBitWidth() * 2;
5197 APInt C1Ext = C1.sext(FullWidth);
5198 APInt C2Ext = C2.sext(FullWidth);
5199 return (C1Ext * C2Ext).extractBits(C1.getBitWidth(), C1.getBitWidth());
5200 }
5201 case ISD::MULHU: {
5202 unsigned FullWidth = C1.getBitWidth() * 2;
5203 APInt C1Ext = C1.zext(FullWidth);
5204 APInt C2Ext = C2.zext(FullWidth);
5205 return (C1Ext * C2Ext).extractBits(C1.getBitWidth(), C1.getBitWidth());
5206 }
5207 }
5208 return llvm::None;
5209 }
5210
5211 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT,
5212 const GlobalAddressSDNode *GA,
5213 const SDNode *N2) {
5214 if (GA->getOpcode() != ISD::GlobalAddress)
5215 return SDValue();
5216 if (!TLI->isOffsetFoldingLegal(GA))
5217 return SDValue();
5218 auto *C2 = dyn_cast<ConstantSDNode>(N2);
5219 if (!C2)
5220 return SDValue();
5221 int64_t Offset = C2->getSExtValue();
5222 switch (Opcode) {
5223 case ISD::ADD: break;
5224 case ISD::SUB: Offset = -uint64_t(Offset); break;
5225 default: return SDValue();
5226 }
5227 return getGlobalAddress(GA->getGlobal(), SDLoc(C2), VT,
5228 GA->getOffset() + uint64_t(Offset));
5229 }
5230
5231 bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) {
5232 switch (Opcode) {
5233 case ISD::SDIV:
5234 case ISD::UDIV:
5235 case ISD::SREM:
5236 case ISD::UREM: {
5237 // If a divisor is zero/undef or any element of a divisor vector is
5238 // zero/undef, the whole op is undef.
5239 assert(Ops.size() == 2 && "Div/rem should have 2 operands");
5240 SDValue Divisor = Ops[1];
5241 if (Divisor.isUndef() || isNullConstant(Divisor))
5242 return true;
5243
5244 return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) &&
5245 llvm::any_of(Divisor->op_values(),
5246 [](SDValue V) { return V.isUndef() ||
5247 isNullConstant(V); });
5248 // TODO: Handle signed overflow.
5249 }
5250 // TODO: Handle oversized shifts.
5251 default:
5252 return false;
5253 }
5254 }
5255
5256 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
5257 EVT VT, ArrayRef<SDValue> Ops) {
5258 // If the opcode is a target-specific ISD node, there's nothing we can
5259 // do here and the operand rules may not line up with the below, so
5260 // bail early.
5261 // We can't create a scalar CONCAT_VECTORS so skip it. It will break
5262 // for concats involving SPLAT_VECTOR. Concats of BUILD_VECTORS are handled by
5263 // foldCONCAT_VECTORS in getNode before this is called.
5264 if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::CONCAT_VECTORS)
5265 return SDValue();
5266
5267 // For now, the array Ops should only contain two values.
5268 // This enforcement will be removed once this function is merged with
5269 // FoldConstantVectorArithmetic
5270 if (Ops.size() != 2)
5271 return SDValue();
5272
5273 if (isUndef(Opcode, Ops))
5274 return getUNDEF(VT);
5275
5276 SDNode *N1 = Ops[0].getNode();
5277 SDNode *N2 = Ops[1].getNode();
5278
5279 // Handle the case of two scalars.
5280 if (auto *C1 = dyn_cast<ConstantSDNode>(N1)) {
5281 if (auto *C2 = dyn_cast<ConstantSDNode>(N2)) {
5282 if (C1->isOpaque() || C2->isOpaque())
5283 return SDValue();
5284
5285 Optional<APInt> FoldAttempt =
5286 FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue());
5287 if (!FoldAttempt)
5288 return SDValue();
5289
5290 SDValue Folded = getConstant(FoldAttempt.getValue(), DL, VT);
5291 assert((!Folded || !VT.isVector()) &&
5292 "Can't fold vectors ops with scalar operands");
5293 return Folded;
5294 }
5295 }
5296
5297 // fold (add Sym, c) -> Sym+c
5298 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N1))
5299 return FoldSymbolOffset(Opcode, VT, GA, N2);
5300 if (TLI->isCommutativeBinOp(Opcode))
5301 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N2))
5302 return FoldSymbolOffset(Opcode, VT, GA, N1);
5303
5304 // For fixed width vectors, extract each constant element and fold them
5305 // individually. Either input may be an undef value.
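// e.g. (add (build_vector 1, 2), (build_vector 10, 20)) -> (build_vector 11, 22)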
5306 bool IsBVOrSV1 = N1->getOpcode() == ISD::BUILD_VECTOR ||
5307 N1->getOpcode() == ISD::SPLAT_VECTOR;
5308 if (!IsBVOrSV1 && !N1->isUndef())
5309 return SDValue();
5310 bool IsBVOrSV2 = N2->getOpcode() == ISD::BUILD_VECTOR ||
5311 N2->getOpcode() == ISD::SPLAT_VECTOR;
5312 if (!IsBVOrSV2 && !N2->isUndef())
5313 return SDValue();
5314 // If both operands are undef, that's handled the same way as scalars.
5315 if (!IsBVOrSV1 && !IsBVOrSV2)
5316 return SDValue();
5317
5318 EVT SVT = VT.getScalarType();
5319 EVT LegalSVT = SVT;
5320 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
5321 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
5322 if (LegalSVT.bitsLT(SVT))
5323 return SDValue();
5324 }
5325
5326 SmallVector<SDValue, 4> Outputs;
5327 unsigned NumOps = 0;
5328 if (IsBVOrSV1)
5329 NumOps = std::max(NumOps, N1->getNumOperands());
5330 if (IsBVOrSV2)
5331 NumOps = std::max(NumOps, N2->getNumOperands());
5332 assert(NumOps != 0 && "Expected non-zero operands");
5333 // Scalable vectors should only be SPLAT_VECTOR or UNDEF here. We only need
5334 // one iteration for that.
5335 assert((!VT.isScalableVector() || NumOps == 1) &&
5336 "Scalable vector should only have one scalar");
5337
5338 for (unsigned I = 0; I != NumOps; ++I) {
5339 // We can have a fixed length SPLAT_VECTOR and a BUILD_VECTOR so we need
5340 // to use operand 0 of the SPLAT_VECTOR for each fixed element.
5341 SDValue V1;
5342 if (N1->getOpcode() == ISD::BUILD_VECTOR)
5343 V1 = N1->getOperand(I);
5344 else if (N1->getOpcode() == ISD::SPLAT_VECTOR)
5345 V1 = N1->getOperand(0);
5346 else
5347 V1 = getUNDEF(SVT);
5348
5349 SDValue V2;
5350 if (N2->getOpcode() == ISD::BUILD_VECTOR)
5351 V2 = N2->getOperand(I);
5352 else if (N2->getOpcode() == ISD::SPLAT_VECTOR)
5353 V2 = N2->getOperand(0);
5354 else
5355 V2 = getUNDEF(SVT);
5356
5357 if (SVT.isInteger()) {
5358 if (V1.getValueType().bitsGT(SVT))
5359 V1 = getNode(ISD::TRUNCATE, DL, SVT, V1);
5360 if (V2.getValueType().bitsGT(SVT))
5361 V2 = getNode(ISD::TRUNCATE, DL, SVT, V2);
5362 }
5363
5364 if (V1.getValueType() != SVT || V2.getValueType() != SVT)
5365 return SDValue();
5366
5367 // Fold one vector element.
5368 SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2);
5369 if (LegalSVT != SVT)
5370 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
5371
5372 // Scalar folding only succeeded if the result is a constant or UNDEF.
5373 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
5374 ScalarResult.getOpcode() != ISD::ConstantFP)
5375 return SDValue();
5376 Outputs.push_back(ScalarResult);
5377 }
5378
5379 if (N1->getOpcode() == ISD::BUILD_VECTOR ||
5380 N2->getOpcode() == ISD::BUILD_VECTOR) {
5381 assert(VT.getVectorNumElements() == Outputs.size() &&
5382 "Vector size mismatch!");
5383
5384 // Build a big vector out of the scalar elements we generated.
5385 return getBuildVector(VT, SDLoc(), Outputs);
5386 }
5387
5388 assert((N1->getOpcode() == ISD::SPLAT_VECTOR ||
5389 N2->getOpcode() == ISD::SPLAT_VECTOR) &&
5390 "One operand should be a splat vector");
5391
5392 assert(Outputs.size() == 1 && "Vector size mismatch!");
5393 return getSplatVector(VT, SDLoc(), Outputs[0]);
5394 }
5395
5396 // TODO: Merge with FoldConstantArithmetic
5397 SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode,
5398 const SDLoc &DL, EVT VT,
5399 ArrayRef<SDValue> Ops,
5400 const SDNodeFlags Flags) {
5401 // If the opcode is a target-specific ISD node, there's nothing we can
5402 // do here and the operand rules may not line up with the below, so
5403 // bail early.
5404 if (Opcode >= ISD::BUILTIN_OP_END)
5405 return SDValue();
5406
5407 if (isUndef(Opcode, Ops))
5408 return getUNDEF(VT);
5409
5410 // We can only fold vectors - maybe merge with FoldConstantArithmetic someday?
5411 if (!VT.isVector())
5412 return SDValue();
5413
5414 ElementCount NumElts = VT.getVectorElementCount();
5415
5416 auto IsScalarOrSameVectorSize = [NumElts](const SDValue &Op) {
5417 return !Op.getValueType().isVector() ||
5418 Op.getValueType().getVectorElementCount() == NumElts;
5419 };
5420
5421 auto IsConstantBuildVectorSplatVectorOrUndef = [](const SDValue &Op) {
5422 APInt SplatVal;
5423 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op);
5424 return Op.isUndef() || Op.getOpcode() == ISD::CONDCODE ||
5425 (BV && BV->isConstant()) ||
5426 (Op.getOpcode() == ISD::SPLAT_VECTOR &&
5427 ISD::isConstantSplatVector(Op.getNode(), SplatVal));
5428 };
5429
5430 // All operands must be vector types with the same number of elements as
5431 // the result type and must be either UNDEF or a build vector of constant
5432 // or UNDEF scalars.
5433 if (!llvm::all_of(Ops, IsConstantBuildVectorSplatVectorOrUndef) ||
5434 !llvm::all_of(Ops, IsScalarOrSameVectorSize))
5435 return SDValue();
5436
5437 // If we are comparing vectors, then the result needs to be an i1 boolean
5438 // that is then sign-extended back to the legal result type.
5439 EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType());
5440
5441 // Find legal integer scalar type for constant promotion and
5442 // ensure that its scalar size is at least as large as source.
5443 EVT LegalSVT = VT.getScalarType();
5444 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
5445 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
5446 if (LegalSVT.bitsLT(VT.getScalarType()))
5447 return SDValue();
5448 }
5449
5450 // For scalable vector types we know we're dealing with SPLAT_VECTORs. We
5451 // only have one operand to check. For fixed-length vector types we may have
5452 // a combination of BUILD_VECTOR and SPLAT_VECTOR.
5453 unsigned NumOperands = NumElts.isScalable() ? 1 : NumElts.getFixedValue();
5454
5455 // Constant fold each scalar lane separately.
5456 SmallVector<SDValue, 4> ScalarResults;
5457 for (unsigned I = 0; I != NumOperands; I++) {
5458 SmallVector<SDValue, 4> ScalarOps;
5459 for (SDValue Op : Ops) {
5460 EVT InSVT = Op.getValueType().getScalarType();
5461 if (Op.getOpcode() != ISD::BUILD_VECTOR &&
5462 Op.getOpcode() != ISD::SPLAT_VECTOR) {
5463 // We've checked that this is UNDEF or a constant of some kind.
5464 if (Op.isUndef())
5465 ScalarOps.push_back(getUNDEF(InSVT));
5466 else
5467 ScalarOps.push_back(Op);
5468 continue;
5469 }
5470
5471 SDValue ScalarOp =
5472 Op.getOperand(Op.getOpcode() == ISD::SPLAT_VECTOR ? 0 : I);
5473 EVT ScalarVT = ScalarOp.getValueType();
5474
5475 // Build vector (integer) scalar operands may need implicit
5476 // truncation - do this before constant folding.
5477 if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT))
5478 ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp);
5479
5480 ScalarOps.push_back(ScalarOp);
5481 }
5482
5483 // Constant fold the scalar operands.
5484 SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags);
5485
5486 // Legalize the (integer) scalar constant if necessary.
5487 if (LegalSVT != SVT)
5488 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
5489
5490 // Scalar folding only succeeded if the result is a constant or UNDEF.
5491 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
5492 ScalarResult.getOpcode() != ISD::ConstantFP)
5493 return SDValue();
5494 ScalarResults.push_back(ScalarResult);
5495 }
5496
5497 SDValue V = NumElts.isScalable() ? getSplatVector(VT, DL, ScalarResults[0])
5498 : getBuildVector(VT, DL, ScalarResults);
5499 NewSDValueDbgMsg(V, "New node fold constant vector: ", this);
5500 return V;
5501 }
5502
5503 SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL,
5504 EVT VT, SDValue N1, SDValue N2) {
5505 // TODO: We don't do any constant folding for strict FP opcodes here, but we
5506 // should. That will require dealing with a potentially non-default
5507 // rounding mode, checking the "opStatus" return value from the APFloat
5508 // math calculations, and possibly other variations.
5509 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode());
5510 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode());
5511 if (N1CFP && N2CFP) {
5512 APFloat C1 = N1CFP->getValueAPF(), C2 = N2CFP->getValueAPF();
5513 switch (Opcode) {
5514 case ISD::FADD:
5515 C1.add(C2, APFloat::rmNearestTiesToEven);
5516 return getConstantFP(C1, DL, VT);
5517 case ISD::FSUB:
5518 C1.subtract(C2, APFloat::rmNearestTiesToEven);
5519 return getConstantFP(C1, DL, VT);
5520 case ISD::FMUL:
5521 C1.multiply(C2, APFloat::rmNearestTiesToEven);
5522 return getConstantFP(C1, DL, VT);
5523 case ISD::FDIV:
5524 C1.divide(C2, APFloat::rmNearestTiesToEven);
5525 return getConstantFP(C1, DL, VT);
5526 case ISD::FREM:
5527 C1.mod(C2);
5528 return getConstantFP(C1, DL, VT);
5529 case ISD::FCOPYSIGN:
5530 C1.copySign(C2);
5531 return getConstantFP(C1, DL, VT);
5532 default: break;
5533 }
5534 }
5535 if (N1CFP && Opcode == ISD::FP_ROUND) {
5536 APFloat C1 = N1CFP->getValueAPF(); // make copy
5537 bool Unused;
5538 // This can return overflow, underflow, or inexact; we don't care.
5539 // FIXME need to be more flexible about rounding mode.
5540 (void) C1.convert(EVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven,
5541 &Unused);
5542 return getConstantFP(C1, DL, VT);
5543 }
5544
5545 switch (Opcode) {
5546 case ISD::FSUB:
5547 // -0.0 - undef --> undef (consistent with "fneg undef")
5548 if (N1CFP && N1CFP->getValueAPF().isNegZero() && N2.isUndef())
5549 return getUNDEF(VT);
5550 LLVM_FALLTHROUGH;
5551
5552 case ISD::FADD:
5553 case ISD::FMUL:
5554 case ISD::FDIV:
5555 case ISD::FREM:
5556 // If both operands are undef, the result is undef. If 1 operand is undef,
5557 // the result is NaN. This should match the behavior of the IR optimizer.
5558 if (N1.isUndef() && N2.isUndef())
5559 return getUNDEF(VT);
5560 if (N1.isUndef() || N2.isUndef())
5561 return getConstantFP(APFloat::getNaN(EVTToAPFloatSemantics(VT)), DL, VT);
5562 }
5563 return SDValue();
5564 }
5565
5566 SDValue SelectionDAG::getAssertAlign(const SDLoc &DL, SDValue Val, Align A) {
5567 assert(Val.getValueType().isInteger() && "Invalid AssertAlign!");
5568
5569 // There's no need to assert on a byte-aligned pointer. All pointers are at
5570 // least byte aligned.
5571 if (A == Align(1))
5572 return Val;
5573
5574 FoldingSetNodeID ID;
5575 AddNodeIDNode(ID, ISD::AssertAlign, getVTList(Val.getValueType()), {Val});
5576 ID.AddInteger(A.value());
5577
5578 void *IP = nullptr;
5579 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
5580 return SDValue(E, 0);
5581
5582 auto *N = newSDNode<AssertAlignSDNode>(DL.getIROrder(), DL.getDebugLoc(),
5583 Val.getValueType(), A);
5584 createOperands(N, {Val});
5585
5586 CSEMap.InsertNode(N, IP);
5587 InsertNode(N);
5588
5589 SDValue V(N, 0);
5590 NewSDValueDbgMsg(V, "Creating new node: ", this);
5591 return V;
5592 }
5593
5594 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5595 SDValue N1, SDValue N2) {
5596 SDNodeFlags Flags;
5597 if (Inserter)
5598 Flags = Inserter->getFlags();
5599 return getNode(Opcode, DL, VT, N1, N2, Flags);
5600 }
5601
5602 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5603 SDValue N1, SDValue N2, const SDNodeFlags Flags) {
5604 assert(N1.getOpcode() != ISD::DELETED_NODE &&
5605 N2.getOpcode() != ISD::DELETED_NODE &&
5606 "Operand is DELETED_NODE!");
5607 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
5608 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
5609 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
5610 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
5611
5612 // Canonicalize constant to RHS if commutative.
5613 if (TLI->isCommutativeBinOp(Opcode)) {
5614 if (N1C && !N2C) {
5615 std::swap(N1C, N2C);
5616 std::swap(N1, N2);
5617 } else if (N1CFP && !N2CFP) {
5618 std::swap(N1CFP, N2CFP);
5619 std::swap(N1, N2);
5620 }
5621 }
5622
5623 switch (Opcode) {
5624 default: break;
5625 case ISD::TokenFactor:
5626 assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
5627 N2.getValueType() == MVT::Other && "Invalid token factor!");
5628 // Fold trivial token factors.
5629 if (N1.getOpcode() == ISD::EntryToken) return N2;
5630 if (N2.getOpcode() == ISD::EntryToken) return N1;
5631 if (N1 == N2) return N1;
5632 break;
5633 case ISD::BUILD_VECTOR: {
5634 // Attempt to simplify BUILD_VECTOR.
5635 SDValue Ops[] = {N1, N2};
5636 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
5637 return V;
5638 break;
5639 }
5640 case ISD::CONCAT_VECTORS: {
5641 SDValue Ops[] = {N1, N2};
5642 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
5643 return V;
5644 break;
5645 }
5646 case ISD::AND:
5647 assert(VT.isInteger() && "This operator does not apply to FP types!");
5648 assert(N1.getValueType() == N2.getValueType() &&
5649 N1.getValueType() == VT && "Binary operator types must match!");
5650 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
5651 // worth handling here.
5652 if (N2C && N2C->isZero())
5653 return N2;
5654 if (N2C && N2C->isAllOnes()) // X & -1 -> X
5655 return N1;
5656 break;
5657 case ISD::OR:
5658 case ISD::XOR:
5659 case ISD::ADD:
5660 case ISD::SUB:
5661 assert(VT.isInteger() && "This operator does not apply to FP types!");
5662 assert(N1.getValueType() == N2.getValueType() &&
5663 N1.getValueType() == VT && "Binary operator types must match!");
5664 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
5665 // it's worth handling here.
5666 if (N2C && N2C->isZero())
5667 return N1;
5668 if ((Opcode == ISD::ADD || Opcode == ISD::SUB) && VT.isVector() &&
5669 VT.getVectorElementType() == MVT::i1)
5670 return getNode(ISD::XOR, DL, VT, N1, N2);
5671 break;
5672 case ISD::MUL:
5673 assert(VT.isInteger() && "This operator does not apply to FP types!");
5674 assert(N1.getValueType() == N2.getValueType() &&
5675 N1.getValueType() == VT && "Binary operator types must match!");
5676 if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
5677 return getNode(ISD::AND, DL, VT, N1, N2);
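// Fold (mul (vscale C1), C2) -> (vscale (C1 * C2)) when no signed wrap occurs.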
5678 if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) {
5679 const APInt &MulImm = N1->getConstantOperandAPInt(0);
5680 const APInt &N2CImm = N2C->getAPIntValue();
5681 return getVScale(DL, VT, MulImm * N2CImm);
5682 }
5683 break;
5684 case ISD::UDIV:
5685 case ISD::UREM:
5686 case ISD::MULHU:
5687 case ISD::MULHS:
5688 case ISD::SDIV:
5689 case ISD::SREM:
5690 case ISD::SADDSAT:
5691 case ISD::SSUBSAT:
5692 case ISD::UADDSAT:
5693 case ISD::USUBSAT:
5694 assert(VT.isInteger() && "This operator does not apply to FP types!");
5695 assert(N1.getValueType() == N2.getValueType() &&
5696 N1.getValueType() == VT && "Binary operator types must match!");
5697 if (VT.isVector() && VT.getVectorElementType() == MVT::i1) {
5698 // fold (add_sat x, y) -> (or x, y) for bool types.
5699 if (Opcode == ISD::SADDSAT || Opcode == ISD::UADDSAT)
5700 return getNode(ISD::OR, DL, VT, N1, N2);
5701 // fold (sub_sat x, y) -> (and x, ~y) for bool types.
5702 if (Opcode == ISD::SSUBSAT || Opcode == ISD::USUBSAT)
5703 return getNode(ISD::AND, DL, VT, N1, getNOT(DL, N2, VT));
5704 }
5705 break;
5706 case ISD::SMIN:
5707 case ISD::UMAX:
5708 assert(VT.isInteger() && "This operator does not apply to FP types!");
5709 assert(N1.getValueType() == N2.getValueType() &&
5710 N1.getValueType() == VT && "Binary operator types must match!");
5711 if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
5712 return getNode(ISD::OR, DL, VT, N1, N2);
5713 break;
5714 case ISD::SMAX:
5715 case ISD::UMIN:
5716 assert(VT.isInteger() && "This operator does not apply to FP types!");
5717 assert(N1.getValueType() == N2.getValueType() &&
5718 N1.getValueType() == VT && "Binary operator types must match!");
5719 if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
5720 return getNode(ISD::AND, DL, VT, N1, N2);
5721 break;
5722 case ISD::FADD:
5723 case ISD::FSUB:
5724 case ISD::FMUL:
5725 case ISD::FDIV:
5726 case ISD::FREM:
5727 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
5728 assert(N1.getValueType() == N2.getValueType() &&
5729 N1.getValueType() == VT && "Binary operator types must match!");
5730 if (SDValue V = simplifyFPBinop(Opcode, N1, N2, Flags))
5731 return V;
5732 break;
5733 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match.
5734 assert(N1.getValueType() == VT &&
5735 N1.getValueType().isFloatingPoint() &&
5736 N2.getValueType().isFloatingPoint() &&
5737 "Invalid FCOPYSIGN!");
5738 break;
5739 case ISD::SHL:
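// Fold (shl (vscale C1), C2) -> (vscale (C1 << C2)) when no signed wrap occurs.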
5740 if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) {
5741 const APInt &MulImm = N1->getConstantOperandAPInt(0);
5742 const APInt &ShiftImm = N2C->getAPIntValue();
5743 return getVScale(DL, VT, MulImm << ShiftImm);
5744 }
5745 LLVM_FALLTHROUGH;
5746 case ISD::SRA:
5747 case ISD::SRL:
5748 if (SDValue V = simplifyShift(N1, N2))
5749 return V;
5750 LLVM_FALLTHROUGH;
5751 case ISD::ROTL:
5752 case ISD::ROTR:
5753 assert(VT == N1.getValueType() &&
5754 "Shift operators return type must be the same as their first arg");
5755 assert(VT.isInteger() && N2.getValueType().isInteger() &&
5756 "Shifts only work on integers");
5757 assert((!VT.isVector() || VT == N2.getValueType()) &&
5758 "Vector shift amounts must be in the same as their first arg");
5759 // Verify that the shift amount VT is big enough to hold valid shift
5760 // amounts. This catches things like trying to shift an i1024 value by an
5761 // i8, which is easy to fall into in generic code that uses
5762 // TLI.getShiftAmount().
5763 assert(N2.getValueType().getScalarSizeInBits() >=
5764 Log2_32_Ceil(VT.getScalarSizeInBits()) &&
5765 "Invalid use of small shift amount with oversized value!");
5766
5767 // Always fold shifts of i1 values so the code generator doesn't need to
5768 // handle them. Since we know the size of the shift has to be less than the
5769 // size of the value, the shift/rotate count is guaranteed to be zero.
5770 if (VT == MVT::i1)
5771 return N1;
5772 if (N2C && N2C->isZero())
5773 return N1;
5774 break;
5775 case ISD::FP_ROUND:
5776 assert(VT.isFloatingPoint() &&
5777 N1.getValueType().isFloatingPoint() &&
5778 VT.bitsLE(N1.getValueType()) &&
5779 N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
5780 "Invalid FP_ROUND!");
5781 if (N1.getValueType() == VT) return N1; // noop conversion.
5782 break;
5783 case ISD::AssertSext:
5784 case ISD::AssertZext: {
5785 EVT EVT = cast<VTSDNode>(N2)->getVT();
5786 assert(VT == N1.getValueType() && "Not an inreg extend!");
5787 assert(VT.isInteger() && EVT.isInteger() &&
5788 "Cannot *_EXTEND_INREG FP types");
5789 assert(!EVT.isVector() &&
5790 "AssertSExt/AssertZExt type should be the vector element type "
5791 "rather than the vector type!");
5792 assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!");
5793 if (VT.getScalarType() == EVT) return N1; // noop assertion.
5794 break;
5795 }
5796 case ISD::SIGN_EXTEND_INREG: {
5797 EVT EVT = cast<VTSDNode>(N2)->getVT();
5798 assert(VT == N1.getValueType() && "Not an inreg extend!");
5799 assert(VT.isInteger() && EVT.isInteger() &&
5800 "Cannot *_EXTEND_INREG FP types");
5801 assert(EVT.isVector() == VT.isVector() &&
5802 "SIGN_EXTEND_INREG type should be vector iff the operand "
5803 "type is vector!");
5804 assert((!EVT.isVector() ||
5805 EVT.getVectorElementCount() == VT.getVectorElementCount()) &&
5806 "Vector element counts must match in SIGN_EXTEND_INREG");
5807 assert(EVT.bitsLE(VT) && "Not extending!");
5808 if (EVT == VT) return N1; // Not actually extending
5809
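// The lambda below sign-extends Val from the narrow width in place, e.g.
// extending from 8 bits within an i32: 0x000000aa << 24 = 0xaa000000, then an
// arithmetic shift right by 24 yields 0xffffffaa.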
5810 auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) {
5811 unsigned FromBits = EVT.getScalarSizeInBits();
5812 Val <<= Val.getBitWidth() - FromBits;
5813 Val.ashrInPlace(Val.getBitWidth() - FromBits);
5814 return getConstant(Val, DL, ConstantVT);
5815 };
5816
5817 if (N1C) {
5818 const APInt &Val = N1C->getAPIntValue();
5819 return SignExtendInReg(Val, VT);
5820 }
5821
5822 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) {
5823 SmallVector<SDValue, 8> Ops;
5824 llvm::EVT OpVT = N1.getOperand(0).getValueType();
5825 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
5826 SDValue Op = N1.getOperand(i);
5827 if (Op.isUndef()) {
5828 Ops.push_back(getUNDEF(OpVT));
5829 continue;
5830 }
5831 ConstantSDNode *C = cast<ConstantSDNode>(Op);
5832 APInt Val = C->getAPIntValue();
5833 Ops.push_back(SignExtendInReg(Val, OpVT));
5834 }
5835 return getBuildVector(VT, DL, Ops);
5836 }
5837 break;
5838 }
5839 case ISD::FP_TO_SINT_SAT:
5840 case ISD::FP_TO_UINT_SAT: {
5841 assert(VT.isInteger() && cast<VTSDNode>(N2)->getVT().isInteger() &&
5842 N1.getValueType().isFloatingPoint() && "Invalid FP_TO_*INT_SAT");
5843 assert(N1.getValueType().isVector() == VT.isVector() &&
5844 "FP_TO_*INT_SAT type should be vector iff the operand type is "
5845 "vector!");
5846 assert((!VT.isVector() || VT.getVectorNumElements() ==
5847 N1.getValueType().getVectorNumElements()) &&
5848 "Vector element counts must match in FP_TO_*INT_SAT");
5849 assert(!cast<VTSDNode>(N2)->getVT().isVector() &&
5850 "Type to saturate to must be a scalar.");
5851 assert(cast<VTSDNode>(N2)->getVT().bitsLE(VT.getScalarType()) &&
5852 "Not extending!");
5853 break;
5854 }
5855 case ISD::EXTRACT_VECTOR_ELT:
5856 assert(VT.getSizeInBits() >= N1.getValueType().getScalarSizeInBits() &&
5857 "The result of EXTRACT_VECTOR_ELT must be at least as wide as the \
5858 element type of the vector.");
5859
5860 // Extract from an undefined value or using an undefined index is undefined.
5861 if (N1.isUndef() || N2.isUndef())
5862 return getUNDEF(VT);
5863
5864 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF for fixed length
5865 // vectors. For scalable vectors we will provide appropriate support for
5866 // dealing with arbitrary indices.
5867 if (N2C && N1.getValueType().isFixedLengthVector() &&
5868 N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements()))
5869 return getUNDEF(VT);
5870
5871 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
5872 // expanding copies of large vectors from registers. This only works for
5873 // fixed length vectors, since we need to know the exact number of
5874 // elements.
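// i.e. (extract_elt (concat_vectors A, B), i) -> (extract_elt A-or-B, i % Factor),
// where Factor is the number of elements in each concatenated piece.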
5875 if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS && N1.getNumOperands() > 0 &&
5876 N1.getOperand(0).getValueType().isFixedLengthVector()) {
5877 unsigned Factor =
5878 N1.getOperand(0).getValueType().getVectorNumElements();
5879 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
5880 N1.getOperand(N2C->getZExtValue() / Factor),
5881 getVectorIdxConstant(N2C->getZExtValue() % Factor, DL));
5882 }
5883
5884 // EXTRACT_VECTOR_ELT of BUILD_VECTOR or SPLAT_VECTOR is often formed while
5885 // lowering is expanding large vector constants.
5886 if (N2C && (N1.getOpcode() == ISD::BUILD_VECTOR ||
5887 N1.getOpcode() == ISD::SPLAT_VECTOR)) {
5888 assert((N1.getOpcode() != ISD::BUILD_VECTOR ||
5889 N1.getValueType().isFixedLengthVector()) &&
5890 "BUILD_VECTOR used for scalable vectors");
5891 unsigned Index =
5892 N1.getOpcode() == ISD::BUILD_VECTOR ? N2C->getZExtValue() : 0;
5893 SDValue Elt = N1.getOperand(Index);
5894
5895 if (VT != Elt.getValueType())
5896 // If the vector element type is not legal, the BUILD_VECTOR operands
5897 // are promoted and implicitly truncated, and the result implicitly
5898 // extended. Make that explicit here.
5899 Elt = getAnyExtOrTrunc(Elt, DL, VT);
5900
5901 return Elt;
5902 }
5903
5904 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
5905 // operations are lowered to scalars.
5906 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
5907 // If the indices are the same, return the inserted element else
5908 // if the indices are known different, extract the element from
5909 // the original vector.
5910 SDValue N1Op2 = N1.getOperand(2);
5911 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);
5912
5913 if (N1Op2C && N2C) {
5914 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
5915 if (VT == N1.getOperand(1).getValueType())
5916 return N1.getOperand(1);
5917 return getSExtOrTrunc(N1.getOperand(1), DL, VT);
5918 }
5919 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
5920 }
5921 }
5922
5923 // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed
5924 // when vector types are scalarized and v1iX is legal.
5925 // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx).
5926 // Here we are completely ignoring the extract element index (N2),
5927 // which is fine for fixed width vectors, since any index other than 0
5928 // is undefined anyway. However, this cannot be ignored for scalable
5929 // vectors - in theory we could support this, but we don't want to do this
5930 // without a profitability check.
5931 if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
5932 N1.getValueType().isFixedLengthVector() &&
5933 N1.getValueType().getVectorNumElements() == 1) {
5934 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0),
5935 N1.getOperand(1));
5936 }
5937 break;
5938 case ISD::EXTRACT_ELEMENT:
5939 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
5940 assert(!N1.getValueType().isVector() && !VT.isVector() &&
5941 (N1.getValueType().isInteger() == VT.isInteger()) &&
5942 N1.getValueType() != VT &&
5943 "Wrong types for EXTRACT_ELEMENT!");
5944
5945 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
5946 // 64-bit integers into 32-bit parts. Instead of building the extract of
5947 // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
5948 if (N1.getOpcode() == ISD::BUILD_PAIR)
5949 return N1.getOperand(N2C->getZExtValue());
5950
5951 // EXTRACT_ELEMENT of a constant int is also very common.
5952 if (N1C) {
5953 unsigned ElementSize = VT.getSizeInBits();
5954 unsigned Shift = ElementSize * N2C->getZExtValue();
5955 const APInt &Val = N1C->getAPIntValue();
5956 return getConstant(Val.extractBits(ElementSize, Shift), DL, VT);
5957 }
5958 break;
5959 case ISD::EXTRACT_SUBVECTOR: {
5960 EVT N1VT = N1.getValueType();
5961 assert(VT.isVector() && N1VT.isVector() &&
5962 "Extract subvector VTs must be vectors!");
5963 assert(VT.getVectorElementType() == N1VT.getVectorElementType() &&
5964 "Extract subvector VTs must have the same element type!");
5965 assert((VT.isFixedLengthVector() || N1VT.isScalableVector()) &&
5966 "Cannot extract a scalable vector from a fixed length vector!");
5967 assert((VT.isScalableVector() != N1VT.isScalableVector() ||
5968 VT.getVectorMinNumElements() <= N1VT.getVectorMinNumElements()) &&
5969 "Extract subvector must be from larger vector to smaller vector!");
5970 assert(N2C && "Extract subvector index must be a constant");
5971 assert((VT.isScalableVector() != N1VT.isScalableVector() ||
5972 (VT.getVectorMinNumElements() + N2C->getZExtValue()) <=
5973 N1VT.getVectorMinNumElements()) &&
5974 "Extract subvector overflow!");
5975 assert(N2C->getAPIntValue().getBitWidth() ==
5976 TLI->getVectorIdxTy(getDataLayout()).getFixedSizeInBits() &&
5977 "Constant index for EXTRACT_SUBVECTOR has an invalid size");
5978
5979 // Trivial extraction.
5980 if (VT == N1VT)
5981 return N1;
5982
5983 // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF.
5984 if (N1.isUndef())
5985 return getUNDEF(VT);
5986
5987 // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of
5988 // the concat have the same type as the extract.
5989 if (N1.getOpcode() == ISD::CONCAT_VECTORS && N1.getNumOperands() > 0 &&
5990 VT == N1.getOperand(0).getValueType()) {
5991 unsigned Factor = VT.getVectorMinNumElements();
5992 return N1.getOperand(N2C->getZExtValue() / Factor);
5993 }
5994
5995 // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created
5996 // during shuffle legalization.
5997 if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) &&
5998 VT == N1.getOperand(1).getValueType())
5999 return N1.getOperand(1);
6000 break;
6001 }
6002 }
6003
6004 // Perform trivial constant folding.
6005 if (SDValue SV = FoldConstantArithmetic(Opcode, DL, VT, {N1, N2}))
6006 return SV;
6007
6008 if (SDValue V = foldConstantFPMath(Opcode, DL, VT, N1, N2))
6009 return V;
6010
6011 // Canonicalize an UNDEF to the RHS, even over a constant.
6012 if (N1.isUndef()) {
6013 if (TLI->isCommutativeBinOp(Opcode)) {
6014 std::swap(N1, N2);
6015 } else {
6016 switch (Opcode) {
6017 case ISD::SIGN_EXTEND_INREG:
6018 case ISD::SUB:
6019 return getUNDEF(VT); // fold op(undef, arg2) -> undef
6020 case ISD::UDIV:
6021 case ISD::SDIV:
6022 case ISD::UREM:
6023 case ISD::SREM:
6024 case ISD::SSUBSAT:
6025 case ISD::USUBSAT:
6026 return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0
6027 }
6028 }
6029 }
6030
6031 // Fold a bunch of operators when the RHS is undef.
6032 if (N2.isUndef()) {
6033 switch (Opcode) {
6034 case ISD::XOR:
6035 if (N1.isUndef())
6036 // Handle undef ^ undef -> 0 special case. This is a common
6037 // idiom (misuse).
6038 return getConstant(0, DL, VT);
6039 LLVM_FALLTHROUGH;
6040 case ISD::ADD:
6041 case ISD::SUB:
6042 case ISD::UDIV:
6043 case ISD::SDIV:
6044 case ISD::UREM:
6045 case ISD::SREM:
6046 return getUNDEF(VT); // fold op(arg1, undef) -> undef
6047 case ISD::MUL:
6048 case ISD::AND:
6049 case ISD::SSUBSAT:
6050 case ISD::USUBSAT:
6051 return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0
6052 case ISD::OR:
6053 case ISD::SADDSAT:
6054 case ISD::UADDSAT:
6055 return getAllOnesConstant(DL, VT);
6056 }
6057 }
6058
6059 // Memoize this node if possible.
6060 SDNode *N;
6061 SDVTList VTs = getVTList(VT);
6062 SDValue Ops[] = {N1, N2};
6063 if (VT != MVT::Glue) {
6064 FoldingSetNodeID ID;
6065 AddNodeIDNode(ID, Opcode, VTs, Ops);
6066 void *IP = nullptr;
6067 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
6068 E->intersectFlagsWith(Flags);
6069 return SDValue(E, 0);
6070 }
6071
6072 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
6073 N->setFlags(Flags);
6074 createOperands(N, Ops);
6075 CSEMap.InsertNode(N, IP);
6076 } else {
6077 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
6078 createOperands(N, Ops);
6079 }
6080
6081 InsertNode(N);
6082 SDValue V = SDValue(N, 0);
6083 NewSDValueDbgMsg(V, "Creating new node: ", this);
6084 return V;
6085 }
6086
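/// Convenience overload of the three-operand getNode that picks up the node
/// flags from the active FlagInserter, if any.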
6087 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
6088 SDValue N1, SDValue N2, SDValue N3) {
6089 SDNodeFlags Flags;
6090 if (Inserter)
6091 Flags = Inserter->getFlags();
6092 return getNode(Opcode, DL, VT, N1, N2, N3, Flags);
6093 }
6094
6095 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
6096 SDValue N1, SDValue N2, SDValue N3,
6097 const SDNodeFlags Flags) {
6098 assert(N1.getOpcode() != ISD::DELETED_NODE &&
6099 N2.getOpcode() != ISD::DELETED_NODE &&
6100 N3.getOpcode() != ISD::DELETED_NODE &&
6101 "Operand is DELETED_NODE!");
6102 // Perform various simplifications.
6103 switch (Opcode) {
6104 case ISD::FMA: {
6105 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
6106 assert(N1.getValueType() == VT && N2.getValueType() == VT &&
6107 N3.getValueType() == VT && "FMA types must match!");
6108 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
6109 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
6110 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
6111 if (N1CFP && N2CFP && N3CFP) {
6112 APFloat V1 = N1CFP->getValueAPF();
6113 const APFloat &V2 = N2CFP->getValueAPF();
6114 const APFloat &V3 = N3CFP->getValueAPF();
6115 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
6116 return getConstantFP(V1, DL, VT);
6117 }
6118 break;
6119 }
6120 case ISD::BUILD_VECTOR: {
6121 // Attempt to simplify BUILD_VECTOR.
6122 SDValue Ops[] = {N1, N2, N3};
6123 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
6124 return V;
6125 break;
6126 }
6127 case ISD::CONCAT_VECTORS: {
6128 SDValue Ops[] = {N1, N2, N3};
6129 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
6130 return V;
6131 break;
6132 }
6133 case ISD::SETCC: {
6134 assert(VT.isInteger() && "SETCC result type must be an integer!");
6135 assert(N1.getValueType() == N2.getValueType() &&
6136 "SETCC operands must have the same type!");
6137 assert(VT.isVector() == N1.getValueType().isVector() &&
6138 "SETCC type should be vector iff the operand type is vector!");
6139 assert((!VT.isVector() || VT.getVectorElementCount() ==
6140 N1.getValueType().getVectorElementCount()) &&
6141 "SETCC vector element counts must match!");
6142 // Use FoldSetCC to simplify SETCC's.
6143 if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL))
6144 return V;
6145 // Vector constant folding.
6146 SDValue Ops[] = {N1, N2, N3};
6147 if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) {
6148 NewSDValueDbgMsg(V, "New node vector constant folding: ", this);
6149 return V;
6150 }
6151 break;
6152 }
6153 case ISD::SELECT:
6154 case ISD::VSELECT:
6155 if (SDValue V = simplifySelect(N1, N2, N3))
6156 return V;
6157 break;
6158 case ISD::VECTOR_SHUFFLE:
6159 llvm_unreachable("should use getVectorShuffle constructor!");
6160 case ISD::VECTOR_SPLICE: {
6161 if (cast<ConstantSDNode>(N3)->isNullValue())
6162 return N1;
6163 break;
6164 }
6165 case ISD::INSERT_VECTOR_ELT: {
6166 ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3);
6167 // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF, except
6168 // for scalable vectors where we will generate appropriate code to
6169 // deal with out-of-bounds cases correctly.
6170 if (N3C && N1.getValueType().isFixedLengthVector() &&
6171 N3C->getZExtValue() >= N1.getValueType().getVectorNumElements())
6172 return getUNDEF(VT);
6173
6174 // Undefined index can be assumed out-of-bounds, so that's UNDEF too.
6175 if (N3.isUndef())
6176 return getUNDEF(VT);
6177
6178 // If the inserted element is an UNDEF, just use the input vector.
6179 if (N2.isUndef())
6180 return N1;
6181
6182 break;
6183 }
6184 case ISD::INSERT_SUBVECTOR: {
6185 // Inserting undef into undef is still undef.
6186 if (N1.isUndef() && N2.isUndef())
6187 return getUNDEF(VT);
6188
6189 EVT N2VT = N2.getValueType();
6190 assert(VT == N1.getValueType() &&
6191 "Dest and insert subvector source types must match!");
6192 assert(VT.isVector() && N2VT.isVector() &&
6193 "Insert subvector VTs must be vectors!");
6194 assert((VT.isScalableVector() || N2VT.isFixedLengthVector()) &&
6195 "Cannot insert a scalable vector into a fixed length vector!");
6196 assert((VT.isScalableVector() != N2VT.isScalableVector() ||
6197 VT.getVectorMinNumElements() >= N2VT.getVectorMinNumElements()) &&
6198 "Insert subvector must be from smaller vector to larger vector!");
6199 assert(isa<ConstantSDNode>(N3) &&
6200 "Insert subvector index must be constant");
6201 assert((VT.isScalableVector() != N2VT.isScalableVector() ||
6202 (N2VT.getVectorMinNumElements() +
6203 cast<ConstantSDNode>(N3)->getZExtValue()) <=
6204 VT.getVectorMinNumElements()) &&
6205 "Insert subvector overflow!");
6206 assert(cast<ConstantSDNode>(N3)->getAPIntValue().getBitWidth() ==
6207 TLI->getVectorIdxTy(getDataLayout()).getFixedSizeInBits() &&
6208 "Constant index for INSERT_SUBVECTOR has an invalid size");
6209
6210 // Trivial insertion.
6211 if (VT == N2VT)
6212 return N2;
6213
6214 // If this is an insert of an extracted vector into an undef vector, we
6215 // can just use the input to the extract.
6216 if (N1.isUndef() && N2.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
6217 N2.getOperand(1) == N3 && N2.getOperand(0).getValueType() == VT)
6218 return N2.getOperand(0);
6219 break;
6220 }
6221 case ISD::BITCAST:
6222 // Fold bit_convert nodes from a type to themselves.
6223 if (N1.getValueType() == VT)
6224 return N1;
6225 break;
6226 }
6227
6228 // Memoize node if it doesn't produce a flag.
6229 SDNode *N;
6230 SDVTList VTs = getVTList(VT);
6231 SDValue Ops[] = {N1, N2, N3};
6232 if (VT != MVT::Glue) {
6233 FoldingSetNodeID ID;
6234 AddNodeIDNode(ID, Opcode, VTs, Ops);
6235 void *IP = nullptr;
6236 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
6237 E->intersectFlagsWith(Flags);
6238 return SDValue(E, 0);
6239 }
6240
6241 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
6242 N->setFlags(Flags);
6243 createOperands(N, Ops);
6244 CSEMap.InsertNode(N, IP);
6245 } else {
6246 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
6247 createOperands(N, Ops);
6248 }
6249
6250 InsertNode(N);
6251 SDValue V = SDValue(N, 0);
6252 NewSDValueDbgMsg(V, "Creating new node: ", this);
6253 return V;
6254 }
6255
6256 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
6257 SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
6258 SDValue Ops[] = { N1, N2, N3, N4 };
6259 return getNode(Opcode, DL, VT, Ops);
6260 }
6261
6262 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
6263 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
6264 SDValue N5) {
6265 SDValue Ops[] = { N1, N2, N3, N4, N5 };
6266 return getNode(Opcode, DL, VT, Ops);
6267 }
6268
6269 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
6270 /// the incoming stack arguments to be loaded from the stack.
6271 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
6272 SmallVector<SDValue, 8> ArgChains;
6273
6274 // Include the original chain at the beginning of the list. When this is
6275 // used by target LowerCall hooks, this helps legalize find the
6276 // CALLSEQ_BEGIN node.
6277 ArgChains.push_back(Chain);
6278
6279 // Add a chain value for each stack argument.
6280 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
6281 UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
6282 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
6283 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
6284 if (FI->getIndex() < 0)
6285 ArgChains.push_back(SDValue(L, 1));
6286
6287 // Build a tokenfactor for all the chains.
6288 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
6289 }
6290
6291 /// getMemsetValue - Vectorized representation of the memset value
6292 /// operand.
6293 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
6294 const SDLoc &dl) {
6295 assert(!Value.isUndef());
6296
6297 unsigned NumBits = VT.getScalarSizeInBits();
6298 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
6299 assert(C->getAPIntValue().getBitWidth() == 8);
6300 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
6301 if (VT.isInteger()) {
6302 bool IsOpaque = VT.getSizeInBits() > 64 ||
6303 !DAG.getTargetLoweringInfo().isLegalStoreImmediate(C->getSExtValue());
6304 return DAG.getConstant(Val, dl, VT, false, IsOpaque);
6305 }
6306 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl,
6307 VT);
6308 }
6309
6310 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?");
6311 EVT IntVT = VT.getScalarType();
6312 if (!IntVT.isInteger())
6313 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits());
6314
6315 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value);
6316 if (NumBits > 8) {
6317 // Use a multiplication with 0x010101... to extend the input to the
6318 // required length.
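    // For example, splatting the i8 value 0xAB to i32 computes
    // 0xAB * 0x01010101 = 0xABABABAB.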
6319 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
6320 Value = DAG.getNode(ISD::MUL, dl, IntVT, Value,
6321 DAG.getConstant(Magic, dl, IntVT));
6322 }
6323
6324 if (VT != Value.getValueType() && !VT.isInteger())
6325 Value = DAG.getBitcast(VT.getScalarType(), Value);
6326 if (VT != Value.getValueType())
6327 Value = DAG.getSplatBuildVector(VT, dl, Value);
6328
6329 return Value;
6330 }
6331
6332 /// getMemsetStringVal - Similar to getMemsetValue, except this is only
6333 /// used when a memcpy is turned into a memset because the source is a
6334 /// constant string pointer.
6335 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG,
6336 const TargetLowering &TLI,
6337 const ConstantDataArraySlice &Slice) {
6338 // Handle vector with all elements zero.
6339 if (Slice.Array == nullptr) {
6340 if (VT.isInteger())
6341 return DAG.getConstant(0, dl, VT);
6342 if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
6343 return DAG.getConstantFP(0.0, dl, VT);
6344 if (VT.isVector()) {
6345 unsigned NumElts = VT.getVectorNumElements();
6346 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
6347 return DAG.getNode(ISD::BITCAST, dl, VT,
6348 DAG.getConstant(0, dl,
6349 EVT::getVectorVT(*DAG.getContext(),
6350 EltVT, NumElts)));
6351 }
6352 llvm_unreachable("Expected type!");
6353 }
6354
6355 assert(!VT.isVector() && "Can't handle vector type here!");
6356 unsigned NumVTBits = VT.getSizeInBits();
6357 unsigned NumVTBytes = NumVTBits / 8;
6358 unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length));
6359
6360 APInt Val(NumVTBits, 0);
6361 if (DAG.getDataLayout().isLittleEndian()) {
6362 for (unsigned i = 0; i != NumBytes; ++i)
6363 Val |= (uint64_t)(unsigned char)Slice[i] << i*8;
6364 } else {
6365 for (unsigned i = 0; i != NumBytes; ++i)
6366 Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8;
6367 }
6368
6369 // If the "cost" of materializing the integer immediate is less than the cost
6370 // of a load, then it is cost effective to turn the load into the immediate.
6371 Type *Ty = VT.getTypeForEVT(*DAG.getContext());
6372 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
6373 return DAG.getConstant(Val, dl, VT);
6374 return SDValue(nullptr, 0);
6375 }
6376
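/// Return Base plus Offset, emitting a VSCALE-based index when the offset is
/// scalable and a plain constant index otherwise.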
6377 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, TypeSize Offset,
6378 const SDLoc &DL,
6379 const SDNodeFlags Flags) {
6380 EVT VT = Base.getValueType();
6381 SDValue Index;
6382
6383 if (Offset.isScalable())
6384 Index = getVScale(DL, Base.getValueType(),
6385 APInt(Base.getValueSizeInBits().getFixedSize(),
6386 Offset.getKnownMinSize()));
6387 else
6388 Index = getConstant(Offset.getFixedSize(), DL, VT);
6389
6390 return getMemBasePlusOffset(Base, Index, DL, Flags);
6391 }
6392
6393 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Ptr, SDValue Offset,
6394 const SDLoc &DL,
6395 const SDNodeFlags Flags) {
6396 assert(Offset.getValueType().isInteger());
6397 EVT BasePtrVT = Ptr.getValueType();
6398 return getNode(ISD::ADD, DL, BasePtrVT, Ptr, Offset, Flags);
6399 }
6400
6401 /// Returns true if memcpy source is constant data.
6402 static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) {
6403 uint64_t SrcDelta = 0;
6404 GlobalAddressSDNode *G = nullptr;
6405 if (Src.getOpcode() == ISD::GlobalAddress)
6406 G = cast<GlobalAddressSDNode>(Src);
6407 else if (Src.getOpcode() == ISD::ADD &&
6408 Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
6409 Src.getOperand(1).getOpcode() == ISD::Constant) {
6410 G = cast<GlobalAddressSDNode>(Src.getOperand(0));
6411 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
6412 }
6413 if (!G)
6414 return false;
6415
6416 return getConstantDataArrayInfo(G->getGlobal(), Slice, 8,
6417 SrcDelta + G->getOffset());
6418 }
6419
6420 static bool shouldLowerMemFuncForSize(const MachineFunction &MF,
6421 SelectionDAG &DAG) {
6422 // On Darwin, -Os means optimize for size without hurting performance, so
6423 // only really optimize for size when -Oz (MinSize) is used.
6424 if (MF.getTarget().getTargetTriple().isOSDarwin())
6425 return MF.getFunction().hasMinSize();
6426 return DAG.shouldOptForSize();
6427 }
6428
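/// Glue together the loads generated for a memcpy expansion in the range
/// [From, To): chain them through a single TokenFactor and re-emit the
/// corresponding stores with that token as their chain.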
6429 static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
6430 SmallVector<SDValue, 32> &OutChains, unsigned From,
6431 unsigned To, SmallVector<SDValue, 16> &OutLoadChains,
6432 SmallVector<SDValue, 16> &OutStoreChains) {
6433 assert(OutLoadChains.size() && "Missing loads in memcpy inlining");
6434 assert(OutStoreChains.size() && "Missing stores in memcpy inlining");
6435 SmallVector<SDValue, 16> GluedLoadChains;
6436 for (unsigned i = From; i < To; ++i) {
6437 OutChains.push_back(OutLoadChains[i]);
6438 GluedLoadChains.push_back(OutLoadChains[i]);
6439 }
6440
6441 // Chain for all loads.
6442 SDValue LoadToken = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
6443 GluedLoadChains);
6444
6445 for (unsigned i = From; i < To; ++i) {
6446 StoreSDNode *ST = dyn_cast<StoreSDNode>(OutStoreChains[i]);
6447 SDValue NewStore = DAG.getTruncStore(LoadToken, dl, ST->getValue(),
6448 ST->getBasePtr(), ST->getMemoryVT(),
6449 ST->getMemOperand());
6450 OutChains.push_back(NewStore);
6451 }
6452 }
6453
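/// Expand a memcpy of a known constant size into an inline sequence of loads
/// and stores, or return an empty SDValue if the target's limits make the
/// expansion unattractive.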
6454 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
6455 SDValue Chain, SDValue Dst, SDValue Src,
6456 uint64_t Size, Align Alignment,
6457 bool isVol, bool AlwaysInline,
6458 MachinePointerInfo DstPtrInfo,
6459 MachinePointerInfo SrcPtrInfo,
6460 const AAMDNodes &AAInfo) {
6461 // Turn a memcpy of undef to nop.
6462   // FIXME: We need to honor volatile even if Src is undef.
6463 if (Src.isUndef())
6464 return Chain;
6465
6466 // Expand memcpy to a series of load and store ops if the size operand falls
6467 // below a certain threshold.
6468 // TODO: In the AlwaysInline case, if the size is big then generate a loop
6469 // rather than maybe a humongous number of loads and stores.
6470 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6471 const DataLayout &DL = DAG.getDataLayout();
6472 LLVMContext &C = *DAG.getContext();
6473 std::vector<EVT> MemOps;
6474 bool DstAlignCanChange = false;
6475 MachineFunction &MF = DAG.getMachineFunction();
6476 MachineFrameInfo &MFI = MF.getFrameInfo();
6477 bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
6478 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
6479 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
6480 DstAlignCanChange = true;
6481 MaybeAlign SrcAlign = DAG.InferPtrAlign(Src);
6482 if (!SrcAlign || Alignment > *SrcAlign)
6483 SrcAlign = Alignment;
6484 assert(SrcAlign && "SrcAlign must be set");
6485 ConstantDataArraySlice Slice;
6486 // If marked as volatile, perform a copy even when marked as constant.
6487 bool CopyFromConstant = !isVol && isMemSrcFromConstant(Src, Slice);
6488 bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr;
6489 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
6490 const MemOp Op = isZeroConstant
6491 ? MemOp::Set(Size, DstAlignCanChange, Alignment,
6492 /*IsZeroMemset*/ true, isVol)
6493 : MemOp::Copy(Size, DstAlignCanChange, Alignment,
6494 *SrcAlign, isVol, CopyFromConstant);
6495 if (!TLI.findOptimalMemOpLowering(
6496 MemOps, Limit, Op, DstPtrInfo.getAddrSpace(),
6497 SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes()))
6498 return SDValue();
6499
6500 if (DstAlignCanChange) {
6501 Type *Ty = MemOps[0].getTypeForEVT(C);
6502 Align NewAlign = DL.getABITypeAlign(Ty);
6503
6504 // Don't promote to an alignment that would require dynamic stack
6505 // realignment.
6506 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
6507 if (!TRI->hasStackRealignment(MF))
6508 while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
6509         NewAlign = NewAlign.previous();
6510
6511 if (NewAlign > Alignment) {
6512 // Give the stack frame object a larger alignment if needed.
6513 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
6514 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
6515 Alignment = NewAlign;
6516 }
6517 }
6518
6519 // Prepare AAInfo for loads/stores after lowering this memcpy.
6520 AAMDNodes NewAAInfo = AAInfo;
6521 NewAAInfo.TBAA = NewAAInfo.TBAAStruct = nullptr;
6522
6523 MachineMemOperand::Flags MMOFlags =
6524 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
6525 SmallVector<SDValue, 16> OutLoadChains;
6526 SmallVector<SDValue, 16> OutStoreChains;
6527 SmallVector<SDValue, 32> OutChains;
6528 unsigned NumMemOps = MemOps.size();
6529 uint64_t SrcOff = 0, DstOff = 0;
6530 for (unsigned i = 0; i != NumMemOps; ++i) {
6531 EVT VT = MemOps[i];
6532 unsigned VTSize = VT.getSizeInBits() / 8;
6533 SDValue Value, Store;
6534
6535 if (VTSize > Size) {
6536 // Issuing an unaligned load / store pair that overlaps with the previous
6537 // pair. Adjust the offset accordingly.
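      // For example, a 7-byte copy lowered to two i32 operations backs the
      // second load/store up to offset 3 so it overlaps the first by one byte.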
6538 assert(i == NumMemOps-1 && i != 0);
6539 SrcOff -= VTSize - Size;
6540 DstOff -= VTSize - Size;
6541 }
6542
6543 if (CopyFromConstant &&
6544 (isZeroConstant || (VT.isInteger() && !VT.isVector()))) {
6545 // It's unlikely a store of a vector immediate can be done in a single
6546 // instruction. It would require a load from a constantpool first.
6547 // We only handle zero vectors here.
6548 // FIXME: Handle other cases where store of vector immediate is done in
6549 // a single instruction.
6550 ConstantDataArraySlice SubSlice;
6551 if (SrcOff < Slice.Length) {
6552 SubSlice = Slice;
6553 SubSlice.move(SrcOff);
6554 } else {
6555 // This is an out-of-bounds access and hence UB. Pretend we read zero.
6556 SubSlice.Array = nullptr;
6557 SubSlice.Offset = 0;
6558 SubSlice.Length = VTSize;
6559 }
6560 Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice);
6561 if (Value.getNode()) {
6562 Store = DAG.getStore(
6563 Chain, dl, Value,
6564 DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
6565 DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags, NewAAInfo);
6566 OutChains.push_back(Store);
6567 }
6568 }
6569
6570 if (!Store.getNode()) {
6571 // The type might not be legal for the target. This should only happen
6572 // if the type is smaller than a legal type, as on PPC, so the right
6573 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
6574 // to Load/Store if NVT==VT.
6575       // FIXME: does the case above also need this?
6576 EVT NVT = TLI.getTypeToTransformTo(C, VT);
6577 assert(NVT.bitsGE(VT));
6578
6579 bool isDereferenceable =
6580 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
6581 MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
6582 if (isDereferenceable)
6583 SrcMMOFlags |= MachineMemOperand::MODereferenceable;
6584
6585 Value = DAG.getExtLoad(
6586 ISD::EXTLOAD, dl, NVT, Chain,
6587 DAG.getMemBasePlusOffset(Src, TypeSize::Fixed(SrcOff), dl),
6588 SrcPtrInfo.getWithOffset(SrcOff), VT,
6589 commonAlignment(*SrcAlign, SrcOff), SrcMMOFlags, NewAAInfo);
6590 OutLoadChains.push_back(Value.getValue(1));
6591
6592 Store = DAG.getTruncStore(
6593 Chain, dl, Value,
6594 DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
6595 DstPtrInfo.getWithOffset(DstOff), VT, Alignment, MMOFlags, NewAAInfo);
6596 OutStoreChains.push_back(Store);
6597 }
6598 SrcOff += VTSize;
6599 DstOff += VTSize;
6600 Size -= VTSize;
6601 }
6602
6603 unsigned GluedLdStLimit = MaxLdStGlue == 0 ?
6604 TLI.getMaxGluedStoresPerMemcpy() : MaxLdStGlue;
6605 unsigned NumLdStInMemcpy = OutStoreChains.size();
6606
6607 if (NumLdStInMemcpy) {
6608     // The memcpy may have been converted to a memset if it copies constant
6609     // data; in that case there are only stores and no loads, and with no
6610     // loads there is nothing to gang up.
6611 if ((GluedLdStLimit <= 1) || !EnableMemCpyDAGOpt) {
6612       // If the target does not care, just leave the chains as they are.
6613 for (unsigned i = 0; i < NumLdStInMemcpy; ++i) {
6614 OutChains.push_back(OutLoadChains[i]);
6615 OutChains.push_back(OutStoreChains[i]);
6616 }
6617 } else {
6618 // Ld/St less than/equal limit set by target.
6619 if (NumLdStInMemcpy <= GluedLdStLimit) {
6620 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
6621 NumLdStInMemcpy, OutLoadChains,
6622 OutStoreChains);
6623 } else {
6624 unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit;
6625 unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit;
6626 unsigned GlueIter = 0;
6627
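        // Glue groups of GluedLdStLimit load/store pairs, working backwards
        // from the end of the list; any remainder at the front is glued below
        // as a final residual group.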
6628 for (unsigned cnt = 0; cnt < NumberLdChain; ++cnt) {
6629 unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit;
6630 unsigned IndexTo = NumLdStInMemcpy - GlueIter;
6631
6632 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, IndexFrom, IndexTo,
6633 OutLoadChains, OutStoreChains);
6634 GlueIter += GluedLdStLimit;
6635 }
6636
6637 // Residual ld/st.
6638 if (RemainingLdStInMemcpy) {
6639 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
6640 RemainingLdStInMemcpy, OutLoadChains,
6641 OutStoreChains);
6642 }
6643 }
6644 }
6645 }
6646 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
6647 }
6648
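/// Expand a memmove of a known constant size inline. Unlike the memcpy
/// expansion, every load is emitted before any store so the result is correct
/// even when the source and destination ranges overlap.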
6649 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
6650 SDValue Chain, SDValue Dst, SDValue Src,
6651 uint64_t Size, Align Alignment,
6652 bool isVol, bool AlwaysInline,
6653 MachinePointerInfo DstPtrInfo,
6654 MachinePointerInfo SrcPtrInfo,
6655 const AAMDNodes &AAInfo) {
6656 // Turn a memmove of undef to nop.
6657   // FIXME: We need to honor volatile even if Src is undef.
6658 if (Src.isUndef())
6659 return Chain;
6660
6661 // Expand memmove to a series of load and store ops if the size operand falls
6662 // below a certain threshold.
6663 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6664 const DataLayout &DL = DAG.getDataLayout();
6665 LLVMContext &C = *DAG.getContext();
6666 std::vector<EVT> MemOps;
6667 bool DstAlignCanChange = false;
6668 MachineFunction &MF = DAG.getMachineFunction();
6669 MachineFrameInfo &MFI = MF.getFrameInfo();
6670 bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
6671 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
6672 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
6673 DstAlignCanChange = true;
6674 MaybeAlign SrcAlign = DAG.InferPtrAlign(Src);
6675 if (!SrcAlign || Alignment > *SrcAlign)
6676 SrcAlign = Alignment;
6677 assert(SrcAlign && "SrcAlign must be set");
6678 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
6679 if (!TLI.findOptimalMemOpLowering(
6680 MemOps, Limit,
6681 MemOp::Copy(Size, DstAlignCanChange, Alignment, *SrcAlign,
6682 /*IsVolatile*/ true),
6683 DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
6684 MF.getFunction().getAttributes()))
6685 return SDValue();
6686
6687 if (DstAlignCanChange) {
6688 Type *Ty = MemOps[0].getTypeForEVT(C);
6689 Align NewAlign = DL.getABITypeAlign(Ty);
6690 if (NewAlign > Alignment) {
6691 // Give the stack frame object a larger alignment if needed.
6692 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
6693 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
6694 Alignment = NewAlign;
6695 }
6696 }
6697
6698 // Prepare AAInfo for loads/stores after lowering this memmove.
6699 AAMDNodes NewAAInfo = AAInfo;
6700 NewAAInfo.TBAA = NewAAInfo.TBAAStruct = nullptr;
6701
6702 MachineMemOperand::Flags MMOFlags =
6703 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
6704 uint64_t SrcOff = 0, DstOff = 0;
6705 SmallVector<SDValue, 8> LoadValues;
6706 SmallVector<SDValue, 8> LoadChains;
6707 SmallVector<SDValue, 8> OutChains;
6708 unsigned NumMemOps = MemOps.size();
6709 for (unsigned i = 0; i < NumMemOps; i++) {
6710 EVT VT = MemOps[i];
6711 unsigned VTSize = VT.getSizeInBits() / 8;
6712 SDValue Value;
6713
6714 bool isDereferenceable =
6715 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
6716 MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
6717 if (isDereferenceable)
6718 SrcMMOFlags |= MachineMemOperand::MODereferenceable;
6719
6720 Value = DAG.getLoad(
6721 VT, dl, Chain,
6722 DAG.getMemBasePlusOffset(Src, TypeSize::Fixed(SrcOff), dl),
6723 SrcPtrInfo.getWithOffset(SrcOff), *SrcAlign, SrcMMOFlags, NewAAInfo);
6724 LoadValues.push_back(Value);
6725 LoadChains.push_back(Value.getValue(1));
6726 SrcOff += VTSize;
6727 }
6728 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
6729 OutChains.clear();
6730 for (unsigned i = 0; i < NumMemOps; i++) {
6731 EVT VT = MemOps[i];
6732 unsigned VTSize = VT.getSizeInBits() / 8;
6733 SDValue Store;
6734
6735 Store = DAG.getStore(
6736 Chain, dl, LoadValues[i],
6737 DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
6738 DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags, NewAAInfo);
6739 OutChains.push_back(Store);
6740 DstOff += VTSize;
6741 }
6742
6743 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
6744 }
6745
6746 /// Lower the call to 'memset' intrinsic function into a series of store
6747 /// operations.
6748 ///
6749 /// \param DAG Selection DAG where lowered code is placed.
6750 /// \param dl Link to corresponding IR location.
6751 /// \param Chain Control flow dependency.
6752 /// \param Dst Pointer to destination memory location.
6753 /// \param Src Value of byte to write into the memory.
6754 /// \param Size Number of bytes to write.
6755 /// \param Alignment Alignment of the destination in bytes.
6756 /// \param isVol True if destination is volatile.
6757 /// \param DstPtrInfo IR information on the memory pointer.
6758 /// \returns New head in the control flow, if lowering was successful, empty
6759 /// SDValue otherwise.
6760 ///
6761 /// The function tries to replace 'llvm.memset' intrinsic with several store
6762 /// operations and value calculation code. This is usually profitable for small
6763 /// memory size.
6764 static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
6765 SDValue Chain, SDValue Dst, SDValue Src,
6766 uint64_t Size, Align Alignment, bool isVol,
6767 MachinePointerInfo DstPtrInfo,
6768 const AAMDNodes &AAInfo) {
6769 // Turn a memset of undef to nop.
6770   // FIXME: We need to honor volatile even if Src is undef.
6771 if (Src.isUndef())
6772 return Chain;
6773
6774 // Expand memset to a series of load/store ops if the size operand
6775 // falls below a certain threshold.
6776 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6777 std::vector<EVT> MemOps;
6778 bool DstAlignCanChange = false;
6779 MachineFunction &MF = DAG.getMachineFunction();
6780 MachineFrameInfo &MFI = MF.getFrameInfo();
6781 bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
6782 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
6783 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
6784 DstAlignCanChange = true;
6785 bool IsZeroVal =
6786 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isZero();
6787 if (!TLI.findOptimalMemOpLowering(
6788 MemOps, TLI.getMaxStoresPerMemset(OptSize),
6789 MemOp::Set(Size, DstAlignCanChange, Alignment, IsZeroVal, isVol),
6790 DstPtrInfo.getAddrSpace(), ~0u, MF.getFunction().getAttributes()))
6791 return SDValue();
6792
6793 if (DstAlignCanChange) {
6794 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
6795 Align NewAlign = DAG.getDataLayout().getABITypeAlign(Ty);
6796 if (NewAlign > Alignment) {
6797 // Give the stack frame object a larger alignment if needed.
6798 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
6799 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
6800 Alignment = NewAlign;
6801 }
6802 }
6803
6804 SmallVector<SDValue, 8> OutChains;
6805 uint64_t DstOff = 0;
6806 unsigned NumMemOps = MemOps.size();
6807
6808 // Find the largest store and generate the bit pattern for it.
6809 EVT LargestVT = MemOps[0];
6810 for (unsigned i = 1; i < NumMemOps; i++)
6811 if (MemOps[i].bitsGT(LargestVT))
6812 LargestVT = MemOps[i];
6813 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
6814
6815 // Prepare AAInfo for loads/stores after lowering this memset.
6816 AAMDNodes NewAAInfo = AAInfo;
6817 NewAAInfo.TBAA = NewAAInfo.TBAAStruct = nullptr;
6818
6819 for (unsigned i = 0; i < NumMemOps; i++) {
6820 EVT VT = MemOps[i];
6821 unsigned VTSize = VT.getSizeInBits() / 8;
6822 if (VTSize > Size) {
6823 // Issuing an unaligned load / store pair that overlaps with the previous
6824 // pair. Adjust the offset accordingly.
6825 assert(i == NumMemOps-1 && i != 0);
6826 DstOff -= VTSize - Size;
6827 }
6828
6829 // If this store is smaller than the largest store see whether we can get
6830 // the smaller value for free with a truncate.
6831 SDValue Value = MemSetValue;
6832 if (VT.bitsLT(LargestVT)) {
6833 if (!LargestVT.isVector() && !VT.isVector() &&
6834 TLI.isTruncateFree(LargestVT, VT))
6835 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
6836 else
6837 Value = getMemsetValue(Src, VT, DAG, dl);
6838 }
6839 assert(Value.getValueType() == VT && "Value with wrong type.");
6840 SDValue Store = DAG.getStore(
6841 Chain, dl, Value,
6842 DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
6843 DstPtrInfo.getWithOffset(DstOff), Alignment,
6844 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone,
6845 NewAAInfo);
6846 OutChains.push_back(Store);
6847 DstOff += VT.getSizeInBits() / 8;
6848 Size -= VTSize;
6849 }
6850
6851 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
6852 }
6853
6854 static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI,
6855 unsigned AS) {
6856 // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all
6857 // pointer operands can be losslessly bitcasted to pointers of address space 0
6858 if (AS != 0 && !TLI->getTargetMachine().isNoopAddrSpaceCast(AS, 0)) {
6859 report_fatal_error("cannot lower memory intrinsic in address space " +
6860 Twine(AS));
6861 }
6862 }
6863
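/// Lower a memcpy: try an inline load/store expansion first, then a
/// target-specific lowering, and finally fall back to a call to the memcpy
/// library function.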
6864 SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
6865 SDValue Src, SDValue Size, Align Alignment,
6866 bool isVol, bool AlwaysInline, bool isTailCall,
6867 MachinePointerInfo DstPtrInfo,
6868 MachinePointerInfo SrcPtrInfo,
6869 const AAMDNodes &AAInfo) {
6870 // Check to see if we should lower the memcpy to loads and stores first.
6871 // For cases within the target-specified limits, this is the best choice.
6872 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
6873 if (ConstantSize) {
6874 // Memcpy with size zero? Just return the original chain.
6875 if (ConstantSize->isZero())
6876 return Chain;
6877
6878 SDValue Result = getMemcpyLoadsAndStores(
6879 *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment,
6880 isVol, false, DstPtrInfo, SrcPtrInfo, AAInfo);
6881 if (Result.getNode())
6882 return Result;
6883 }
6884
6885 // Then check to see if we should lower the memcpy with target-specific
6886 // code. If the target chooses to do this, this is the next best.
6887 if (TSI) {
6888 SDValue Result = TSI->EmitTargetCodeForMemcpy(
6889 *this, dl, Chain, Dst, Src, Size, Alignment, isVol, AlwaysInline,
6890 DstPtrInfo, SrcPtrInfo);
6891 if (Result.getNode())
6892 return Result;
6893 }
6894
6895 // If we really need inline code and the target declined to provide it,
6896 // use a (potentially long) sequence of loads and stores.
6897 if (AlwaysInline) {
6898 assert(ConstantSize && "AlwaysInline requires a constant size!");
6899 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
6900 ConstantSize->getZExtValue(), Alignment,
6901 isVol, true, DstPtrInfo, SrcPtrInfo, AAInfo);
6902 }
6903
6904 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
6905 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
6906
6907 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
6908 // memcpy is not guaranteed to be safe. libc memcpys aren't required to
6909 // respect volatile, so they may do things like read or write memory
6910 // beyond the given memory regions. But fixing this isn't easy, and most
6911 // people don't care.
6912
6913 // Emit a library call.
6914 TargetLowering::ArgListTy Args;
6915 TargetLowering::ArgListEntry Entry;
6916 Entry.Ty = Type::getInt8PtrTy(*getContext());
6917 Entry.Node = Dst; Args.push_back(Entry);
6918 Entry.Node = Src; Args.push_back(Entry);
6919
6920 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6921 Entry.Node = Size; Args.push_back(Entry);
6922 // FIXME: pass in SDLoc
6923 TargetLowering::CallLoweringInfo CLI(*this);
6924 CLI.setDebugLoc(dl)
6925 .setChain(Chain)
6926 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY),
6927 Dst.getValueType().getTypeForEVT(*getContext()),
6928 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
6929 TLI->getPointerTy(getDataLayout())),
6930 std::move(Args))
6931 .setDiscardResult()
6932 .setTailCall(isTailCall);
6933
6934 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
6935 return CallResult.second;
6936 }
6937
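/// Lower an element-wise unordered-atomic memcpy by calling the
/// element-size-specific library function selected via
/// RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC.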
6938 SDValue SelectionDAG::getAtomicMemcpy(SDValue Chain, const SDLoc &dl,
6939 SDValue Dst, unsigned DstAlign,
6940 SDValue Src, unsigned SrcAlign,
6941 SDValue Size, Type *SizeTy,
6942 unsigned ElemSz, bool isTailCall,
6943 MachinePointerInfo DstPtrInfo,
6944 MachinePointerInfo SrcPtrInfo) {
6945 // Emit a library call.
6946 TargetLowering::ArgListTy Args;
6947 TargetLowering::ArgListEntry Entry;
6948 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6949 Entry.Node = Dst;
6950 Args.push_back(Entry);
6951
6952 Entry.Node = Src;
6953 Args.push_back(Entry);
6954
6955 Entry.Ty = SizeTy;
6956 Entry.Node = Size;
6957 Args.push_back(Entry);
6958
6959 RTLIB::Libcall LibraryCall =
6960 RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElemSz);
6961 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
6962 report_fatal_error("Unsupported element size");
6963
6964 TargetLowering::CallLoweringInfo CLI(*this);
6965 CLI.setDebugLoc(dl)
6966 .setChain(Chain)
6967 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
6968 Type::getVoidTy(*getContext()),
6969 getExternalSymbol(TLI->getLibcallName(LibraryCall),
6970 TLI->getPointerTy(getDataLayout())),
6971 std::move(Args))
6972 .setDiscardResult()
6973 .setTailCall(isTailCall);
6974
6975 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
6976 return CallResult.second;
6977 }
6978
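/// Lower a memmove: try an inline expansion first, then a target-specific
/// lowering, and finally fall back to a call to the memmove library function.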
6979 SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
6980 SDValue Src, SDValue Size, Align Alignment,
6981 bool isVol, bool isTailCall,
6982 MachinePointerInfo DstPtrInfo,
6983 MachinePointerInfo SrcPtrInfo,
6984 const AAMDNodes &AAInfo) {
6985 // Check to see if we should lower the memmove to loads and stores first.
6986 // For cases within the target-specified limits, this is the best choice.
6987 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
6988 if (ConstantSize) {
6989 // Memmove with size zero? Just return the original chain.
6990 if (ConstantSize->isZero())
6991 return Chain;
6992
6993 SDValue Result = getMemmoveLoadsAndStores(
6994 *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment,
6995 isVol, false, DstPtrInfo, SrcPtrInfo, AAInfo);
6996 if (Result.getNode())
6997 return Result;
6998 }
6999
7000 // Then check to see if we should lower the memmove with target-specific
7001 // code. If the target chooses to do this, this is the next best.
7002 if (TSI) {
7003 SDValue Result =
7004 TSI->EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size,
7005 Alignment, isVol, DstPtrInfo, SrcPtrInfo);
7006 if (Result.getNode())
7007 return Result;
7008 }
7009
7010 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
7011 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
7012
7013 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
7014 // not be safe. See memcpy above for more details.
7015
7016 // Emit a library call.
7017 TargetLowering::ArgListTy Args;
7018 TargetLowering::ArgListEntry Entry;
7019 Entry.Ty = Type::getInt8PtrTy(*getContext());
7020 Entry.Node = Dst; Args.push_back(Entry);
7021 Entry.Node = Src; Args.push_back(Entry);
7022
7023 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
7024 Entry.Node = Size; Args.push_back(Entry);
7025 // FIXME: pass in SDLoc
7026 TargetLowering::CallLoweringInfo CLI(*this);
7027 CLI.setDebugLoc(dl)
7028 .setChain(Chain)
7029 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
7030 Dst.getValueType().getTypeForEVT(*getContext()),
7031 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
7032 TLI->getPointerTy(getDataLayout())),
7033 std::move(Args))
7034 .setDiscardResult()
7035 .setTailCall(isTailCall);
7036
7037 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
7038 return CallResult.second;
7039 }
7040
7041 SDValue SelectionDAG::getAtomicMemmove(SDValue Chain, const SDLoc &dl,
7042 SDValue Dst, unsigned DstAlign,
7043 SDValue Src, unsigned SrcAlign,
7044 SDValue Size, Type *SizeTy,
7045 unsigned ElemSz, bool isTailCall,
7046 MachinePointerInfo DstPtrInfo,
7047 MachinePointerInfo SrcPtrInfo) {
7048 // Emit a library call.
7049 TargetLowering::ArgListTy Args;
7050 TargetLowering::ArgListEntry Entry;
7051 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
7052 Entry.Node = Dst;
7053 Args.push_back(Entry);
7054
7055 Entry.Node = Src;
7056 Args.push_back(Entry);
7057
7058 Entry.Ty = SizeTy;
7059 Entry.Node = Size;
7060 Args.push_back(Entry);
7061
7062 RTLIB::Libcall LibraryCall =
7063 RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(ElemSz);
7064 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
7065 report_fatal_error("Unsupported element size");
7066
7067 TargetLowering::CallLoweringInfo CLI(*this);
7068 CLI.setDebugLoc(dl)
7069 .setChain(Chain)
7070 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
7071 Type::getVoidTy(*getContext()),
7072 getExternalSymbol(TLI->getLibcallName(LibraryCall),
7073 TLI->getPointerTy(getDataLayout())),
7074 std::move(Args))
7075 .setDiscardResult()
7076 .setTailCall(isTailCall);
7077
7078 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
7079 return CallResult.second;
7080 }
7081
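/// Lower a memset: try an inline store expansion first, then a
/// target-specific lowering, and finally fall back to a call to the memset
/// library function.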
7082 SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
7083 SDValue Src, SDValue Size, Align Alignment,
7084 bool isVol, bool isTailCall,
7085 MachinePointerInfo DstPtrInfo,
7086 const AAMDNodes &AAInfo) {
7087 // Check to see if we should lower the memset to stores first.
7088 // For cases within the target-specified limits, this is the best choice.
7089 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
7090 if (ConstantSize) {
7091 // Memset with size zero? Just return the original chain.
7092 if (ConstantSize->isZero())
7093 return Chain;
7094
7095 SDValue Result = getMemsetStores(*this, dl, Chain, Dst, Src,
7096 ConstantSize->getZExtValue(), Alignment,
7097 isVol, DstPtrInfo, AAInfo);
7098
7099 if (Result.getNode())
7100 return Result;
7101 }
7102
7103 // Then check to see if we should lower the memset with target-specific
7104 // code. If the target chooses to do this, this is the next best.
7105 if (TSI) {
7106 SDValue Result = TSI->EmitTargetCodeForMemset(
7107 *this, dl, Chain, Dst, Src, Size, Alignment, isVol, DstPtrInfo);
7108 if (Result.getNode())
7109 return Result;
7110 }
7111
7112 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
7113
7114 // Emit a library call.
7115 TargetLowering::ArgListTy Args;
7116 TargetLowering::ArgListEntry Entry;
7117 Entry.Node = Dst; Entry.Ty = Type::getInt8PtrTy(*getContext());
7118 Args.push_back(Entry);
7119 Entry.Node = Src;
7120 Entry.Ty = Src.getValueType().getTypeForEVT(*getContext());
7121 Args.push_back(Entry);
7122 Entry.Node = Size;
7123 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
7124 Args.push_back(Entry);
7125
7126 // FIXME: pass in SDLoc
7127 TargetLowering::CallLoweringInfo CLI(*this);
7128 CLI.setDebugLoc(dl)
7129 .setChain(Chain)
7130 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
7131 Dst.getValueType().getTypeForEVT(*getContext()),
7132 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
7133 TLI->getPointerTy(getDataLayout())),
7134 std::move(Args))
7135 .setDiscardResult()
7136 .setTailCall(isTailCall);
7137
7138 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
7139 return CallResult.second;
7140 }
7141
7142 SDValue SelectionDAG::getAtomicMemset(SDValue Chain, const SDLoc &dl,
7143 SDValue Dst, unsigned DstAlign,
7144 SDValue Value, SDValue Size, Type *SizeTy,
7145 unsigned ElemSz, bool isTailCall,
7146 MachinePointerInfo DstPtrInfo) {
7147 // Emit a library call.
7148 TargetLowering::ArgListTy Args;
7149 TargetLowering::ArgListEntry Entry;
7150 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
7151 Entry.Node = Dst;
7152 Args.push_back(Entry);
7153
7154 Entry.Ty = Type::getInt8Ty(*getContext());
7155 Entry.Node = Value;
7156 Args.push_back(Entry);
7157
7158 Entry.Ty = SizeTy;
7159 Entry.Node = Size;
7160 Args.push_back(Entry);
7161
7162 RTLIB::Libcall LibraryCall =
7163 RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(ElemSz);
7164 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
7165 report_fatal_error("Unsupported element size");
7166
7167 TargetLowering::CallLoweringInfo CLI(*this);
7168 CLI.setDebugLoc(dl)
7169 .setChain(Chain)
7170 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
7171 Type::getVoidTy(*getContext()),
7172 getExternalSymbol(TLI->getLibcallName(LibraryCall),
7173 TLI->getPointerTy(getDataLayout())),
7174 std::move(Args))
7175 .setDiscardResult()
7176 .setTailCall(isTailCall);
7177
7178 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
7179 return CallResult.second;
7180 }
7181
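/// Create (or CSE to an existing) AtomicSDNode with the given value type
/// list, operands and memory operand.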
7182 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
7183 SDVTList VTList, ArrayRef<SDValue> Ops,
7184 MachineMemOperand *MMO) {
7185 FoldingSetNodeID ID;
7186 ID.AddInteger(MemVT.getRawBits());
7187 AddNodeIDNode(ID, Opcode, VTList, Ops);
7188 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7189 void* IP = nullptr;
7190 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7191 cast<AtomicSDNode>(E)->refineAlignment(MMO);
7192 return SDValue(E, 0);
7193 }
7194
7195 auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
7196 VTList, MemVT, MMO);
7197 createOperands(N, Ops);
7198
7199 CSEMap.InsertNode(N, IP);
7200 InsertNode(N);
7201 return SDValue(N, 0);
7202 }
7203
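/// Create an ATOMIC_CMP_SWAP or ATOMIC_CMP_SWAP_WITH_SUCCESS node with
/// operands {Chain, Ptr, Cmp, Swp}.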
7204 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl,
7205 EVT MemVT, SDVTList VTs, SDValue Chain,
7206 SDValue Ptr, SDValue Cmp, SDValue Swp,
7207 MachineMemOperand *MMO) {
7208 assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
7209 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
7210 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
7211
7212 SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
7213 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
7214 }
7215
7216 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
7217 SDValue Chain, SDValue Ptr, SDValue Val,
7218 MachineMemOperand *MMO) {
7219 assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
7220 Opcode == ISD::ATOMIC_LOAD_SUB ||
7221 Opcode == ISD::ATOMIC_LOAD_AND ||
7222 Opcode == ISD::ATOMIC_LOAD_CLR ||
7223 Opcode == ISD::ATOMIC_LOAD_OR ||
7224 Opcode == ISD::ATOMIC_LOAD_XOR ||
7225 Opcode == ISD::ATOMIC_LOAD_NAND ||
7226 Opcode == ISD::ATOMIC_LOAD_MIN ||
7227 Opcode == ISD::ATOMIC_LOAD_MAX ||
7228 Opcode == ISD::ATOMIC_LOAD_UMIN ||
7229 Opcode == ISD::ATOMIC_LOAD_UMAX ||
7230 Opcode == ISD::ATOMIC_LOAD_FADD ||
7231 Opcode == ISD::ATOMIC_LOAD_FSUB ||
7232 Opcode == ISD::ATOMIC_SWAP ||
7233 Opcode == ISD::ATOMIC_STORE) &&
7234 "Invalid Atomic Op");
7235
7236 EVT VT = Val.getValueType();
7237
7238 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
7239 getVTList(VT, MVT::Other);
7240 SDValue Ops[] = {Chain, Ptr, Val};
7241 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
7242 }
7243
7244 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
7245 EVT VT, SDValue Chain, SDValue Ptr,
7246 MachineMemOperand *MMO) {
7247 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");
7248
7249 SDVTList VTs = getVTList(VT, MVT::Other);
7250 SDValue Ops[] = {Chain, Ptr};
7251 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
7252 }
7253
7254 /// getMergeValues - Create a MERGE_VALUES node from the given operands.
7255 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) {
7256 if (Ops.size() == 1)
7257 return Ops[0];
7258
7259 SmallVector<EVT, 4> VTs;
7260 VTs.reserve(Ops.size());
7261 for (const SDValue &Op : Ops)
7262 VTs.push_back(Op.getValueType());
7263 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
7264 }
7265
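/// Convenience overload that builds the MachineMemOperand from the given
/// pointer info, alignment, flags and size before creating the memory
/// intrinsic node.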
7266 SDValue SelectionDAG::getMemIntrinsicNode(
7267 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
7268 EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment,
7269 MachineMemOperand::Flags Flags, uint64_t Size, const AAMDNodes &AAInfo) {
7270 if (!Size && MemVT.isScalableVector())
7271 Size = MemoryLocation::UnknownSize;
7272 else if (!Size)
7273 Size = MemVT.getStoreSize();
7274
7275 MachineFunction &MF = getMachineFunction();
7276 MachineMemOperand *MMO =
7277 MF.getMachineMemOperand(PtrInfo, Flags, Size, Alignment, AAInfo);
7278
7279 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
7280 }
7281
7282 SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl,
7283 SDVTList VTList,
7284 ArrayRef<SDValue> Ops, EVT MemVT,
7285 MachineMemOperand *MMO) {
7286 assert((Opcode == ISD::INTRINSIC_VOID ||
7287 Opcode == ISD::INTRINSIC_W_CHAIN ||
7288 Opcode == ISD::PREFETCH ||
7289 ((int)Opcode <= std::numeric_limits<int>::max() &&
7290 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
7291 "Opcode is not a memory-accessing opcode!");
7292
7293 // Memoize the node unless it returns a flag.
7294 MemIntrinsicSDNode *N;
7295 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
7296 FoldingSetNodeID ID;
7297 AddNodeIDNode(ID, Opcode, VTList, Ops);
7298 ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>(
7299 Opcode, dl.getIROrder(), VTList, MemVT, MMO));
7300 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7301 void *IP = nullptr;
7302 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7303 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
7304 return SDValue(E, 0);
7305 }
7306
7307 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
7308 VTList, MemVT, MMO);
7309 createOperands(N, Ops);
7310
7311 CSEMap.InsertNode(N, IP);
7312 } else {
7313 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
7314 VTList, MemVT, MMO);
7315 createOperands(N, Ops);
7316 }
7317 InsertNode(N);
7318 SDValue V(N, 0);
7319 NewSDValueDbgMsg(V, "Creating new node: ", this);
7320 return V;
7321 }
7322
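/// Create a LIFETIME_START or LIFETIME_END node for the given frame index;
/// nodes are CSE'd on the frame index, size and offset.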
7323 SDValue SelectionDAG::getLifetimeNode(bool IsStart, const SDLoc &dl,
7324 SDValue Chain, int FrameIndex,
7325 int64_t Size, int64_t Offset) {
7326 const unsigned Opcode = IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END;
7327 const auto VTs = getVTList(MVT::Other);
7328 SDValue Ops[2] = {
7329 Chain,
7330 getFrameIndex(FrameIndex,
7331 getTargetLoweringInfo().getFrameIndexTy(getDataLayout()),
7332 true)};
7333
7334 FoldingSetNodeID ID;
7335 AddNodeIDNode(ID, Opcode, VTs, Ops);
7336 ID.AddInteger(FrameIndex);
7337 ID.AddInteger(Size);
7338 ID.AddInteger(Offset);
7339 void *IP = nullptr;
7340 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
7341 return SDValue(E, 0);
7342
7343 LifetimeSDNode *N = newSDNode<LifetimeSDNode>(
7344 Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs, Size, Offset);
7345 createOperands(N, Ops);
7346 CSEMap.InsertNode(N, IP);
7347 InsertNode(N);
7348 SDValue V(N, 0);
7349 NewSDValueDbgMsg(V, "Creating new node: ", this);
7350 return V;
7351 }
7352
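/// getPseudoProbeNode - Create an ISD::PSEUDO_PROBE node carrying the probe's
/// GUID, index, and attributes; the GUID and index participate in the
/// FoldingSet ID so identical probes are uniqued.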
7353 SDValue SelectionDAG::getPseudoProbeNode(const SDLoc &Dl, SDValue Chain,
7354 uint64_t Guid, uint64_t Index,
7355 uint32_t Attr) {
7356 const unsigned Opcode = ISD::PSEUDO_PROBE;
7357 const auto VTs = getVTList(MVT::Other);
7358 SDValue Ops[] = {Chain};
7359 FoldingSetNodeID ID;
7360 AddNodeIDNode(ID, Opcode, VTs, Ops);
7361 ID.AddInteger(Guid);
7362 ID.AddInteger(Index);
7363 void *IP = nullptr;
7364 if (SDNode *E = FindNodeOrInsertPos(ID, Dl, IP))
7365 return SDValue(E, 0);
7366
7367 auto *N = newSDNode<PseudoProbeSDNode>(
7368 Opcode, Dl.getIROrder(), Dl.getDebugLoc(), VTs, Guid, Index, Attr);
7369 createOperands(N, Ops);
7370 CSEMap.InsertNode(N, IP);
7371 InsertNode(N);
7372 SDValue V(N, 0);
7373 NewSDValueDbgMsg(V, "Creating new node: ", this);
7374 return V;
7375 }
7376
7377 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
7378 /// MachinePointerInfo record from it. This is particularly useful because the
7379 /// code generator has many cases where it doesn't bother passing in a
7380 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
7381 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
7382 SelectionDAG &DAG, SDValue Ptr,
7383 int64_t Offset = 0) {
7384 // If this is FI+Offset, we can model it.
7385 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
7386 return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(),
7387 FI->getIndex(), Offset);
7388
7389 // If this is (FI+Offset1)+Offset2, we can model it.
7390 if (Ptr.getOpcode() != ISD::ADD ||
7391 !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
7392 !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
7393 return Info;
7394
7395 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
7396 return MachinePointerInfo::getFixedStack(
7397 DAG.getMachineFunction(), FI,
7398 Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
7399 }
7400
7401 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
7402 /// MachinePointerInfo record from it. This is particularly useful because the
7403 /// code generator has many cases where it doesn't bother passing in a
7404 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
7405 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
7406 SelectionDAG &DAG, SDValue Ptr,
7407 SDValue OffsetOp) {
7408 // If the 'Offset' value is a constant, fold it in; an undef offset is treated as zero. Otherwise we can't handle this.
7409 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
7410 return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue());
7411 if (OffsetOp.isUndef())
7412 return InferPointerInfo(Info, DAG, Ptr);
7413 return Info;
7414 }
7415
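/// getLoad - Most general load constructor: handles indexed, extending, and
/// plain loads. When no MachinePointerInfo is supplied, one is inferred from
/// frame-index pointers (see InferPointerInfo above) before the
/// MachineMemOperand is created.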
7416 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
7417 EVT VT, const SDLoc &dl, SDValue Chain,
7418 SDValue Ptr, SDValue Offset,
7419 MachinePointerInfo PtrInfo, EVT MemVT,
7420 Align Alignment,
7421 MachineMemOperand::Flags MMOFlags,
7422 const AAMDNodes &AAInfo, const MDNode *Ranges) {
7423 assert(Chain.getValueType() == MVT::Other &&
7424 "Invalid chain type");
7425
7426 MMOFlags |= MachineMemOperand::MOLoad;
7427 assert((MMOFlags & MachineMemOperand::MOStore) == 0);
7428 // If we don't have a PtrInfo, infer the trivial frame index case to simplify
7429 // clients.
7430 if (PtrInfo.V.isNull())
7431 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);
7432
7433 uint64_t Size = MemoryLocation::getSizeOrUnknown(MemVT.getStoreSize());
7434 MachineFunction &MF = getMachineFunction();
7435 MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size,
7436 Alignment, AAInfo, Ranges);
7437 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
7438 }
7439
7440 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
7441 EVT VT, const SDLoc &dl, SDValue Chain,
7442 SDValue Ptr, SDValue Offset, EVT MemVT,
7443 MachineMemOperand *MMO) {
7444 if (VT == MemVT) {
7445 ExtType = ISD::NON_EXTLOAD;
7446 } else if (ExtType == ISD::NON_EXTLOAD) {
7447 assert(VT == MemVT && "Non-extending load from different memory type!");
7448 } else {
7449 // Extending load.
7450 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
7451 "Should only be an extending load, not truncating!");
7452 assert(VT.isInteger() == MemVT.isInteger() &&
7453 "Cannot convert from FP to Int or Int -> FP!");
7454 assert(VT.isVector() == MemVT.isVector() &&
7455 "Cannot use an ext load to convert to or from a vector!");
7456 assert((!VT.isVector() ||
7457 VT.getVectorElementCount() == MemVT.getVectorElementCount()) &&
7458 "Cannot use an ext load to change the number of vector elements!");
7459 }
7460
7461 bool Indexed = AM != ISD::UNINDEXED;
7462 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
7463
7464 SDVTList VTs = Indexed ?
7465 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
7466 SDValue Ops[] = { Chain, Ptr, Offset };
7467 FoldingSetNodeID ID;
7468 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
7469 ID.AddInteger(MemVT.getRawBits());
7470 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
7471 dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO));
7472 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7473 void *IP = nullptr;
7474 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7475 cast<LoadSDNode>(E)->refineAlignment(MMO);
7476 return SDValue(E, 0);
7477 }
7478 auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
7479 ExtType, MemVT, MMO);
7480 createOperands(N, Ops);
7481
7482 CSEMap.InsertNode(N, IP);
7483 InsertNode(N);
7484 SDValue V(N, 0);
7485 NewSDValueDbgMsg(V, "Creating new node: ", this);
7486 return V;
7487 }
7488
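/// getLoad - Convenience form for a simple unindexed, non-extending load.
/// As an illustrative sketch only (MF, FI, Chain, Ptr, and DL are hypothetical
/// values from the caller), target lowering code typically uses it like:
///   SDValue Loaded = DAG.getLoad(MVT::i32, DL, Chain, Ptr,
///                                MachinePointerInfo::getFixedStack(MF, FI));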
7489 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
7490 SDValue Ptr, MachinePointerInfo PtrInfo,
7491 MaybeAlign Alignment,
7492 MachineMemOperand::Flags MMOFlags,
7493 const AAMDNodes &AAInfo, const MDNode *Ranges) {
7494 SDValue Undef = getUNDEF(Ptr.getValueType());
7495 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
7496 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
7497 }
7498
7499 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
7500 SDValue Ptr, MachineMemOperand *MMO) {
7501 SDValue Undef = getUNDEF(Ptr.getValueType());
7502 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
7503 VT, MMO);
7504 }
7505
7506 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
7507 EVT VT, SDValue Chain, SDValue Ptr,
7508 MachinePointerInfo PtrInfo, EVT MemVT,
7509 MaybeAlign Alignment,
7510 MachineMemOperand::Flags MMOFlags,
7511 const AAMDNodes &AAInfo) {
7512 SDValue Undef = getUNDEF(Ptr.getValueType());
7513 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo,
7514 MemVT, Alignment, MMOFlags, AAInfo);
7515 }
7516
7517 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
7518 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT,
7519 MachineMemOperand *MMO) {
7520 SDValue Undef = getUNDEF(Ptr.getValueType());
7521 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
7522 MemVT, MMO);
7523 }
7524
7525 SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl,
7526 SDValue Base, SDValue Offset,
7527 ISD::MemIndexedMode AM) {
7528 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
7529 assert(LD->getOffset().isUndef() && "Load is already an indexed load!");
7530 // Don't propagate the invariant or dereferenceable flags.
7531 auto MMOFlags =
7532 LD->getMemOperand()->getFlags() &
7533 ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
7534 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
7535 LD->getChain(), Base, Offset, LD->getPointerInfo(),
7536 LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo());
7537 }
7538
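/// getStore - Build an unindexed, non-truncating store. The MachineMemOperand
/// is constructed from the pointer info (inferred from a frame index when none
/// is given), the requested alignment, and the store size of Val's type.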
7539 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
7540 SDValue Ptr, MachinePointerInfo PtrInfo,
7541 Align Alignment,
7542 MachineMemOperand::Flags MMOFlags,
7543 const AAMDNodes &AAInfo) {
7544 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
7545
7546 MMOFlags |= MachineMemOperand::MOStore;
7547 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
7548
7549 if (PtrInfo.V.isNull())
7550 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
7551
7552 MachineFunction &MF = getMachineFunction();
7553 uint64_t Size =
7554 MemoryLocation::getSizeOrUnknown(Val.getValueType().getStoreSize());
7555 MachineMemOperand *MMO =
7556 MF.getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo);
7557 return getStore(Chain, dl, Val, Ptr, MMO);
7558 }
7559
7560 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
7561 SDValue Ptr, MachineMemOperand *MMO) {
7562 assert(Chain.getValueType() == MVT::Other &&
7563 "Invalid chain type");
7564 EVT VT = Val.getValueType();
7565 SDVTList VTs = getVTList(MVT::Other);
7566 SDValue Undef = getUNDEF(Ptr.getValueType());
7567 SDValue Ops[] = { Chain, Val, Ptr, Undef };
7568 FoldingSetNodeID ID;
7569 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
7570 ID.AddInteger(VT.getRawBits());
7571 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
7572 dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO));
7573 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7574 void *IP = nullptr;
7575 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7576 cast<StoreSDNode>(E)->refineAlignment(MMO);
7577 return SDValue(E, 0);
7578 }
7579 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
7580 ISD::UNINDEXED, false, VT, MMO);
7581 createOperands(N, Ops);
7582
7583 CSEMap.InsertNode(N, IP);
7584 InsertNode(N);
7585 SDValue V(N, 0);
7586 NewSDValueDbgMsg(V, "Creating new node: ", this);
7587 return V;
7588 }
7589
7590 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
7591 SDValue Ptr, MachinePointerInfo PtrInfo,
7592 EVT SVT, Align Alignment,
7593 MachineMemOperand::Flags MMOFlags,
7594 const AAMDNodes &AAInfo) {
7595 assert(Chain.getValueType() == MVT::Other &&
7596 "Invalid chain type");
7597
7598 MMOFlags |= MachineMemOperand::MOStore;
7599 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
7600
7601 if (PtrInfo.V.isNull())
7602 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
7603
7604 MachineFunction &MF = getMachineFunction();
7605 MachineMemOperand *MMO = MF.getMachineMemOperand(
7606 PtrInfo, MMOFlags, MemoryLocation::getSizeOrUnknown(SVT.getStoreSize()),
7607 Alignment, AAInfo);
7608 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
7609 }
7610
7611 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
7612 SDValue Ptr, EVT SVT,
7613 MachineMemOperand *MMO) {
7614 EVT VT = Val.getValueType();
7615
7616 assert(Chain.getValueType() == MVT::Other &&
7617 "Invalid chain type");
7618 if (VT == SVT)
7619 return getStore(Chain, dl, Val, Ptr, MMO);
7620
7621 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
7622 "Should only be a truncating store, not extending!");
7623 assert(VT.isInteger() == SVT.isInteger() &&
7624 "Can't do FP-INT conversion!");
7625 assert(VT.isVector() == SVT.isVector() &&
7626 "Cannot use trunc store to convert to or from a vector!");
7627 assert((!VT.isVector() ||
7628 VT.getVectorElementCount() == SVT.getVectorElementCount()) &&
7629 "Cannot use trunc store to change the number of vector elements!");
7630
7631 SDVTList VTs = getVTList(MVT::Other);
7632 SDValue Undef = getUNDEF(Ptr.getValueType());
7633 SDValue Ops[] = { Chain, Val, Ptr, Undef };
7634 FoldingSetNodeID ID;
7635 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
7636 ID.AddInteger(SVT.getRawBits());
7637 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
7638 dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO));
7639 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7640 void *IP = nullptr;
7641 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7642 cast<StoreSDNode>(E)->refineAlignment(MMO);
7643 return SDValue(E, 0);
7644 }
7645 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
7646 ISD::UNINDEXED, true, SVT, MMO);
7647 createOperands(N, Ops);
7648
7649 CSEMap.InsertNode(N, IP);
7650 InsertNode(N);
7651 SDValue V(N, 0);
7652 NewSDValueDbgMsg(V, "Creating new node: ", this);
7653 return V;
7654 }
7655
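/// getIndexedStore - Rewrite an existing unindexed store as a pre/post-indexed
/// store with the given base and offset, reusing the original node's memory
/// operand, truncation flag, and memory VT.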
7656 SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl,
7657 SDValue Base, SDValue Offset,
7658 ISD::MemIndexedMode AM) {
7659 StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
7660 assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
7661 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
7662 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
7663 FoldingSetNodeID ID;
7664 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
7665 ID.AddInteger(ST->getMemoryVT().getRawBits());
7666 ID.AddInteger(ST->getRawSubclassData());
7667 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
7668 void *IP = nullptr;
7669 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
7670 return SDValue(E, 0);
7671
7672 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
7673 ST->isTruncatingStore(), ST->getMemoryVT(),
7674 ST->getMemOperand());
7675 createOperands(N, Ops);
7676
7677 CSEMap.InsertNode(N, IP);
7678 InsertNode(N);
7679 SDValue V(N, 0);
7680 NewSDValueDbgMsg(V, "Creating new node: ", this);
7681 return V;
7682 }
7683
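/// getLoadVP - Vector-predicated (VP) loads mirror the regular load builders
/// above but carry two extra operands: a per-lane mask and an explicit vector
/// length (EVL). The overloads below parallel getLoad/getExtLoad.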
7684 SDValue SelectionDAG::getLoadVP(
7685 ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl,
7686 SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL,
7687 MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment,
7688 MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo,
7689 const MDNode *Ranges, bool IsExpanding) {
7690 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
7691
7692 MMOFlags |= MachineMemOperand::MOLoad;
7693 assert((MMOFlags & MachineMemOperand::MOStore) == 0);
7694 // If we don't have a PtrInfo, infer the trivial frame index case to simplify
7695 // clients.
7696 if (PtrInfo.V.isNull())
7697 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);
7698
7699 uint64_t Size = MemoryLocation::getSizeOrUnknown(MemVT.getStoreSize());
7700 MachineFunction &MF = getMachineFunction();
7701 MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size,
7702 Alignment, AAInfo, Ranges);
7703 return getLoadVP(AM, ExtType, VT, dl, Chain, Ptr, Offset, Mask, EVL, MemVT,
7704 MMO, IsExpanding);
7705 }
7706
7707 SDValue SelectionDAG::getLoadVP(ISD::MemIndexedMode AM,
7708 ISD::LoadExtType ExtType, EVT VT,
7709 const SDLoc &dl, SDValue Chain, SDValue Ptr,
7710 SDValue Offset, SDValue Mask, SDValue EVL,
7711 EVT MemVT, MachineMemOperand *MMO,
7712 bool IsExpanding) {
7713 if (VT == MemVT) {
7714 ExtType = ISD::NON_EXTLOAD;
7715 } else if (ExtType == ISD::NON_EXTLOAD) {
7716 assert(VT == MemVT && "Non-extending load from different memory type!");
7717 } else {
7718 // Extending load.
7719 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
7720 "Should only be an extending load, not truncating!");
7721 assert(VT.isInteger() == MemVT.isInteger() &&
7722 "Cannot convert from FP to Int or Int -> FP!");
7723 assert(VT.isVector() == MemVT.isVector() &&
7724 "Cannot use an ext load to convert to or from a vector!");
7725 assert((!VT.isVector() ||
7726 VT.getVectorElementCount() == MemVT.getVectorElementCount()) &&
7727 "Cannot use an ext load to change the number of vector elements!");
7728 }
7729
7730 bool Indexed = AM != ISD::UNINDEXED;
7731 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
7732
7733 SDVTList VTs = Indexed ? getVTList(VT, Ptr.getValueType(), MVT::Other)
7734 : getVTList(VT, MVT::Other);
7735 SDValue Ops[] = {Chain, Ptr, Offset, Mask, EVL};
7736 FoldingSetNodeID ID;
7737 AddNodeIDNode(ID, ISD::VP_LOAD, VTs, Ops);
7738 ID.AddInteger(VT.getRawBits());
7739 ID.AddInteger(getSyntheticNodeSubclassData<VPLoadSDNode>(
7740 dl.getIROrder(), VTs, AM, ExtType, IsExpanding, MemVT, MMO));
7741 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7742 void *IP = nullptr;
7743 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7744 cast<VPLoadSDNode>(E)->refineAlignment(MMO);
7745 return SDValue(E, 0);
7746 }
7747 auto *N = newSDNode<VPLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
7748 ExtType, IsExpanding, MemVT, MMO);
7749 createOperands(N, Ops);
7750
7751 CSEMap.InsertNode(N, IP);
7752 InsertNode(N);
7753 SDValue V(N, 0);
7754 NewSDValueDbgMsg(V, "Creating new node: ", this);
7755 return V;
7756 }
7757
7758 SDValue SelectionDAG::getLoadVP(EVT VT, const SDLoc &dl, SDValue Chain,
7759 SDValue Ptr, SDValue Mask, SDValue EVL,
7760 MachinePointerInfo PtrInfo,
7761 MaybeAlign Alignment,
7762 MachineMemOperand::Flags MMOFlags,
7763 const AAMDNodes &AAInfo, const MDNode *Ranges,
7764 bool IsExpanding) {
7765 SDValue Undef = getUNDEF(Ptr.getValueType());
7766 return getLoadVP(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
7767 Mask, EVL, PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges,
7768 IsExpanding);
7769 }
7770
7771 SDValue SelectionDAG::getLoadVP(EVT VT, const SDLoc &dl, SDValue Chain,
7772 SDValue Ptr, SDValue Mask, SDValue EVL,
7773 MachineMemOperand *MMO, bool IsExpanding) {
7774 SDValue Undef = getUNDEF(Ptr.getValueType());
7775 return getLoadVP(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
7776 Mask, EVL, VT, MMO, IsExpanding);
7777 }
7778
7779 SDValue SelectionDAG::getExtLoadVP(ISD::LoadExtType ExtType, const SDLoc &dl,
7780 EVT VT, SDValue Chain, SDValue Ptr,
7781 SDValue Mask, SDValue EVL,
7782 MachinePointerInfo PtrInfo, EVT MemVT,
7783 MaybeAlign Alignment,
7784 MachineMemOperand::Flags MMOFlags,
7785 const AAMDNodes &AAInfo, bool IsExpanding) {
7786 SDValue Undef = getUNDEF(Ptr.getValueType());
7787 return getLoadVP(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, Mask,
7788 EVL, PtrInfo, MemVT, Alignment, MMOFlags, AAInfo, nullptr,
7789 IsExpanding);
7790 }
7791
7792 SDValue SelectionDAG::getExtLoadVP(ISD::LoadExtType ExtType, const SDLoc &dl,
7793 EVT VT, SDValue Chain, SDValue Ptr,
7794 SDValue Mask, SDValue EVL, EVT MemVT,
7795 MachineMemOperand *MMO, bool IsExpanding) {
7796 SDValue Undef = getUNDEF(Ptr.getValueType());
7797 return getLoadVP(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, Mask,
7798 EVL, MemVT, MMO, IsExpanding);
7799 }
7800
7801 SDValue SelectionDAG::getIndexedLoadVP(SDValue OrigLoad, const SDLoc &dl,
7802 SDValue Base, SDValue Offset,
7803 ISD::MemIndexedMode AM) {
7804 auto *LD = cast<VPLoadSDNode>(OrigLoad);
7805 assert(LD->getOffset().isUndef() && "Load is already an indexed load!");
7806 // Don't propagate the invariant or dereferenceable flags.
7807 auto MMOFlags =
7808 LD->getMemOperand()->getFlags() &
7809 ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
7810 return getLoadVP(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
7811 LD->getChain(), Base, Offset, LD->getMask(),
7812 LD->getVectorLength(), LD->getPointerInfo(),
7813 LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo(),
7814 nullptr, LD->isExpandingLoad());
7815 }
7816
7817 SDValue SelectionDAG::getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val,
7818 SDValue Ptr, SDValue Mask, SDValue EVL,
7819 MachinePointerInfo PtrInfo, Align Alignment,
7820 MachineMemOperand::Flags MMOFlags,
7821 const AAMDNodes &AAInfo, bool IsCompressing) {
7822 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
7823
7824 MMOFlags |= MachineMemOperand::MOStore;
7825 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
7826
7827 if (PtrInfo.V.isNull())
7828 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
7829
7830 MachineFunction &MF = getMachineFunction();
7831 uint64_t Size =
7832 MemoryLocation::getSizeOrUnknown(Val.getValueType().getStoreSize());
7833 MachineMemOperand *MMO =
7834 MF.getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo);
7835 return getStoreVP(Chain, dl, Val, Ptr, Mask, EVL, MMO, IsCompressing);
7836 }
7837
7838 SDValue SelectionDAG::getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val,
7839 SDValue Ptr, SDValue Mask, SDValue EVL,
7840 MachineMemOperand *MMO, bool IsCompressing) {
7841 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
7842 EVT VT = Val.getValueType();
7843 SDVTList VTs = getVTList(MVT::Other);
7844 SDValue Undef = getUNDEF(Ptr.getValueType());
7845 SDValue Ops[] = {Chain, Val, Ptr, Undef, Mask, EVL};
7846 FoldingSetNodeID ID;
7847 AddNodeIDNode(ID, ISD::VP_STORE, VTs, Ops);
7848 ID.AddInteger(VT.getRawBits());
7849 ID.AddInteger(getSyntheticNodeSubclassData<VPStoreSDNode>(
7850 dl.getIROrder(), VTs, ISD::UNINDEXED, false, IsCompressing, VT, MMO));
7851 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7852 void *IP = nullptr;
7853 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7854 cast<VPStoreSDNode>(E)->refineAlignment(MMO);
7855 return SDValue(E, 0);
7856 }
7857 auto *N =
7858 newSDNode<VPStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
7859 ISD::UNINDEXED, false, IsCompressing, VT, MMO);
7860 createOperands(N, Ops);
7861
7862 CSEMap.InsertNode(N, IP);
7863 InsertNode(N);
7864 SDValue V(N, 0);
7865 NewSDValueDbgMsg(V, "Creating new node: ", this);
7866 return V;
7867 }
7868
7869 SDValue SelectionDAG::getTruncStoreVP(SDValue Chain, const SDLoc &dl,
7870 SDValue Val, SDValue Ptr, SDValue Mask,
7871 SDValue EVL, MachinePointerInfo PtrInfo,
7872 EVT SVT, Align Alignment,
7873 MachineMemOperand::Flags MMOFlags,
7874 const AAMDNodes &AAInfo,
7875 bool IsCompressing) {
7876 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
7877
7878 MMOFlags |= MachineMemOperand::MOStore;
7879 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
7880
7881 if (PtrInfo.V.isNull())
7882 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
7883
7884 MachineFunction &MF = getMachineFunction();
7885 MachineMemOperand *MMO = MF.getMachineMemOperand(
7886 PtrInfo, MMOFlags, MemoryLocation::getSizeOrUnknown(SVT.getStoreSize()),
7887 Alignment, AAInfo);
7888 return getTruncStoreVP(Chain, dl, Val, Ptr, Mask, EVL, SVT, MMO,
7889 IsCompressing);
7890 }
7891
7892 SDValue SelectionDAG::getTruncStoreVP(SDValue Chain, const SDLoc &dl,
7893 SDValue Val, SDValue Ptr, SDValue Mask,
7894 SDValue EVL, EVT SVT,
7895 MachineMemOperand *MMO,
7896 bool IsCompressing) {
7897 EVT VT = Val.getValueType();
7898
7899 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
7900 if (VT == SVT)
7901 return getStoreVP(Chain, dl, Val, Ptr, Mask, EVL, MMO, IsCompressing);
7902
7903 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
7904 "Should only be a truncating store, not extending!");
7905 assert(VT.isInteger() == SVT.isInteger() && "Can't do FP-INT conversion!");
7906 assert(VT.isVector() == SVT.isVector() &&
7907 "Cannot use trunc store to convert to or from a vector!");
7908 assert((!VT.isVector() ||
7909 VT.getVectorElementCount() == SVT.getVectorElementCount()) &&
7910 "Cannot use trunc store to change the number of vector elements!");
7911
7912 SDVTList VTs = getVTList(MVT::Other);
7913 SDValue Undef = getUNDEF(Ptr.getValueType());
7914 SDValue Ops[] = {Chain, Val, Ptr, Undef, Mask, EVL};
7915 FoldingSetNodeID ID;
7916 AddNodeIDNode(ID, ISD::VP_STORE, VTs, Ops);
7917 ID.AddInteger(SVT.getRawBits());
7918 ID.AddInteger(getSyntheticNodeSubclassData<VPStoreSDNode>(
7919 dl.getIROrder(), VTs, ISD::UNINDEXED, true, IsCompressing, SVT, MMO));
7920 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7921 void *IP = nullptr;
7922 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7923 cast<VPStoreSDNode>(E)->refineAlignment(MMO);
7924 return SDValue(E, 0);
7925 }
7926 auto *N =
7927 newSDNode<VPStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
7928 ISD::UNINDEXED, true, IsCompressing, SVT, MMO);
7929 createOperands(N, Ops);
7930
7931 CSEMap.InsertNode(N, IP);
7932 InsertNode(N);
7933 SDValue V(N, 0);
7934 NewSDValueDbgMsg(V, "Creating new node: ", this);
7935 return V;
7936 }
7937
7938 SDValue SelectionDAG::getIndexedStoreVP(SDValue OrigStore, const SDLoc &dl,
7939 SDValue Base, SDValue Offset,
7940 ISD::MemIndexedMode AM) {
7941 auto *ST = cast<VPStoreSDNode>(OrigStore);
7942 assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
7943 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
7944 SDValue Ops[] = {ST->getChain(), ST->getValue(), Base,
7945 Offset, ST->getMask(), ST->getVectorLength()};
7946 FoldingSetNodeID ID;
7947 AddNodeIDNode(ID, ISD::VP_STORE, VTs, Ops);
7948 ID.AddInteger(ST->getMemoryVT().getRawBits());
7949 ID.AddInteger(ST->getRawSubclassData());
7950 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
7951 void *IP = nullptr;
7952 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
7953 return SDValue(E, 0);
7954
7955 auto *N = newSDNode<VPStoreSDNode>(
7956 dl.getIROrder(), dl.getDebugLoc(), VTs, AM, ST->isTruncatingStore(),
7957 ST->isCompressingStore(), ST->getMemoryVT(), ST->getMemOperand());
7958 createOperands(N, Ops);
7959
7960 CSEMap.InsertNode(N, IP);
7961 InsertNode(N);
7962 SDValue V(N, 0);
7963 NewSDValueDbgMsg(V, "Creating new node: ", this);
7964 return V;
7965 }
7966
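/// getGatherVP - Create an ISD::VP_GATHER node. Exactly six operands are
/// expected; the asserts below check that the mask, index, and result vectors
/// agree on element count and scalability, and that the scale operand is a
/// constant power of two.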
7967 SDValue SelectionDAG::getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl,
7968 ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
7969 ISD::MemIndexType IndexType) {
7970 assert(Ops.size() == 6 && "Incompatible number of operands");
7971
7972 FoldingSetNodeID ID;
7973 AddNodeIDNode(ID, ISD::VP_GATHER, VTs, Ops);
7974 ID.AddInteger(VT.getRawBits());
7975 ID.AddInteger(getSyntheticNodeSubclassData<VPGatherSDNode>(
7976 dl.getIROrder(), VTs, VT, MMO, IndexType));
7977 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7978 void *IP = nullptr;
7979 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7980 cast<VPGatherSDNode>(E)->refineAlignment(MMO);
7981 return SDValue(E, 0);
7982 }
7983
7984 auto *N = newSDNode<VPGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
7985 VT, MMO, IndexType);
7986 createOperands(N, Ops);
7987
7988 assert(N->getMask().getValueType().getVectorElementCount() ==
7989 N->getValueType(0).getVectorElementCount() &&
7990 "Vector width mismatch between mask and data");
7991 assert(N->getIndex().getValueType().getVectorElementCount().isScalable() ==
7992 N->getValueType(0).getVectorElementCount().isScalable() &&
7993 "Scalable flags of index and data do not match");
7994 assert(ElementCount::isKnownGE(
7995 N->getIndex().getValueType().getVectorElementCount(),
7996 N->getValueType(0).getVectorElementCount()) &&
7997 "Vector width mismatch between index and data");
7998 assert(isa<ConstantSDNode>(N->getScale()) &&
7999 cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
8000 "Scale should be a constant power of 2");
8001
8002 CSEMap.InsertNode(N, IP);
8003 InsertNode(N);
8004 SDValue V(N, 0);
8005 NewSDValueDbgMsg(V, "Creating new node: ", this);
8006 return V;
8007 }
8008
8009 SDValue SelectionDAG::getScatterVP(SDVTList VTs, EVT VT, const SDLoc &dl,
8010 ArrayRef<SDValue> Ops,
8011 MachineMemOperand *MMO,
8012 ISD::MemIndexType IndexType) {
8013 assert(Ops.size() == 7 && "Incompatible number of operands");
8014
8015 FoldingSetNodeID ID;
8016 AddNodeIDNode(ID, ISD::VP_SCATTER, VTs, Ops);
8017 ID.AddInteger(VT.getRawBits());
8018 ID.AddInteger(getSyntheticNodeSubclassData<VPScatterSDNode>(
8019 dl.getIROrder(), VTs, VT, MMO, IndexType));
8020 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
8021 void *IP = nullptr;
8022 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
8023 cast<VPScatterSDNode>(E)->refineAlignment(MMO);
8024 return SDValue(E, 0);
8025 }
8026 auto *N = newSDNode<VPScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
8027 VT, MMO, IndexType);
8028 createOperands(N, Ops);
8029
8030 assert(N->getMask().getValueType().getVectorElementCount() ==
8031 N->getValue().getValueType().getVectorElementCount() &&
8032 "Vector width mismatch between mask and data");
8033 assert(
8034 N->getIndex().getValueType().getVectorElementCount().isScalable() ==
8035 N->getValue().getValueType().getVectorElementCount().isScalable() &&
8036 "Scalable flags of index and data do not match");
8037 assert(ElementCount::isKnownGE(
8038 N->getIndex().getValueType().getVectorElementCount(),
8039 N->getValue().getValueType().getVectorElementCount()) &&
8040 "Vector width mismatch between index and data");
8041 assert(isa<ConstantSDNode>(N->getScale()) &&
8042 cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
8043 "Scale should be a constant power of 2");
8044
8045 CSEMap.InsertNode(N, IP);
8046 InsertNode(N);
8047 SDValue V(N, 0);
8048 NewSDValueDbgMsg(V, "Creating new node: ", this);
8049 return V;
8050 }
8051
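/// getMaskedLoad - Create an ISD::MLOAD node. A masked load carries a mask
/// and a pass-through value supplying the result for disabled lanes, and may
/// additionally be extending, expanding, or pre/post-indexed.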
8052 SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain,
8053 SDValue Base, SDValue Offset, SDValue Mask,
8054 SDValue PassThru, EVT MemVT,
8055 MachineMemOperand *MMO,
8056 ISD::MemIndexedMode AM,
8057 ISD::LoadExtType ExtTy, bool isExpanding) {
8058 bool Indexed = AM != ISD::UNINDEXED;
8059 assert((Indexed || Offset.isUndef()) &&
8060 "Unindexed masked load with an offset!");
8061 SDVTList VTs = Indexed ? getVTList(VT, Base.getValueType(), MVT::Other)
8062 : getVTList(VT, MVT::Other);
8063 SDValue Ops[] = {Chain, Base, Offset, Mask, PassThru};
8064 FoldingSetNodeID ID;
8065 AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops);
8066 ID.AddInteger(MemVT.getRawBits());
8067 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
8068 dl.getIROrder(), VTs, AM, ExtTy, isExpanding, MemVT, MMO));
8069 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
8070 void *IP = nullptr;
8071 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
8072 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
8073 return SDValue(E, 0);
8074 }
8075 auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
8076 AM, ExtTy, isExpanding, MemVT, MMO);
8077 createOperands(N, Ops);
8078
8079 CSEMap.InsertNode(N, IP);
8080 InsertNode(N);
8081 SDValue V(N, 0);
8082 NewSDValueDbgMsg(V, "Creating new node: ", this);
8083 return V;
8084 }
8085
8086 SDValue SelectionDAG::getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl,
8087 SDValue Base, SDValue Offset,
8088 ISD::MemIndexedMode AM) {
8089 MaskedLoadSDNode *LD = cast<MaskedLoadSDNode>(OrigLoad);
8090 assert(LD->getOffset().isUndef() && "Masked load is already an indexed load!");
8091 return getMaskedLoad(OrigLoad.getValueType(), dl, LD->getChain(), Base,
8092 Offset, LD->getMask(), LD->getPassThru(),
8093 LD->getMemoryVT(), LD->getMemOperand(), AM,
8094 LD->getExtensionType(), LD->isExpandingLoad());
8095 }
8096
8097 SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl,
8098 SDValue Val, SDValue Base, SDValue Offset,
8099 SDValue Mask, EVT MemVT,
8100 MachineMemOperand *MMO,
8101 ISD::MemIndexedMode AM, bool IsTruncating,
8102 bool IsCompressing) {
8103 assert(Chain.getValueType() == MVT::Other &&
8104 "Invalid chain type");
8105 bool Indexed = AM != ISD::UNINDEXED;
8106 assert((Indexed || Offset.isUndef()) &&
8107 "Unindexed masked store with an offset!");
8108 SDVTList VTs = Indexed ? getVTList(Base.getValueType(), MVT::Other)
8109 : getVTList(MVT::Other);
8110 SDValue Ops[] = {Chain, Val, Base, Offset, Mask};
8111 FoldingSetNodeID ID;
8112 AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops);
8113 ID.AddInteger(MemVT.getRawBits());
8114 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
8115 dl.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
8116 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
8117 void *IP = nullptr;
8118 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
8119 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
8120 return SDValue(E, 0);
8121 }
8122 auto *N =
8123 newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
8124 IsTruncating, IsCompressing, MemVT, MMO);
8125 createOperands(N, Ops);
8126
8127 CSEMap.InsertNode(N, IP);
8128 InsertNode(N);
8129 SDValue V(N, 0);
8130 NewSDValueDbgMsg(V, "Creating new node: ", this);
8131 return V;
8132 }
8133
8134 SDValue SelectionDAG::getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl,
8135 SDValue Base, SDValue Offset,
8136 ISD::MemIndexedMode AM) {
8137 MaskedStoreSDNode *ST = cast<MaskedStoreSDNode>(OrigStore);
8138 assert(ST->getOffset().isUndef() &&
8139 "Masked store is already a indexed store!");
8140 return getMaskedStore(ST->getChain(), dl, ST->getValue(), Base, Offset,
8141 ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(),
8142 AM, ST->isTruncatingStore(), ST->isCompressingStore());
8143 }
8144
8145 SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl,
8146 ArrayRef<SDValue> Ops,
8147 MachineMemOperand *MMO,
8148 ISD::MemIndexType IndexType,
8149 ISD::LoadExtType ExtTy) {
8150 assert(Ops.size() == 6 && "Incompatible number of operands");
8151
8152 FoldingSetNodeID ID;
8153 AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops);
8154 ID.AddInteger(MemVT.getRawBits());
8155 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
8156 dl.getIROrder(), VTs, MemVT, MMO, IndexType, ExtTy));
8157 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
8158 void *IP = nullptr;
8159 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
8160 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
8161 return SDValue(E, 0);
8162 }
8163
8164 IndexType = TLI->getCanonicalIndexType(IndexType, MemVT, Ops[4]);
8165 auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(),
8166 VTs, MemVT, MMO, IndexType, ExtTy);
8167 createOperands(N, Ops);
8168
8169 assert(N->getPassThru().getValueType() == N->getValueType(0) &&
8170 "Incompatible type of the PassThru value in MaskedGatherSDNode");
8171 assert(N->getMask().getValueType().getVectorElementCount() ==
8172 N->getValueType(0).getVectorElementCount() &&
8173 "Vector width mismatch between mask and data");
8174 assert(N->getIndex().getValueType().getVectorElementCount().isScalable() ==
8175 N->getValueType(0).getVectorElementCount().isScalable() &&
8176 "Scalable flags of index and data do not match");
8177 assert(ElementCount::isKnownGE(
8178 N->getIndex().getValueType().getVectorElementCount(),
8179 N->getValueType(0).getVectorElementCount()) &&
8180 "Vector width mismatch between index and data");
8181 assert(isa<ConstantSDNode>(N->getScale()) &&
8182 cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
8183 "Scale should be a constant power of 2");
8184
8185 CSEMap.InsertNode(N, IP);
8186 InsertNode(N);
8187 SDValue V(N, 0);
8188 NewSDValueDbgMsg(V, "Creating new node: ", this);
8189 return V;
8190 }
8191
8192 SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT MemVT, const SDLoc &dl,
8193 ArrayRef<SDValue> Ops,
8194 MachineMemOperand *MMO,
8195 ISD::MemIndexType IndexType,
8196 bool IsTrunc) {
8197 assert(Ops.size() == 6 && "Incompatible number of operands");
8198
8199 FoldingSetNodeID ID;
8200 AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops);
8201 ID.AddInteger(MemVT.getRawBits());
8202 ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
8203 dl.getIROrder(), VTs, MemVT, MMO, IndexType, IsTrunc));
8204 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
8205 void *IP = nullptr;
8206 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
8207 cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
8208 return SDValue(E, 0);
8209 }
8210
8211 IndexType = TLI->getCanonicalIndexType(IndexType, MemVT, Ops[4]);
8212 auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
8213 VTs, MemVT, MMO, IndexType, IsTrunc);
8214 createOperands(N, Ops);
8215
8216 assert(N->getMask().getValueType().getVectorElementCount() ==
8217 N->getValue().getValueType().getVectorElementCount() &&
8218 "Vector width mismatch between mask and data");
8219 assert(
8220 N->getIndex().getValueType().getVectorElementCount().isScalable() ==
8221 N->getValue().getValueType().getVectorElementCount().isScalable() &&
8222 "Scalable flags of index and data do not match");
8223 assert(ElementCount::isKnownGE(
8224 N->getIndex().getValueType().getVectorElementCount(),
8225 N->getValue().getValueType().getVectorElementCount()) &&
8226 "Vector width mismatch between index and data");
8227 assert(isa<ConstantSDNode>(N->getScale()) &&
8228 cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
8229 "Scale should be a constant power of 2");
8230
8231 CSEMap.InsertNode(N, IP);
8232 InsertNode(N);
8233 SDValue V(N, 0);
8234 NewSDValueDbgMsg(V, "Creating new node: ", this);
8235 return V;
8236 }
8237
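/// The simplify* helpers below perform cheap, target-independent foldings of
/// freshly built nodes (select, shift, FP binops). Each returns the folded
/// value, or an empty SDValue when no simplification applies.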
8238 SDValue SelectionDAG::simplifySelect(SDValue Cond, SDValue T, SDValue F) {
8239 // select undef, T, F --> T (if T is a constant), otherwise F
8240 // select ?, undef, F --> F
8241 // select ?, T, undef --> T
8242 if (Cond.isUndef())
8243 return isConstantValueOfAnyType(T) ? T : F;
8244 if (T.isUndef())
8245 return F;
8246 if (F.isUndef())
8247 return T;
8248
8249 // select true, T, F --> T
8250 // select false, T, F --> F
8251 if (auto *CondC = dyn_cast<ConstantSDNode>(Cond))
8252 return CondC->isZero() ? F : T;
8253
8254 // TODO: This should simplify VSELECT with constant condition using something
8255 // like this (but check boolean contents to be complete?):
8256 // if (ISD::isBuildVectorAllOnes(Cond.getNode()))
8257 // return T;
8258 // if (ISD::isBuildVectorAllZeros(Cond.getNode()))
8259 // return F;
8260
8261 // select ?, T, T --> T
8262 if (T == F)
8263 return T;
8264
8265 return SDValue();
8266 }
8267
8268 SDValue SelectionDAG::simplifyShift(SDValue X, SDValue Y) {
8269 // shift undef, Y --> 0 (can always assume that the undef value is 0)
8270 if (X.isUndef())
8271 return getConstant(0, SDLoc(X.getNode()), X.getValueType());
8272 // shift X, undef --> undef (because it may shift by the bitwidth)
8273 if (Y.isUndef())
8274 return getUNDEF(X.getValueType());
8275
8276 // shift 0, Y --> 0
8277 // shift X, 0 --> X
8278 if (isNullOrNullSplat(X) || isNullOrNullSplat(Y))
8279 return X;
8280
8281 // shift X, C >= bitwidth(X) --> undef
8282 // All vector elements must be too big (or undef) to avoid partial undefs.
8283 auto isShiftTooBig = [X](ConstantSDNode *Val) {
8284 return !Val || Val->getAPIntValue().uge(X.getScalarValueSizeInBits());
8285 };
8286 if (ISD::matchUnaryPredicate(Y, isShiftTooBig, true))
8287 return getUNDEF(X.getValueType());
8288
8289 return SDValue();
8290 }
8291
8292 SDValue SelectionDAG::simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y,
8293 SDNodeFlags Flags) {
8294 // If this operation has 'nnan' or 'ninf' and at least 1 disallowed operand
8295 // (an undef operand can be chosen to be Nan/Inf), then the result of this
8296 // operation is poison. That result can be relaxed to undef.
8297 ConstantFPSDNode *XC = isConstOrConstSplatFP(X, /* AllowUndefs */ true);
8298 ConstantFPSDNode *YC = isConstOrConstSplatFP(Y, /* AllowUndefs */ true);
8299 bool HasNan = (XC && XC->getValueAPF().isNaN()) ||
8300 (YC && YC->getValueAPF().isNaN());
8301 bool HasInf = (XC && XC->getValueAPF().isInfinity()) ||
8302 (YC && YC->getValueAPF().isInfinity());
8303
8304 if (Flags.hasNoNaNs() && (HasNan || X.isUndef() || Y.isUndef()))
8305 return getUNDEF(X.getValueType());
8306
8307 if (Flags.hasNoInfs() && (HasInf || X.isUndef() || Y.isUndef()))
8308 return getUNDEF(X.getValueType());
8309
8310 if (!YC)
8311 return SDValue();
8312
8313 // X + -0.0 --> X
8314 if (Opcode == ISD::FADD)
8315 if (YC->getValueAPF().isNegZero())
8316 return X;
8317
8318 // X - +0.0 --> X
8319 if (Opcode == ISD::FSUB)
8320 if (YC->getValueAPF().isPosZero())
8321 return X;
8322
8323 // X * 1.0 --> X
8324 // X / 1.0 --> X
8325 if (Opcode == ISD::FMUL || Opcode == ISD::FDIV)
8326 if (YC->getValueAPF().isExactlyValue(1.0))
8327 return X;
8328
8329 // X * 0.0 --> 0.0
8330 if (Opcode == ISD::FMUL && Flags.hasNoNaNs() && Flags.hasNoSignedZeros())
8331 if (YC->getValueAPF().isZero())
8332 return getConstantFP(0.0, SDLoc(Y), Y.getValueType());
8333
8334 return SDValue();
8335 }
8336
8337 SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain,
8338 SDValue Ptr, SDValue SV, unsigned Align) {
8339 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) };
8340 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
8341 }
8342
8343 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
8344 ArrayRef<SDUse> Ops) {
8345 switch (Ops.size()) {
8346 case 0: return getNode(Opcode, DL, VT);
8347 case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
8348 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
8349 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
8350 default: break;
8351 }
8352
8353 // Copy from an SDUse array into an SDValue array for use with
8354 // the regular getNode logic.
8355 SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end());
8356 return getNode(Opcode, DL, VT, NewOps);
8357 }
8358
8359 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
8360 ArrayRef<SDValue> Ops) {
8361 SDNodeFlags Flags;
8362 if (Inserter)
8363 Flags = Inserter->getFlags();
8364 return getNode(Opcode, DL, VT, Ops, Flags);
8365 }
8366
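/// Generic N-ary getNode: small operand counts are dispatched to the
/// fixed-arity overloads; everything else is memoized through the CSE
/// FoldingSet, except nodes producing MVT::Glue, which are never CSE'd.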
8367 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
8368 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
8369 unsigned NumOps = Ops.size();
8370 switch (NumOps) {
8371 case 0: return getNode(Opcode, DL, VT);
8372 case 1: return getNode(Opcode, DL, VT, Ops[0], Flags);
8373 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags);
8374 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2], Flags);
8375 default: break;
8376 }
8377
8378 #ifndef NDEBUG
8379 for (auto &Op : Ops)
8380 assert(Op.getOpcode() != ISD::DELETED_NODE &&
8381 "Operand is DELETED_NODE!");
8382 #endif
8383
8384 switch (Opcode) {
8385 default: break;
8386 case ISD::BUILD_VECTOR:
8387 // Attempt to simplify BUILD_VECTOR.
8388 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
8389 return V;
8390 break;
8391 case ISD::CONCAT_VECTORS:
8392 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
8393 return V;
8394 break;
8395 case ISD::SELECT_CC:
8396 assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
8397 assert(Ops[0].getValueType() == Ops[1].getValueType() &&
8398 "LHS and RHS of condition must have same type!");
8399 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
8400 "True and False arms of SelectCC must have same type!");
8401 assert(Ops[2].getValueType() == VT &&
8402 "select_cc node must be of same type as true and false value!");
8403 break;
8404 case ISD::BR_CC:
8405 assert(NumOps == 5 && "BR_CC takes 5 operands!");
8406 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
8407 "LHS/RHS of comparison should match types!");
8408 break;
8409 }
8410
8411 // Memoize nodes.
8412 SDNode *N;
8413 SDVTList VTs = getVTList(VT);
8414
8415 if (VT != MVT::Glue) {
8416 FoldingSetNodeID ID;
8417 AddNodeIDNode(ID, Opcode, VTs, Ops);
8418 void *IP = nullptr;
8419
8420 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
8421 return SDValue(E, 0);
8422
8423 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
8424 createOperands(N, Ops);
8425
8426 CSEMap.InsertNode(N, IP);
8427 } else {
8428 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
8429 createOperands(N, Ops);
8430 }
8431
8432 N->setFlags(Flags);
8433 InsertNode(N);
8434 SDValue V(N, 0);
8435 NewSDValueDbgMsg(V, "Creating new node: ", this);
8436 return V;
8437 }
8438
8439 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
8440 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
8441 return getNode(Opcode, DL, getVTList(ResultTys), Ops);
8442 }
8443
8444 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
8445 ArrayRef<SDValue> Ops) {
8446 SDNodeFlags Flags;
8447 if (Inserter)
8448 Flags = Inserter->getFlags();
8449 return getNode(Opcode, DL, VTList, Ops, Flags);
8450 }
8451
8452 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
8453 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
8454 if (VTList.NumVTs == 1)
8455 return getNode(Opcode, DL, VTList.VTs[0], Ops);
8456
8457 #ifndef NDEBUG
8458 for (auto &Op : Ops)
8459 assert(Op.getOpcode() != ISD::DELETED_NODE &&
8460 "Operand is DELETED_NODE!");
8461 #endif
8462
8463 switch (Opcode) {
8464 case ISD::STRICT_FP_EXTEND:
8465 assert(VTList.NumVTs == 2 && Ops.size() == 2 &&
8466 "Invalid STRICT_FP_EXTEND!");
8467 assert(VTList.VTs[0].isFloatingPoint() &&
8468 Ops[1].getValueType().isFloatingPoint() && "Invalid FP cast!");
8469 assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() &&
8470 "STRICT_FP_EXTEND result type should be vector iff the operand "
8471 "type is vector!");
8472 assert((!VTList.VTs[0].isVector() ||
8473 VTList.VTs[0].getVectorNumElements() ==
8474 Ops[1].getValueType().getVectorNumElements()) &&
8475 "Vector element count mismatch!");
8476 assert(Ops[1].getValueType().bitsLT(VTList.VTs[0]) &&
8477 "Invalid fpext node, dst <= src!");
8478 break;
8479 case ISD::STRICT_FP_ROUND:
8480 assert(VTList.NumVTs == 2 && Ops.size() == 3 && "Invalid STRICT_FP_ROUND!");
8481 assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() &&
8482 "STRICT_FP_ROUND result type should be vector iff the operand "
8483 "type is vector!");
8484 assert((!VTList.VTs[0].isVector() ||
8485 VTList.VTs[0].getVectorNumElements() ==
8486 Ops[1].getValueType().getVectorNumElements()) &&
8487 "Vector element count mismatch!");
8488 assert(VTList.VTs[0].isFloatingPoint() &&
8489 Ops[1].getValueType().isFloatingPoint() &&
8490 VTList.VTs[0].bitsLT(Ops[1].getValueType()) &&
8491 isa<ConstantSDNode>(Ops[2]) &&
8492 (cast<ConstantSDNode>(Ops[2])->getZExtValue() == 0 ||
8493 cast<ConstantSDNode>(Ops[2])->getZExtValue() == 1) &&
8494 "Invalid STRICT_FP_ROUND!");
8495 break;
8496 #if 0
8497 // FIXME: figure out how to safely handle things like
8498 // int foo(int x) { return 1 << (x & 255); }
8499 // int bar() { return foo(256); }
8500 case ISD::SRA_PARTS:
8501 case ISD::SRL_PARTS:
8502 case ISD::SHL_PARTS:
8503 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
8504 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
8505 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
8506 else if (N3.getOpcode() == ISD::AND)
8507 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
8508 // If the and is only masking out bits that cannot affect the shift,
8509 // eliminate the and.
8510 unsigned NumBits = VT.getScalarSizeInBits()*2;
8511 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
8512 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
8513 }
8514 break;
8515 #endif
8516 }
8517
8518 // Memoize the node unless it returns a flag.
8519 SDNode *N;
8520 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
8521 FoldingSetNodeID ID;
8522 AddNodeIDNode(ID, Opcode, VTList, Ops);
8523 void *IP = nullptr;
8524 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
8525 return SDValue(E, 0);
8526
8527 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
8528 createOperands(N, Ops);
8529 CSEMap.InsertNode(N, IP);
8530 } else {
8531 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
8532 createOperands(N, Ops);
8533 }
8534
8535 N->setFlags(Flags);
8536 InsertNode(N);
8537 SDValue V(N, 0);
8538 NewSDValueDbgMsg(V, "Creating new node: ", this);
8539 return V;
8540 }
8541
8542 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
8543 SDVTList VTList) {
8544 return getNode(Opcode, DL, VTList, None);
8545 }
8546
8547 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
8548 SDValue N1) {
8549 SDValue Ops[] = { N1 };
8550 return getNode(Opcode, DL, VTList, Ops);
8551 }
8552
8553 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
8554 SDValue N1, SDValue N2) {
8555 SDValue Ops[] = { N1, N2 };
8556 return getNode(Opcode, DL, VTList, Ops);
8557 }
8558
8559 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
8560 SDValue N1, SDValue N2, SDValue N3) {
8561 SDValue Ops[] = { N1, N2, N3 };
8562 return getNode(Opcode, DL, VTList, Ops);
8563 }
8564
8565 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
8566 SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
8567 SDValue Ops[] = { N1, N2, N3, N4 };
8568 return getNode(Opcode, DL, VTList, Ops);
8569 }
8570
8571 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
8572 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
8573 SDValue N5) {
8574 SDValue Ops[] = { N1, N2, N3, N4, N5 };
8575 return getNode(Opcode, DL, VTList, Ops);
8576 }
8577
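/// The getVTList overloads return uniqued SDVTList instances: single-VT lists
/// come from the static per-type table behind SDNode::getValueTypeList, while
/// multi-VT lists are interned in VTListMap so identical lists share storage.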
8578 SDVTList SelectionDAG::getVTList(EVT VT) {
8579 return makeVTList(SDNode::getValueTypeList(VT), 1);
8580 }
8581
8582 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
8583 FoldingSetNodeID ID;
8584 ID.AddInteger(2U);
8585 ID.AddInteger(VT1.getRawBits());
8586 ID.AddInteger(VT2.getRawBits());
8587
8588 void *IP = nullptr;
8589 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
8590 if (!Result) {
8591 EVT *Array = Allocator.Allocate<EVT>(2);
8592 Array[0] = VT1;
8593 Array[1] = VT2;
8594 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
8595 VTListMap.InsertNode(Result, IP);
8596 }
8597 return Result->getSDVTList();
8598 }
8599
getVTList(EVT VT1,EVT VT2,EVT VT3)8600 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
8601 FoldingSetNodeID ID;
8602 ID.AddInteger(3U);
8603 ID.AddInteger(VT1.getRawBits());
8604 ID.AddInteger(VT2.getRawBits());
8605 ID.AddInteger(VT3.getRawBits());
8606
8607 void *IP = nullptr;
8608 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
8609 if (!Result) {
8610 EVT *Array = Allocator.Allocate<EVT>(3);
8611 Array[0] = VT1;
8612 Array[1] = VT2;
8613 Array[2] = VT3;
8614 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
8615 VTListMap.InsertNode(Result, IP);
8616 }
8617 return Result->getSDVTList();
8618 }
8619
getVTList(EVT VT1,EVT VT2,EVT VT3,EVT VT4)8620 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
8621 FoldingSetNodeID ID;
8622 ID.AddInteger(4U);
8623 ID.AddInteger(VT1.getRawBits());
8624 ID.AddInteger(VT2.getRawBits());
8625 ID.AddInteger(VT3.getRawBits());
8626 ID.AddInteger(VT4.getRawBits());
8627
8628 void *IP = nullptr;
8629 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
8630 if (!Result) {
8631 EVT *Array = Allocator.Allocate<EVT>(4);
8632 Array[0] = VT1;
8633 Array[1] = VT2;
8634 Array[2] = VT3;
8635 Array[3] = VT4;
8636 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
8637 VTListMap.InsertNode(Result, IP);
8638 }
8639 return Result->getSDVTList();
8640 }
8641
getVTList(ArrayRef<EVT> VTs)8642 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) {
8643 unsigned NumVTs = VTs.size();
8644 FoldingSetNodeID ID;
8645 ID.AddInteger(NumVTs);
8646 for (unsigned index = 0; index < NumVTs; index++) {
8647 ID.AddInteger(VTs[index].getRawBits());
8648 }
8649
8650 void *IP = nullptr;
8651 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
8652 if (!Result) {
8653 EVT *Array = Allocator.Allocate<EVT>(NumVTs);
8654 llvm::copy(VTs, Array);
8655 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
8656 VTListMap.InsertNode(Result, IP);
8657 }
8658 return Result->getSDVTList();
8659 }
8660
8661
8662 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the
8663 /// specified operands. If the resultant node already exists in the DAG,
8664 /// this does not modify the specified node, instead it returns the node that
8665 /// already exists. If the resultant node does not exist in the DAG, the
8666 /// input node is returned. As a degenerate case, if you specify the same
8667 /// input operands as the node already has, the input node is returned.
UpdateNodeOperands(SDNode * N,SDValue Op)8668 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
8669 assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
8670
8671 // Check to see if there is no change.
8672 if (Op == N->getOperand(0)) return N;
8673
8674 // See if the modified node already exists.
8675 void *InsertPos = nullptr;
8676 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
8677 return Existing;
8678
8679 // Nope it doesn't. Remove the node from its current place in the maps.
8680 if (InsertPos)
8681 if (!RemoveNodeFromCSEMaps(N))
8682 InsertPos = nullptr;
8683
8684 // Now we update the operands.
8685 N->OperandList[0].set(Op);
8686
8687 updateDivergence(N);
8688 // If this gets put into a CSE map, add it.
8689 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
8690 return N;
8691 }
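
// Illustrative caller pattern for the UpdateNodeOperands family (a sketch, not
// part of the original file; LoadNode, Chain, Base, Offset, PtrVT and DL are
// hypothetical). Because the call may return a different, pre-existing node
// when the updated form already exists in the CSE maps, callers must use the
// returned pointer instead of assuming N was mutated:
//
//   SDValue NewAddr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, Offset);
//   SDNode *Result =
//       DAG.UpdateNodeOperands(LoadNode, Chain, NewAddr, LoadNode->getOperand(2));
//   if (Result != LoadNode) {
//     // An equivalent node already existed; LoadNode itself was not changed.
//   }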
8692
UpdateNodeOperands(SDNode * N,SDValue Op1,SDValue Op2)8693 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
8694 assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
8695
8696 // Check to see if there is no change.
8697 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
8698 return N; // No operands changed, just return the input node.
8699
8700 // See if the modified node already exists.
8701 void *InsertPos = nullptr;
8702 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
8703 return Existing;
8704
8705 // Nope it doesn't. Remove the node from its current place in the maps.
8706 if (InsertPos)
8707 if (!RemoveNodeFromCSEMaps(N))
8708 InsertPos = nullptr;
8709
8710 // Now we update the operands.
8711 if (N->OperandList[0] != Op1)
8712 N->OperandList[0].set(Op1);
8713 if (N->OperandList[1] != Op2)
8714 N->OperandList[1].set(Op2);
8715
8716 updateDivergence(N);
8717 // If this gets put into a CSE map, add it.
8718 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
8719 return N;
8720 }
8721
8722 SDNode *SelectionDAG::
UpdateNodeOperands(SDNode * N,SDValue Op1,SDValue Op2,SDValue Op3)8723 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
8724 SDValue Ops[] = { Op1, Op2, Op3 };
8725 return UpdateNodeOperands(N, Ops);
8726 }
8727
8728 SDNode *SelectionDAG::
UpdateNodeOperands(SDNode * N,SDValue Op1,SDValue Op2,SDValue Op3,SDValue Op4)8729 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
8730 SDValue Op3, SDValue Op4) {
8731 SDValue Ops[] = { Op1, Op2, Op3, Op4 };
8732 return UpdateNodeOperands(N, Ops);
8733 }
8734
8735 SDNode *SelectionDAG::
UpdateNodeOperands(SDNode * N,SDValue Op1,SDValue Op2,SDValue Op3,SDValue Op4,SDValue Op5)8736 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
8737 SDValue Op3, SDValue Op4, SDValue Op5) {
8738 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
8739 return UpdateNodeOperands(N, Ops);
8740 }
8741
8742 SDNode *SelectionDAG::
UpdateNodeOperands(SDNode * N,ArrayRef<SDValue> Ops)8743 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) {
8744 unsigned NumOps = Ops.size();
8745 assert(N->getNumOperands() == NumOps &&
8746 "Update with wrong number of operands");
8747
8748 // If no operands changed just return the input node.
8749 if (std::equal(Ops.begin(), Ops.end(), N->op_begin()))
8750 return N;
8751
8752 // See if the modified node already exists.
8753 void *InsertPos = nullptr;
8754 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
8755 return Existing;
8756
8757 // Nope it doesn't. Remove the node from its current place in the maps.
8758 if (InsertPos)
8759 if (!RemoveNodeFromCSEMaps(N))
8760 InsertPos = nullptr;
8761
8762 // Now we update the operands.
8763 for (unsigned i = 0; i != NumOps; ++i)
8764 if (N->OperandList[i] != Ops[i])
8765 N->OperandList[i].set(Ops[i]);
8766
8767 updateDivergence(N);
8768 // If this gets put into a CSE map, add it.
8769 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
8770 return N;
8771 }
8772
8773 /// DropOperands - Release the operands and set this node to have
8774 /// zero operands.
DropOperands()8775 void SDNode::DropOperands() {
8776 // Unlike the code in MorphNodeTo that does this, we don't need to
8777 // watch for dead nodes here.
8778 for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
8779 SDUse &Use = *I++;
8780 Use.set(SDValue());
8781 }
8782 }
8783
setNodeMemRefs(MachineSDNode * N,ArrayRef<MachineMemOperand * > NewMemRefs)8784 void SelectionDAG::setNodeMemRefs(MachineSDNode *N,
8785 ArrayRef<MachineMemOperand *> NewMemRefs) {
8786 if (NewMemRefs.empty()) {
8787 N->clearMemRefs();
8788 return;
8789 }
8790
8791 // Check if we can avoid allocating by storing a single reference directly.
8792 if (NewMemRefs.size() == 1) {
8793 N->MemRefs = NewMemRefs[0];
8794 N->NumMemRefs = 1;
8795 return;
8796 }
8797
8798 MachineMemOperand **MemRefsBuffer =
8799 Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.size());
8800 llvm::copy(NewMemRefs, MemRefsBuffer);
8801 N->MemRefs = MemRefsBuffer;
8802 N->NumMemRefs = static_cast<int>(NewMemRefs.size());
8803 }
8804
8805 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
8806 /// machine opcode.
8807 ///
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT)8808 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8809 EVT VT) {
8810 SDVTList VTs = getVTList(VT);
8811 return SelectNodeTo(N, MachineOpc, VTs, None);
8812 }
8813
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT,SDValue Op1)8814 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8815 EVT VT, SDValue Op1) {
8816 SDVTList VTs = getVTList(VT);
8817 SDValue Ops[] = { Op1 };
8818 return SelectNodeTo(N, MachineOpc, VTs, Ops);
8819 }
8820
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT,SDValue Op1,SDValue Op2)8821 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8822 EVT VT, SDValue Op1,
8823 SDValue Op2) {
8824 SDVTList VTs = getVTList(VT);
8825 SDValue Ops[] = { Op1, Op2 };
8826 return SelectNodeTo(N, MachineOpc, VTs, Ops);
8827 }
8828
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT,SDValue Op1,SDValue Op2,SDValue Op3)8829 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8830 EVT VT, SDValue Op1,
8831 SDValue Op2, SDValue Op3) {
8832 SDVTList VTs = getVTList(VT);
8833 SDValue Ops[] = { Op1, Op2, Op3 };
8834 return SelectNodeTo(N, MachineOpc, VTs, Ops);
8835 }
8836
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT,ArrayRef<SDValue> Ops)8837 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8838 EVT VT, ArrayRef<SDValue> Ops) {
8839 SDVTList VTs = getVTList(VT);
8840 return SelectNodeTo(N, MachineOpc, VTs, Ops);
8841 }
8842
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT1,EVT VT2,ArrayRef<SDValue> Ops)8843 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8844 EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
8845 SDVTList VTs = getVTList(VT1, VT2);
8846 return SelectNodeTo(N, MachineOpc, VTs, Ops);
8847 }
8848
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT1,EVT VT2)8849 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8850 EVT VT1, EVT VT2) {
8851 SDVTList VTs = getVTList(VT1, VT2);
8852 return SelectNodeTo(N, MachineOpc, VTs, None);
8853 }
8854
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT1,EVT VT2,EVT VT3,ArrayRef<SDValue> Ops)8855 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8856 EVT VT1, EVT VT2, EVT VT3,
8857 ArrayRef<SDValue> Ops) {
8858 SDVTList VTs = getVTList(VT1, VT2, VT3);
8859 return SelectNodeTo(N, MachineOpc, VTs, Ops);
8860 }
8861
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT1,EVT VT2,SDValue Op1,SDValue Op2)8862 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8863 EVT VT1, EVT VT2,
8864 SDValue Op1, SDValue Op2) {
8865 SDVTList VTs = getVTList(VT1, VT2);
8866 SDValue Ops[] = { Op1, Op2 };
8867 return SelectNodeTo(N, MachineOpc, VTs, Ops);
8868 }
8869
SelectNodeTo(SDNode * N,unsigned MachineOpc,SDVTList VTs,ArrayRef<SDValue> Ops)8870 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
8871 SDVTList VTs,ArrayRef<SDValue> Ops) {
8872 SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
8873 // Reset the NodeID to -1.
8874 New->setNodeId(-1);
8875 if (New != N) {
8876 ReplaceAllUsesWith(N, New);
8877 RemoveDeadNode(N);
8878 }
8879 return New;
8880 }
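
// Illustrative sketch of how a target's instruction selector typically uses
// SelectNodeTo (MyTarget::ADDrr is a hypothetical machine opcode, not taken
// from this file):
//
//   SDValue LHS = N->getOperand(0), RHS = N->getOperand(1);
//   SDNode *MI = CurDAG->SelectNodeTo(N, MyTarget::ADDrr,
//                                     N->getValueType(0), LHS, RHS);
//   // MI is either N morphed in place or an equivalent pre-existing node;
//   // in the latter case N's uses have already been redirected and N removed.
//
// The ~MachineOpc passed to MorphNodeTo is how machine opcodes are encoded in
// the SDNode opcode space; SDNode::getMachineOpcode() inverts it again.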
8881
8882 /// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
8883 /// the line number information on the merged node since it is not possible to
8884 /// preserve the information that the operation is associated with multiple lines.
8885 /// This will make the debugger work better at -O0, where there is a higher
8886 /// probability of having other instructions associated with that line.
8887 ///
8888 /// For IROrder, we keep the smaller of the two.
UpdateSDLocOnMergeSDNode(SDNode * N,const SDLoc & OLoc)8889 SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) {
8890 DebugLoc NLoc = N->getDebugLoc();
8891 if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
8892 N->setDebugLoc(DebugLoc());
8893 }
8894 unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
8895 N->setIROrder(Order);
8896 return N;
8897 }
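
// Illustrative example of the merge policy above (hypothetical IR): if CSE
// merges two nodes that came from IR on different source lines,
//
//   %t1 = add i32 %a, %b    ; line 10, IROrder 5
//   %t2 = add i32 %a, %b    ; line 42, IROrder 9
//
// then at -O0 the surviving node keeps no DebugLoc at all (reporting either
// line would be misleading in the debugger) and keeps the smaller IROrder, 5.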
8898
8899 /// MorphNodeTo - This *mutates* the specified node to have the specified
8900 /// return type, opcode, and operands.
8901 ///
8902 /// Note that MorphNodeTo returns the resultant node. If there is already a
8903 /// node of the specified opcode and operands, it returns that node instead of
8904 /// the current one. Note that the SDLoc need not be the same.
8905 ///
8906 /// Using MorphNodeTo is faster than creating a new node and swapping it in
8907 /// with ReplaceAllUsesWith both because it often avoids allocating a new
8908 /// node, and because it doesn't require CSE recalculation for any of
8909 /// the node's users.
8910 ///
8911 /// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
8912 /// As a consequence, it isn't appropriate to use from within the DAG combiner or
8913 /// the legalizer, which maintain worklists that would need to be updated when
8914 /// deleting things.
MorphNodeTo(SDNode * N,unsigned Opc,SDVTList VTs,ArrayRef<SDValue> Ops)8915 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
8916 SDVTList VTs, ArrayRef<SDValue> Ops) {
8917 // If an identical node already exists, use it.
8918 void *IP = nullptr;
8919 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
8920 FoldingSetNodeID ID;
8921 AddNodeIDNode(ID, Opc, VTs, Ops);
8922 if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP))
8923 return UpdateSDLocOnMergeSDNode(ON, SDLoc(N));
8924 }
8925
8926 if (!RemoveNodeFromCSEMaps(N))
8927 IP = nullptr;
8928
8929 // Start the morphing.
8930 N->NodeType = Opc;
8931 N->ValueList = VTs.VTs;
8932 N->NumValues = VTs.NumVTs;
8933
8934 // Clear the operands list, updating used nodes to remove this from their
8935 // use list. Keep track of any operands that become dead as a result.
8936 SmallPtrSet<SDNode*, 16> DeadNodeSet;
8937 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
8938 SDUse &Use = *I++;
8939 SDNode *Used = Use.getNode();
8940 Use.set(SDValue());
8941 if (Used->use_empty())
8942 DeadNodeSet.insert(Used);
8943 }
8944
8945 // For MachineNode, initialize the memory references information.
8946 if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N))
8947 MN->clearMemRefs();
8948
8949 // Swap for an appropriately sized array from the recycler.
8950 removeOperands(N);
8951 createOperands(N, Ops);
8952
8953 // Delete any nodes that are still dead after adding the uses for the
8954 // new operands.
8955 if (!DeadNodeSet.empty()) {
8956 SmallVector<SDNode *, 16> DeadNodes;
8957 for (SDNode *N : DeadNodeSet)
8958 if (N->use_empty())
8959 DeadNodes.push_back(N);
8960 RemoveDeadNodes(DeadNodes);
8961 }
8962
8963 if (IP)
8964 CSEMap.InsertNode(N, IP); // Memoize the new node.
8965 return N;
8966 }
8967
mutateStrictFPToFP(SDNode * Node)8968 SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) {
8969 unsigned OrigOpc = Node->getOpcode();
8970 unsigned NewOpc;
8971 switch (OrigOpc) {
8972 default:
8973 llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!");
8974 #define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
8975 case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break;
8976 #define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
8977 case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break;
8978 #include "llvm/IR/ConstrainedOps.def"
8979 }
8980
8981 assert(Node->getNumValues() == 2 && "Unexpected number of results!");
8982
8983 // We're taking this node out of the chain, so we need to re-link things.
8984 SDValue InputChain = Node->getOperand(0);
8985 SDValue OutputChain = SDValue(Node, 1);
8986 ReplaceAllUsesOfValueWith(OutputChain, InputChain);
8987
8988 SmallVector<SDValue, 3> Ops;
8989 for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
8990 Ops.push_back(Node->getOperand(i));
8991
8992 SDVTList VTs = getVTList(Node->getValueType(0));
8993 SDNode *Res = MorphNodeTo(Node, NewOpc, VTs, Ops);
8994
8995 // MorphNodeTo can operate in two ways: if an existing node with the
8996 // specified operands exists, it can just return it. Otherwise, it
8997 // updates the node in place to have the requested operands.
8998 if (Res == Node) {
8999 // If we updated the node in place, reset the node ID. To the isel,
9000 // this should be just like a newly allocated machine node.
9001 Res->setNodeId(-1);
9002 } else {
9003 ReplaceAllUsesWith(Node, Res);
9004 RemoveDeadNode(Node);
9005 }
9006
9007 return Res;
9008 }
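
// Conceptually, the mutation above performs (illustrative sketch):
//
//   before:  res, ch2 = STRICT_FADD ch1, x, y   // chained, may trap
//   after:   res      = FADD x, y               // chain users rewired to ch1
//
// The strict node's outgoing chain is replaced by its incoming chain before
// the morph, so ordering against other side-effecting nodes is dropped along
// with the exception/rounding semantics.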
9009
9010 /// getMachineNode - These are used for target selectors to create a new node
9011 /// with specified return type(s), MachineInstr opcode, and operands.
9012 ///
9013 /// Note that getMachineNode returns the resultant node. If there is already a
9014 /// node of the specified opcode and operands, it returns that node instead of
9015 /// the current one.
getMachineNode(unsigned Opcode,const SDLoc & dl,EVT VT)9016 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
9017 EVT VT) {
9018 SDVTList VTs = getVTList(VT);
9019 return getMachineNode(Opcode, dl, VTs, None);
9020 }
9021
getMachineNode(unsigned Opcode,const SDLoc & dl,EVT VT,SDValue Op1)9022 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
9023 EVT VT, SDValue Op1) {
9024 SDVTList VTs = getVTList(VT);
9025 SDValue Ops[] = { Op1 };
9026 return getMachineNode(Opcode, dl, VTs, Ops);
9027 }
9028
getMachineNode(unsigned Opcode,const SDLoc & dl,EVT VT,SDValue Op1,SDValue Op2)9029 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
9030 EVT VT, SDValue Op1, SDValue Op2) {
9031 SDVTList VTs = getVTList(VT);
9032 SDValue Ops[] = { Op1, Op2 };
9033 return getMachineNode(Opcode, dl, VTs, Ops);
9034 }
9035
getMachineNode(unsigned Opcode,const SDLoc & dl,EVT VT,SDValue Op1,SDValue Op2,SDValue Op3)9036 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
9037 EVT VT, SDValue Op1, SDValue Op2,
9038 SDValue Op3) {
9039 SDVTList VTs = getVTList(VT);
9040 SDValue Ops[] = { Op1, Op2, Op3 };
9041 return getMachineNode(Opcode, dl, VTs, Ops);
9042 }
9043
getMachineNode(unsigned Opcode,const SDLoc & dl,EVT VT,ArrayRef<SDValue> Ops)9044 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
9045 EVT VT, ArrayRef<SDValue> Ops) {
9046 SDVTList VTs = getVTList(VT);
9047 return getMachineNode(Opcode, dl, VTs, Ops);
9048 }
9049
getMachineNode(unsigned Opcode,const SDLoc & dl,EVT VT1,EVT VT2,SDValue Op1,SDValue Op2)9050 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
9051 EVT VT1, EVT VT2, SDValue Op1,
9052 SDValue Op2) {
9053 SDVTList VTs = getVTList(VT1, VT2);
9054 SDValue Ops[] = { Op1, Op2 };
9055 return getMachineNode(Opcode, dl, VTs, Ops);
9056 }
9057
getMachineNode(unsigned Opcode,const SDLoc & dl,EVT VT1,EVT VT2,SDValue Op1,SDValue Op2,SDValue Op3)9058 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
9059 EVT VT1, EVT VT2, SDValue Op1,
9060 SDValue Op2, SDValue Op3) {
9061 SDVTList VTs = getVTList(VT1, VT2);
9062 SDValue Ops[] = { Op1, Op2, Op3 };
9063 return getMachineNode(Opcode, dl, VTs, Ops);
9064 }
9065
getMachineNode(unsigned Opcode,const SDLoc & dl,EVT VT1,EVT VT2,ArrayRef<SDValue> Ops)9066 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
9067 EVT VT1, EVT VT2,
9068 ArrayRef<SDValue> Ops) {
9069 SDVTList VTs = getVTList(VT1, VT2);
9070 return getMachineNode(Opcode, dl, VTs, Ops);
9071 }
9072
getMachineNode(unsigned Opcode,const SDLoc & dl,EVT VT1,EVT VT2,EVT VT3,SDValue Op1,SDValue Op2)9073 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
9074 EVT VT1, EVT VT2, EVT VT3,
9075 SDValue Op1, SDValue Op2) {
9076 SDVTList VTs = getVTList(VT1, VT2, VT3);
9077 SDValue Ops[] = { Op1, Op2 };
9078 return getMachineNode(Opcode, dl, VTs, Ops);
9079 }
9080
getMachineNode(unsigned Opcode,const SDLoc & dl,EVT VT1,EVT VT2,EVT VT3,SDValue Op1,SDValue Op2,SDValue Op3)9081 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
9082 EVT VT1, EVT VT2, EVT VT3,
9083 SDValue Op1, SDValue Op2,
9084 SDValue Op3) {
9085 SDVTList VTs = getVTList(VT1, VT2, VT3);
9086 SDValue Ops[] = { Op1, Op2, Op3 };
9087 return getMachineNode(Opcode, dl, VTs, Ops);
9088 }
9089
getMachineNode(unsigned Opcode,const SDLoc & dl,EVT VT1,EVT VT2,EVT VT3,ArrayRef<SDValue> Ops)9090 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
9091 EVT VT1, EVT VT2, EVT VT3,
9092 ArrayRef<SDValue> Ops) {
9093 SDVTList VTs = getVTList(VT1, VT2, VT3);
9094 return getMachineNode(Opcode, dl, VTs, Ops);
9095 }
9096
getMachineNode(unsigned Opcode,const SDLoc & dl,ArrayRef<EVT> ResultTys,ArrayRef<SDValue> Ops)9097 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
9098 ArrayRef<EVT> ResultTys,
9099 ArrayRef<SDValue> Ops) {
9100 SDVTList VTs = getVTList(ResultTys);
9101 return getMachineNode(Opcode, dl, VTs, Ops);
9102 }
9103
getMachineNode(unsigned Opcode,const SDLoc & DL,SDVTList VTs,ArrayRef<SDValue> Ops)9104 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL,
9105 SDVTList VTs,
9106 ArrayRef<SDValue> Ops) {
9107 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
9108 MachineSDNode *N;
9109 void *IP = nullptr;
9110
9111 if (DoCSE) {
9112 FoldingSetNodeID ID;
9113 AddNodeIDNode(ID, ~Opcode, VTs, Ops);
9114 IP = nullptr;
9115 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
9116 return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL));
9117 }
9118 }
9119
9120 // Allocate a new MachineSDNode.
9121 N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
9122 createOperands(N, Ops);
9123
9124 if (DoCSE)
9125 CSEMap.InsertNode(N, IP);
9126
9127 InsertNode(N);
9128 NewSDValueDbgMsg(SDValue(N, 0), "Creating new machine node: ", this);
9129 return N;
9130 }
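
// Illustrative sketch of a typical call (MyTarget::LDri is a hypothetical
// machine opcode): creating a machine node with two results, a loaded value
// and an output chain:
//
//   SDVTList VTs = CurDAG->getVTList(MVT::i32, MVT::Other);
//   SDValue Ops[] = { Base, Offset, Chain };
//   MachineSDNode *Ld = CurDAG->getMachineNode(MyTarget::LDri, DL, VTs, Ops);
//
// As with the DoCSE check above, a node whose last result type is MVT::Glue
// is never entered into the CSE maps.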
9131
9132 /// getTargetExtractSubreg - A convenience function for creating
9133 /// TargetOpcode::EXTRACT_SUBREG nodes.
getTargetExtractSubreg(int SRIdx,const SDLoc & DL,EVT VT,SDValue Operand)9134 SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
9135 SDValue Operand) {
9136 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
9137 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
9138 VT, Operand, SRIdxVal);
9139 return SDValue(Subreg, 0);
9140 }
9141
9142 /// getTargetInsertSubreg - A convenience function for creating
9143 /// TargetOpcode::INSERT_SUBREG nodes.
getTargetInsertSubreg(int SRIdx,const SDLoc & DL,EVT VT,SDValue Operand,SDValue Subreg)9144 SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
9145 SDValue Operand, SDValue Subreg) {
9146 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
9147 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
9148 VT, Operand, Subreg, SRIdxVal);
9149 return SDValue(Result, 0);
9150 }
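
// Illustrative use of the two helpers above (MyTarget::sub_32 is a
// hypothetical subregister index): extracting and re-inserting the low 32
// bits of a 64-bit value would look like
//
//   SDValue Lo  = DAG.getTargetExtractSubreg(MyTarget::sub_32, DL, MVT::i32, Val64);
//   SDValue New = DAG.getTargetInsertSubreg(MyTarget::sub_32, DL, MVT::i64, Val64, NewLo);
//
// In both cases the subregister index is materialized as an i32
// TargetConstant operand, as built above.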
9151
9152 /// getNodeIfExists - Get the specified node if it's already available, or
9153 /// else return NULL.
getNodeIfExists(unsigned Opcode,SDVTList VTList,ArrayRef<SDValue> Ops)9154 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
9155 ArrayRef<SDValue> Ops) {
9156 SDNodeFlags Flags;
9157 if (Inserter)
9158 Flags = Inserter->getFlags();
9159 return getNodeIfExists(Opcode, VTList, Ops, Flags);
9160 }
9161
getNodeIfExists(unsigned Opcode,SDVTList VTList,ArrayRef<SDValue> Ops,const SDNodeFlags Flags)9162 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
9163 ArrayRef<SDValue> Ops,
9164 const SDNodeFlags Flags) {
9165 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
9166 FoldingSetNodeID ID;
9167 AddNodeIDNode(ID, Opcode, VTList, Ops);
9168 void *IP = nullptr;
9169 if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) {
9170 E->intersectFlagsWith(Flags);
9171 return E;
9172 }
9173 }
9174 return nullptr;
9175 }
9176
9177 /// doesNodeExist - Check if a node exists without modifying its flags.
doesNodeExist(unsigned Opcode,SDVTList VTList,ArrayRef<SDValue> Ops)9178 bool SelectionDAG::doesNodeExist(unsigned Opcode, SDVTList VTList,
9179 ArrayRef<SDValue> Ops) {
9180 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
9181 FoldingSetNodeID ID;
9182 AddNodeIDNode(ID, Opcode, VTList, Ops);
9183 void *IP = nullptr;
9184 if (FindNodeOrInsertPos(ID, SDLoc(), IP))
9185 return true;
9186 }
9187 return false;
9188 }
9189
9190 /// getDbgValue - Creates an SDDbgValue node.
9191 ///
9192 /// SDNode
getDbgValue(DIVariable * Var,DIExpression * Expr,SDNode * N,unsigned R,bool IsIndirect,const DebugLoc & DL,unsigned O)9193 SDDbgValue *SelectionDAG::getDbgValue(DIVariable *Var, DIExpression *Expr,
9194 SDNode *N, unsigned R, bool IsIndirect,
9195 const DebugLoc &DL, unsigned O) {
9196 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
9197 "Expected inlined-at fields to agree");
9198 return new (DbgInfo->getAlloc())
9199 SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromNode(N, R),
9200 {}, IsIndirect, DL, O,
9201 /*IsVariadic=*/false);
9202 }
9203
9204 /// Constant
getConstantDbgValue(DIVariable * Var,DIExpression * Expr,const Value * C,const DebugLoc & DL,unsigned O)9205 SDDbgValue *SelectionDAG::getConstantDbgValue(DIVariable *Var,
9206 DIExpression *Expr,
9207 const Value *C,
9208 const DebugLoc &DL, unsigned O) {
9209 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
9210 "Expected inlined-at fields to agree");
9211 return new (DbgInfo->getAlloc())
9212 SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromConst(C), {},
9213 /*IsIndirect=*/false, DL, O,
9214 /*IsVariadic=*/false);
9215 }
9216
9217 /// FrameIndex
getFrameIndexDbgValue(DIVariable * Var,DIExpression * Expr,unsigned FI,bool IsIndirect,const DebugLoc & DL,unsigned O)9218 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var,
9219 DIExpression *Expr, unsigned FI,
9220 bool IsIndirect,
9221 const DebugLoc &DL,
9222 unsigned O) {
9223 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
9224 "Expected inlined-at fields to agree");
9225 return getFrameIndexDbgValue(Var, Expr, FI, {}, IsIndirect, DL, O);
9226 }
9227
9228 /// FrameIndex with dependencies
getFrameIndexDbgValue(DIVariable * Var,DIExpression * Expr,unsigned FI,ArrayRef<SDNode * > Dependencies,bool IsIndirect,const DebugLoc & DL,unsigned O)9229 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var,
9230 DIExpression *Expr, unsigned FI,
9231 ArrayRef<SDNode *> Dependencies,
9232 bool IsIndirect,
9233 const DebugLoc &DL,
9234 unsigned O) {
9235 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
9236 "Expected inlined-at fields to agree");
9237 return new (DbgInfo->getAlloc())
9238 SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromFrameIdx(FI),
9239 Dependencies, IsIndirect, DL, O,
9240 /*IsVariadic=*/false);
9241 }
9242
9243 /// VReg
getVRegDbgValue(DIVariable * Var,DIExpression * Expr,unsigned VReg,bool IsIndirect,const DebugLoc & DL,unsigned O)9244 SDDbgValue *SelectionDAG::getVRegDbgValue(DIVariable *Var, DIExpression *Expr,
9245 unsigned VReg, bool IsIndirect,
9246 const DebugLoc &DL, unsigned O) {
9247 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
9248 "Expected inlined-at fields to agree");
9249 return new (DbgInfo->getAlloc())
9250 SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromVReg(VReg),
9251 {}, IsIndirect, DL, O,
9252 /*IsVariadic=*/false);
9253 }
9254
getDbgValueList(DIVariable * Var,DIExpression * Expr,ArrayRef<SDDbgOperand> Locs,ArrayRef<SDNode * > Dependencies,bool IsIndirect,const DebugLoc & DL,unsigned O,bool IsVariadic)9255 SDDbgValue *SelectionDAG::getDbgValueList(DIVariable *Var, DIExpression *Expr,
9256 ArrayRef<SDDbgOperand> Locs,
9257 ArrayRef<SDNode *> Dependencies,
9258 bool IsIndirect, const DebugLoc &DL,
9259 unsigned O, bool IsVariadic) {
9260 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
9261 "Expected inlined-at fields to agree");
9262 return new (DbgInfo->getAlloc())
9263 SDDbgValue(DbgInfo->getAlloc(), Var, Expr, Locs, Dependencies, IsIndirect,
9264 DL, O, IsVariadic);
9265 }
9266
transferDbgValues(SDValue From,SDValue To,unsigned OffsetInBits,unsigned SizeInBits,bool InvalidateDbg)9267 void SelectionDAG::transferDbgValues(SDValue From, SDValue To,
9268 unsigned OffsetInBits, unsigned SizeInBits,
9269 bool InvalidateDbg) {
9270 SDNode *FromNode = From.getNode();
9271 SDNode *ToNode = To.getNode();
9272 assert(FromNode && ToNode && "Can't modify dbg values");
9273
9274 // PR35338
9275 // TODO: assert(From != To && "Redundant dbg value transfer");
9276 // TODO: assert(FromNode != ToNode && "Intranode dbg value transfer");
9277 if (From == To || FromNode == ToNode)
9278 return;
9279
9280 if (!FromNode->getHasDebugValue())
9281 return;
9282
9283 SDDbgOperand FromLocOp =
9284 SDDbgOperand::fromNode(From.getNode(), From.getResNo());
9285 SDDbgOperand ToLocOp = SDDbgOperand::fromNode(To.getNode(), To.getResNo());
9286
9287 SmallVector<SDDbgValue *, 2> ClonedDVs;
9288 for (SDDbgValue *Dbg : GetDbgValues(FromNode)) {
9289 if (Dbg->isInvalidated())
9290 continue;
9291
9292 // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value");
9293
9294 // Create a new location ops vector that is equal to the old vector, but
9295 // with each instance of FromLocOp replaced with ToLocOp.
9296 bool Changed = false;
9297 auto NewLocOps = Dbg->copyLocationOps();
9298 std::replace_if(
9299 NewLocOps.begin(), NewLocOps.end(),
9300 [&Changed, FromLocOp](const SDDbgOperand &Op) {
9301 bool Match = Op == FromLocOp;
9302 Changed |= Match;
9303 return Match;
9304 },
9305 ToLocOp);
9306 // Ignore this SDDbgValue if we didn't find a matching location.
9307 if (!Changed)
9308 continue;
9309
9310 DIVariable *Var = Dbg->getVariable();
9311 auto *Expr = Dbg->getExpression();
9312 // If a fragment is requested, update the expression.
9313 if (SizeInBits) {
9314 // When splitting a larger (e.g., sign-extended) value whose
9315 // lower bits are described with an SDDbgValue, do not attempt
9316 // to transfer the SDDbgValue to the upper bits.
9317 if (auto FI = Expr->getFragmentInfo())
9318 if (OffsetInBits + SizeInBits > FI->SizeInBits)
9319 continue;
9320 auto Fragment = DIExpression::createFragmentExpression(Expr, OffsetInBits,
9321 SizeInBits);
9322 if (!Fragment)
9323 continue;
9324 Expr = *Fragment;
9325 }
9326
9327 auto AdditionalDependencies = Dbg->getAdditionalDependencies();
9328 // Clone the SDDbgValue and move it to To.
9329 SDDbgValue *Clone = getDbgValueList(
9330 Var, Expr, NewLocOps, AdditionalDependencies, Dbg->isIndirect(),
9331 Dbg->getDebugLoc(), std::max(ToNode->getIROrder(), Dbg->getOrder()),
9332 Dbg->isVariadic());
9333 ClonedDVs.push_back(Clone);
9334
9335 if (InvalidateDbg) {
9336 // Invalidate value and indicate the SDDbgValue should not be emitted.
9337 Dbg->setIsInvalidated();
9338 Dbg->setIsEmitted();
9339 }
9340 }
9341
9342 for (SDDbgValue *Dbg : ClonedDVs) {
9343 assert(is_contained(Dbg->getSDNodes(), ToNode) &&
9344 "Transferred DbgValues should depend on the new SDNode");
9345 AddDbgValue(Dbg, false);
9346 }
9347 }
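
// Illustrative sketch of the fragment case (hypothetical values): when only
// bits [32, 64) of a 64-bit value described by a dbg value are moved to To,
//
//   transferDbgValues(From, To, /*OffsetInBits=*/32, /*SizeInBits=*/32);
//
// clones the SDDbgValue onto To with the original expression wrapped in a
// DW_OP_LLVM_fragment covering those bits; the original SDDbgValue on From is
// invalidated only when InvalidateDbg is set.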
9348
salvageDebugInfo(SDNode & N)9349 void SelectionDAG::salvageDebugInfo(SDNode &N) {
9350 if (!N.getHasDebugValue())
9351 return;
9352
9353 SmallVector<SDDbgValue *, 2> ClonedDVs;
9354 for (auto DV : GetDbgValues(&N)) {
9355 if (DV->isInvalidated())
9356 continue;
9357 switch (N.getOpcode()) {
9358 default:
9359 break;
9360 case ISD::ADD:
9361 SDValue N0 = N.getOperand(0);
9362 SDValue N1 = N.getOperand(1);
9363 if (!isConstantIntBuildVectorOrConstantInt(N0) &&
9364 isConstantIntBuildVectorOrConstantInt(N1)) {
9365 uint64_t Offset = N.getConstantOperandVal(1);
9366
9367 // Rewrite an ADD constant node into a DIExpression. Since we are
9368 // performing arithmetic to compute the variable's *value* in the
9369 // DIExpression, we need to mark the expression with a
9370 // DW_OP_stack_value.
9371 auto *DIExpr = DV->getExpression();
9372 auto NewLocOps = DV->copyLocationOps();
9373 bool Changed = false;
9374 for (size_t i = 0; i < NewLocOps.size(); ++i) {
9375 // We're not given a ResNo to compare against because the whole
9376 // node is going away. We know that any ISD::ADD only has one
9377 // result, so we can assume any node match is using the result.
9378 if (NewLocOps[i].getKind() != SDDbgOperand::SDNODE ||
9379 NewLocOps[i].getSDNode() != &N)
9380 continue;
9381 NewLocOps[i] = SDDbgOperand::fromNode(N0.getNode(), N0.getResNo());
9382 SmallVector<uint64_t, 3> ExprOps;
9383 DIExpression::appendOffset(ExprOps, Offset);
9384 DIExpr = DIExpression::appendOpsToArg(DIExpr, ExprOps, i, true);
9385 Changed = true;
9386 }
9387 (void)Changed;
9388 assert(Changed && "Salvage target doesn't use N");
9389
9390 auto AdditionalDependencies = DV->getAdditionalDependencies();
9391 SDDbgValue *Clone = getDbgValueList(DV->getVariable(), DIExpr,
9392 NewLocOps, AdditionalDependencies,
9393 DV->isIndirect(), DV->getDebugLoc(),
9394 DV->getOrder(), DV->isVariadic());
9395 ClonedDVs.push_back(Clone);
9396 DV->setIsInvalidated();
9397 DV->setIsEmitted();
9398 LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting";
9399 N0.getNode()->dumprFull(this);
9400 dbgs() << " into " << *DIExpr << '\n');
9401 }
9402 }
9403 }
9404
9405 for (SDDbgValue *Dbg : ClonedDVs) {
9406 assert(!Dbg->getSDNodes().empty() &&
9407 "Salvaged DbgValue should depend on a new SDNode");
9408 AddDbgValue(Dbg, false);
9409 }
9410 }
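
// Illustrative sketch of the ISD::ADD salvage above (hypothetical variable
// "x" and operand %base): if a dbg value for "x" refers to (add %base, 16)
// and the add is about to disappear, the location is redirected to %base and
// the offset is folded into the expression, roughly
//
//   before:  dbg value of "x" = N,     expr ()
//   after:   dbg value of "x" = %base, expr (DW_OP_plus_uconst 16,
//                                            DW_OP_stack_value)
//
// matching the DIExpression::appendOffset / appendOpsToArg calls above.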
9411
9412 /// Creates an SDDbgLabel node.
getDbgLabel(DILabel * Label,const DebugLoc & DL,unsigned O)9413 SDDbgLabel *SelectionDAG::getDbgLabel(DILabel *Label,
9414 const DebugLoc &DL, unsigned O) {
9415 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) &&
9416 "Expected inlined-at fields to agree");
9417 return new (DbgInfo->getAlloc()) SDDbgLabel(Label, DL, O);
9418 }
9419
9420 namespace {
9421
9422 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
9423 /// pointed to by a use iterator is deleted, increment the use iterator
9424 /// so that it doesn't dangle.
9425 ///
9426 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
9427 SDNode::use_iterator &UI;
9428 SDNode::use_iterator &UE;
9429
NodeDeleted(SDNode * N,SDNode * E)9430 void NodeDeleted(SDNode *N, SDNode *E) override {
9431 // Increment the iterator as needed.
9432 while (UI != UE && N == *UI)
9433 ++UI;
9434 }
9435
9436 public:
RAUWUpdateListener(SelectionDAG & d,SDNode::use_iterator & ui,SDNode::use_iterator & ue)9437 RAUWUpdateListener(SelectionDAG &d,
9438 SDNode::use_iterator &ui,
9439 SDNode::use_iterator &ue)
9440 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
9441 };
9442
9443 } // end anonymous namespace
9444
9445 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
9446 /// This can cause recursive merging of nodes in the DAG.
9447 ///
9448 /// This version assumes From has a single result value.
9449 ///
ReplaceAllUsesWith(SDValue FromN,SDValue To)9450 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
9451 SDNode *From = FromN.getNode();
9452 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
9453 "Cannot replace with this method!");
9454 assert(From != To.getNode() && "Cannot replace uses of with self");
9455
9456 // Preserve Debug Values
9457 transferDbgValues(FromN, To);
9458
9459 // Iterate over all the existing uses of From. New uses will be added
9460 // to the beginning of the use list, which we avoid visiting.
9461 // This specifically avoids visiting uses of From that arise while the
9462 // replacement is happening, because any such uses would be the result
9463 // of CSE: If an existing node looks like From after one of its operands
9464 // is replaced by To, we don't want to replace all of its users with To
9465 // too. See PR3018 for more info.
9466 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
9467 RAUWUpdateListener Listener(*this, UI, UE);
9468 while (UI != UE) {
9469 SDNode *User = *UI;
9470
9471 // This node is about to morph, remove its old self from the CSE maps.
9472 RemoveNodeFromCSEMaps(User);
9473
9474 // A user can appear in a use list multiple times, and when this
9475 // happens the uses are usually next to each other in the list.
9476 // To help reduce the number of CSE recomputations, process all
9477 // the uses of this user that we can find this way.
9478 do {
9479 SDUse &Use = UI.getUse();
9480 ++UI;
9481 Use.set(To);
9482 if (To->isDivergent() != From->isDivergent())
9483 updateDivergence(User);
9484 } while (UI != UE && *UI == User);
9485 // Now that we have modified User, add it back to the CSE maps. If it
9486 // already exists there, recursively merge the results together.
9487 AddModifiedNodeToCSEMaps(User);
9488 }
9489
9490 // If we just RAUW'd the root, take note.
9491 if (FromN == getRoot())
9492 setRoot(To);
9493 }
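
// Typical caller-side pattern for the single-result form above (an
// illustrative sketch): a DAG combine folding (add x, 0) replaces every use
// of the add with its first operand:
//
//   if (isNullConstant(N->getOperand(1)))
//     DAG.ReplaceAllUsesWith(SDValue(N, 0), N->getOperand(0));
//
// Any user that becomes identical to an existing node as a result is merged
// with that node when it is re-added to the CSE maps.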
9494
9495 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
9496 /// This can cause recursive merging of nodes in the DAG.
9497 ///
9498 /// This version assumes that for each value of From, there is a
9499 /// corresponding value in To in the same position with the same type.
9500 ///
ReplaceAllUsesWith(SDNode * From,SDNode * To)9501 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
9502 #ifndef NDEBUG
9503 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
9504 assert((!From->hasAnyUseOfValue(i) ||
9505 From->getValueType(i) == To->getValueType(i)) &&
9506 "Cannot use this version of ReplaceAllUsesWith!");
9507 #endif
9508
9509 // Handle the trivial case.
9510 if (From == To)
9511 return;
9512
9513 // Preserve Debug Info. Only do this if there's a use.
9514 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
9515 if (From->hasAnyUseOfValue(i)) {
9516 assert((i < To->getNumValues()) && "Invalid To location");
9517 transferDbgValues(SDValue(From, i), SDValue(To, i));
9518 }
9519
9520 // Iterate over just the existing users of From. See the comments in
9521 // the ReplaceAllUsesWith above.
9522 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
9523 RAUWUpdateListener Listener(*this, UI, UE);
9524 while (UI != UE) {
9525 SDNode *User = *UI;
9526
9527 // This node is about to morph, remove its old self from the CSE maps.
9528 RemoveNodeFromCSEMaps(User);
9529
9530 // A user can appear in a use list multiple times, and when this
9531 // happens the uses are usually next to each other in the list.
9532 // To help reduce the number of CSE recomputations, process all
9533 // the uses of this user that we can find this way.
9534 do {
9535 SDUse &Use = UI.getUse();
9536 ++UI;
9537 Use.setNode(To);
9538 if (To->isDivergent() != From->isDivergent())
9539 updateDivergence(User);
9540 } while (UI != UE && *UI == User);
9541
9542 // Now that we have modified User, add it back to the CSE maps. If it
9543 // already exists there, recursively merge the results together.
9544 AddModifiedNodeToCSEMaps(User);
9545 }
9546
9547 // If we just RAUW'd the root, take note.
9548 if (From == getRoot().getNode())
9549 setRoot(SDValue(To, getRoot().getResNo()));
9550 }
9551
9552 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
9553 /// This can cause recursive merging of nodes in the DAG.
9554 ///
9555 /// This version can replace From with any result values. To must match the
9556 /// number and types of values returned by From.
ReplaceAllUsesWith(SDNode * From,const SDValue * To)9557 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
9558 if (From->getNumValues() == 1) // Handle the simple case efficiently.
9559 return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
9560
9561 // Preserve Debug Info.
9562 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
9563 transferDbgValues(SDValue(From, i), To[i]);
9564
9565 // Iterate over just the existing users of From. See the comments in
9566 // the ReplaceAllUsesWith above.
9567 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
9568 RAUWUpdateListener Listener(*this, UI, UE);
9569 while (UI != UE) {
9570 SDNode *User = *UI;
9571
9572 // This node is about to morph, remove its old self from the CSE maps.
9573 RemoveNodeFromCSEMaps(User);
9574
9575 // A user can appear in a use list multiple times, and when this happens the
9576 // uses are usually next to each other in the list. To help reduce the
9577 // number of CSE and divergence recomputations, process all the uses of this
9578 // user that we can find this way.
9579 bool To_IsDivergent = false;
9580 do {
9581 SDUse &Use = UI.getUse();
9582 const SDValue &ToOp = To[Use.getResNo()];
9583 ++UI;
9584 Use.set(ToOp);
9585 To_IsDivergent |= ToOp->isDivergent();
9586 } while (UI != UE && *UI == User);
9587
9588 if (To_IsDivergent != From->isDivergent())
9589 updateDivergence(User);
9590
9591 // Now that we have modified User, add it back to the CSE maps. If it
9592 // already exists there, recursively merge the results together.
9593 AddModifiedNodeToCSEMaps(User);
9594 }
9595
9596 // If we just RAUW'd the root, take note.
9597 if (From == getRoot().getNode())
9598 setRoot(SDValue(To[getRoot().getResNo()]));
9599 }
9600
9601 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
9602 /// uses of other values produced by From.getNode() alone. The Deleted
9603 /// vector is handled the same way as for ReplaceAllUsesWith.
ReplaceAllUsesOfValueWith(SDValue From,SDValue To)9604 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){
9605 // Handle the really simple, really trivial case efficiently.
9606 if (From == To) return;
9607
9608 // Handle the simple, trivial, case efficiently.
9609 if (From.getNode()->getNumValues() == 1) {
9610 ReplaceAllUsesWith(From, To);
9611 return;
9612 }
9613
9614 // Preserve Debug Info.
9615 transferDbgValues(From, To);
9616
9617 // Iterate over just the existing users of From. See the comments in
9618 // the ReplaceAllUsesWith above.
9619 SDNode::use_iterator UI = From.getNode()->use_begin(),
9620 UE = From.getNode()->use_end();
9621 RAUWUpdateListener Listener(*this, UI, UE);
9622 while (UI != UE) {
9623 SDNode *User = *UI;
9624 bool UserRemovedFromCSEMaps = false;
9625
9626 // A user can appear in a use list multiple times, and when this
9627 // happens the uses are usually next to each other in the list.
9628 // To help reduce the number of CSE recomputations, process all
9629 // the uses of this user that we can find this way.
9630 do {
9631 SDUse &Use = UI.getUse();
9632
9633 // Skip uses of different values from the same node.
9634 if (Use.getResNo() != From.getResNo()) {
9635 ++UI;
9636 continue;
9637 }
9638
9639 // If this node hasn't been modified yet, it's still in the CSE maps,
9640 // so remove its old self from the CSE maps.
9641 if (!UserRemovedFromCSEMaps) {
9642 RemoveNodeFromCSEMaps(User);
9643 UserRemovedFromCSEMaps = true;
9644 }
9645
9646 ++UI;
9647 Use.set(To);
9648 if (To->isDivergent() != From->isDivergent())
9649 updateDivergence(User);
9650 } while (UI != UE && *UI == User);
9651 // We are iterating over all uses of the From node, so if a use
9652 // doesn't use the specific value, no changes are made.
9653 if (!UserRemovedFromCSEMaps)
9654 continue;
9655
9656 // Now that we have modified User, add it back to the CSE maps. If it
9657 // already exists there, recursively merge the results together.
9658 AddModifiedNodeToCSEMaps(User);
9659 }
9660
9661 // If we just RAUW'd the root, take note.
9662 if (From == getRoot())
9663 setRoot(To);
9664 }
9665
9666 namespace {
9667
9668 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
9669 /// to record information about a use.
9670 struct UseMemo {
9671 SDNode *User;
9672 unsigned Index;
9673 SDUse *Use;
9674 };
9675
9676 /// operator< - Sort Memos by User.
operator <(const UseMemo & L,const UseMemo & R)9677 bool operator<(const UseMemo &L, const UseMemo &R) {
9678 return (intptr_t)L.User < (intptr_t)R.User;
9679 }
9680
9681 } // end anonymous namespace
9682
calculateDivergence(SDNode * N)9683 bool SelectionDAG::calculateDivergence(SDNode *N) {
9684 if (TLI->isSDNodeAlwaysUniform(N)) {
9685 assert(!TLI->isSDNodeSourceOfDivergence(N, FLI, DA) &&
9686 "Conflicting divergence information!");
9687 return false;
9688 }
9689 if (TLI->isSDNodeSourceOfDivergence(N, FLI, DA))
9690 return true;
9691 for (auto &Op : N->ops()) {
9692 if (Op.Val.getValueType() != MVT::Other && Op.getNode()->isDivergent())
9693 return true;
9694 }
9695 return false;
9696 }
9697
updateDivergence(SDNode * N)9698 void SelectionDAG::updateDivergence(SDNode *N) {
9699 SmallVector<SDNode *, 16> Worklist(1, N);
9700 do {
9701 N = Worklist.pop_back_val();
9702 bool IsDivergent = calculateDivergence(N);
9703 if (N->SDNodeBits.IsDivergent != IsDivergent) {
9704 N->SDNodeBits.IsDivergent = IsDivergent;
9705 llvm::append_range(Worklist, N->uses());
9706 }
9707 } while (!Worklist.empty());
9708 }
9709
CreateTopologicalOrder(std::vector<SDNode * > & Order)9710 void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) {
9711 DenseMap<SDNode *, unsigned> Degree;
9712 Order.reserve(AllNodes.size());
9713 for (auto &N : allnodes()) {
9714 unsigned NOps = N.getNumOperands();
9715 Degree[&N] = NOps;
9716 if (0 == NOps)
9717 Order.push_back(&N);
9718 }
9719 for (size_t I = 0; I != Order.size(); ++I) {
9720 SDNode *N = Order[I];
9721 for (auto U : N->uses()) {
9722 unsigned &UnsortedOps = Degree[U];
9723 if (0 == --UnsortedOps)
9724 Order.push_back(U);
9725 }
9726 }
9727 }
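
// The routine above is Kahn's algorithm: nodes with no operands seed Order,
// and emitting a node decrements the pending-operand count of each of its
// users, which are appended once their count reaches zero. An illustrative
// walk (hypothetical DAG):
//
//   EntryToken        (0 operands)  -> emitted first
//   Constant<4>       (0 operands)  -> emitted next
//   add t0, Constant  (2 operands)  -> emitted once both operands are emitted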
9728
9729 #ifndef NDEBUG
VerifyDAGDivergence()9730 void SelectionDAG::VerifyDAGDivergence() {
9731 std::vector<SDNode *> TopoOrder;
9732 CreateTopologicalOrder(TopoOrder);
9733 for (auto *N : TopoOrder) {
9734 assert(calculateDivergence(N) == N->isDivergent() &&
9735 "Divergence bit inconsistency detected");
9736 }
9737 }
9738 #endif
9739
9740 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
9741 /// uses of other values produced by From.getNode() alone. The same value
9742 /// may appear in both the From and To list. The Deleted vector is
9743 /// handled the same way as for ReplaceAllUsesWith.
ReplaceAllUsesOfValuesWith(const SDValue * From,const SDValue * To,unsigned Num)9744 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
9745 const SDValue *To,
9746 unsigned Num){
9747 // Handle the simple, trivial case efficiently.
9748 if (Num == 1)
9749 return ReplaceAllUsesOfValueWith(*From, *To);
9750
9751 transferDbgValues(*From, *To);
9752
9753 // Read up all the uses and make records of them. This helps
9754 // processing new uses that are introduced during the
9755 // replacement process.
9756 SmallVector<UseMemo, 4> Uses;
9757 for (unsigned i = 0; i != Num; ++i) {
9758 unsigned FromResNo = From[i].getResNo();
9759 SDNode *FromNode = From[i].getNode();
9760 for (SDNode::use_iterator UI = FromNode->use_begin(),
9761 E = FromNode->use_end(); UI != E; ++UI) {
9762 SDUse &Use = UI.getUse();
9763 if (Use.getResNo() == FromResNo) {
9764 UseMemo Memo = { *UI, i, &Use };
9765 Uses.push_back(Memo);
9766 }
9767 }
9768 }
9769
9770 // Sort the uses, so that all the uses from a given User are together.
9771 llvm::sort(Uses);
9772
9773 for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
9774 UseIndex != UseIndexEnd; ) {
9775 // We know that this user uses some value of From. If it is the right
9776 // value, update it.
9777 SDNode *User = Uses[UseIndex].User;
9778
9779 // This node is about to morph, remove its old self from the CSE maps.
9780 RemoveNodeFromCSEMaps(User);
9781
9782 // The Uses array is sorted, so all the uses for a given User
9783 // are next to each other in the list.
9784 // To help reduce the number of CSE recomputations, process all
9785 // the uses of this user that we can find this way.
9786 do {
9787 unsigned i = Uses[UseIndex].Index;
9788 SDUse &Use = *Uses[UseIndex].Use;
9789 ++UseIndex;
9790
9791 Use.set(To[i]);
9792 } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
9793
9794 // Now that we have modified User, add it back to the CSE maps. If it
9795 // already exists there, recursively merge the results together.
9796 AddModifiedNodeToCSEMaps(User);
9797 }
9798 }
9799
9800 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
9801 /// based on their topological order. It returns the number of node ids
9802 /// assigned, i.e. one past the maximum id.
AssignTopologicalOrder()9803 unsigned SelectionDAG::AssignTopologicalOrder() {
9804 unsigned DAGSize = 0;
9805
9806 // SortedPos tracks the progress of the algorithm. Nodes before it are
9807 // sorted, nodes after it are unsorted. When the algorithm completes
9808 // it is at the end of the list.
9809 allnodes_iterator SortedPos = allnodes_begin();
9810
9811 // Visit all the nodes. Move nodes with no operands to the front of
9812 // the list immediately. Annotate nodes that do have operands with their
9813 // operand count. Before we do this, the Node Id fields of the nodes
9814 // may contain arbitrary values. After, the Node Id fields for nodes
9815 // before SortedPos will contain the topological sort index, and the
9816 // Node Id fields for nodes at SortedPos and after will contain the
9817 // count of outstanding operands.
9818 for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) {
9819 SDNode *N = &*I++;
9820 checkForCycles(N, this);
9821 unsigned Degree = N->getNumOperands();
9822 if (Degree == 0) {
9823 // A node with no operands, add it to the result array immediately.
9824 N->setNodeId(DAGSize++);
9825 allnodes_iterator Q(N);
9826 if (Q != SortedPos)
9827 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
9828 assert(SortedPos != AllNodes.end() && "Overran node list");
9829 ++SortedPos;
9830 } else {
9831 // Temporarily use the Node Id as scratch space for the degree count.
9832 N->setNodeId(Degree);
9833 }
9834 }
9835
9836 // Visit all the nodes. As we iterate, move nodes into sorted order,
9837 // such that by the time the end is reached all nodes will be sorted.
9838 for (SDNode &Node : allnodes()) {
9839 SDNode *N = &Node;
9840 checkForCycles(N, this);
9841 // N is in sorted position, so all its uses have one less operand
9842 // that needs to be sorted.
9843 for (SDNode *P : N->uses()) {
9844 unsigned Degree = P->getNodeId();
9845 assert(Degree != 0 && "Invalid node degree");
9846 --Degree;
9847 if (Degree == 0) {
9848 // All of P's operands are sorted, so P may be sorted now.
9849 P->setNodeId(DAGSize++);
9850 if (P->getIterator() != SortedPos)
9851 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
9852 assert(SortedPos != AllNodes.end() && "Overran node list");
9853 ++SortedPos;
9854 } else {
9855 // Update P's outstanding operand count.
9856 P->setNodeId(Degree);
9857 }
9858 }
9859 if (Node.getIterator() == SortedPos) {
9860 #ifndef NDEBUG
9861 allnodes_iterator I(N);
9862 SDNode *S = &*++I;
9863 dbgs() << "Overran sorted position:\n";
9864 S->dumprFull(this); dbgs() << "\n";
9865 dbgs() << "Checking if this is due to cycles\n";
9866 checkForCycles(this, true);
9867 #endif
9868 llvm_unreachable(nullptr);
9869 }
9870 }
9871
9872 assert(SortedPos == AllNodes.end() &&
9873 "Topological sort incomplete!");
9874 assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
9875 "First node in topological sort is not the entry token!");
9876 assert(AllNodes.front().getNodeId() == 0 &&
9877 "First node in topological sort has non-zero id!");
9878 assert(AllNodes.front().getNumOperands() == 0 &&
9879 "First node in topological sort has operands!");
9880 assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
9881 "Last node in topologic sort has unexpected id!");
9882 assert(AllNodes.back().use_empty() &&
9883 "Last node in topologic sort has users!");
9884 assert(DAGSize == allnodes_size() && "Node count mismatch!");
9885 return DAGSize;
9886 }
9887
9888 /// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
9889 /// value is produced by SD.
AddDbgValue(SDDbgValue * DB,bool isParameter)9890 void SelectionDAG::AddDbgValue(SDDbgValue *DB, bool isParameter) {
9891 for (SDNode *SD : DB->getSDNodes()) {
9892 if (!SD)
9893 continue;
9894 assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
9895 SD->setHasDebugValue(true);
9896 }
9897 DbgInfo->add(DB, isParameter);
9898 }
9899
AddDbgLabel(SDDbgLabel * DB)9900 void SelectionDAG::AddDbgLabel(SDDbgLabel *DB) { DbgInfo->add(DB); }
9901
makeEquivalentMemoryOrdering(SDValue OldChain,SDValue NewMemOpChain)9902 SDValue SelectionDAG::makeEquivalentMemoryOrdering(SDValue OldChain,
9903 SDValue NewMemOpChain) {
9904 assert(isa<MemSDNode>(NewMemOpChain) && "Expected a memop node");
9905 assert(NewMemOpChain.getValueType() == MVT::Other && "Expected a token VT");
9906 // The new memory operation must have the same position as the old load in
9907 // terms of memory dependency. Create a TokenFactor for the old load and new
9908 // memory operation and update uses of the old load's output chain to use that
9909 // TokenFactor.
9910 if (OldChain == NewMemOpChain || OldChain.use_empty())
9911 return NewMemOpChain;
9912
9913 SDValue TokenFactor = getNode(ISD::TokenFactor, SDLoc(OldChain), MVT::Other,
9914 OldChain, NewMemOpChain);
9915 ReplaceAllUsesOfValueWith(OldChain, TokenFactor);
9916 UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewMemOpChain);
9917 return TokenFactor;
9918 }
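
// Illustrative sketch of the rewiring above (hypothetical nodes):
//
//   before:  users --> OldChain                NewMemOpChain
//   after:   users --> TokenFactor(OldChain, NewMemOpChain)
//
// Because the freshly created TokenFactor is itself a use of OldChain, the
// ReplaceAllUsesOfValueWith call also rewrites its operands; the following
// UpdateNodeOperands call restores them to (OldChain, NewMemOpChain).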
9919
makeEquivalentMemoryOrdering(LoadSDNode * OldLoad,SDValue NewMemOp)9920 SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad,
9921 SDValue NewMemOp) {
9922 assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");
9923 SDValue OldChain = SDValue(OldLoad, 1);
9924 SDValue NewMemOpChain = NewMemOp.getValue(1);
9925 return makeEquivalentMemoryOrdering(OldChain, NewMemOpChain);
9926 }
9927
getSymbolFunctionGlobalAddress(SDValue Op,Function ** OutFunction)9928 SDValue SelectionDAG::getSymbolFunctionGlobalAddress(SDValue Op,
9929 Function **OutFunction) {
9930 assert(isa<ExternalSymbolSDNode>(Op) && "Node should be an ExternalSymbol");
9931
9932 auto *Symbol = cast<ExternalSymbolSDNode>(Op)->getSymbol();
9933 auto *Module = MF->getFunction().getParent();
9934 auto *Function = Module->getFunction(Symbol);
9935
9936 if (OutFunction != nullptr)
9937 *OutFunction = Function;
9938
9939 if (Function != nullptr) {
9940 auto PtrTy = TLI->getPointerTy(getDataLayout(), Function->getAddressSpace());
9941 return getGlobalAddress(Function, SDLoc(Op), PtrTy);
9942 }
9943
9944 std::string ErrorStr;
9945 raw_string_ostream ErrorFormatter(ErrorStr);
9946 ErrorFormatter << "Undefined external symbol ";
9947 ErrorFormatter << '"' << Symbol << '"';
9948 report_fatal_error(Twine(ErrorFormatter.str()));
9949 }
9950
9951 //===----------------------------------------------------------------------===//
9952 // SDNode Class
9953 //===----------------------------------------------------------------------===//
9954
isNullConstant(SDValue V)9955 bool llvm::isNullConstant(SDValue V) {
9956 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
9957 return Const != nullptr && Const->isZero();
9958 }
9959
isNullFPConstant(SDValue V)9960 bool llvm::isNullFPConstant(SDValue V) {
9961 ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V);
9962 return Const != nullptr && Const->isZero() && !Const->isNegative();
9963 }
9964
isAllOnesConstant(SDValue V)9965 bool llvm::isAllOnesConstant(SDValue V) {
9966 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
9967 return Const != nullptr && Const->isAllOnes();
9968 }
9969
isOneConstant(SDValue V)9970 bool llvm::isOneConstant(SDValue V) {
9971 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
9972 return Const != nullptr && Const->isOne();
9973 }
9974
peekThroughBitcasts(SDValue V)9975 SDValue llvm::peekThroughBitcasts(SDValue V) {
9976 while (V.getOpcode() == ISD::BITCAST)
9977 V = V.getOperand(0);
9978 return V;
9979 }
9980
peekThroughOneUseBitcasts(SDValue V)9981 SDValue llvm::peekThroughOneUseBitcasts(SDValue V) {
9982 while (V.getOpcode() == ISD::BITCAST && V.getOperand(0).hasOneUse())
9983 V = V.getOperand(0);
9984 return V;
9985 }
9986
peekThroughExtractSubvectors(SDValue V)9987 SDValue llvm::peekThroughExtractSubvectors(SDValue V) {
9988 while (V.getOpcode() == ISD::EXTRACT_SUBVECTOR)
9989 V = V.getOperand(0);
9990 return V;
9991 }
9992
isBitwiseNot(SDValue V,bool AllowUndefs)9993 bool llvm::isBitwiseNot(SDValue V, bool AllowUndefs) {
9994 if (V.getOpcode() != ISD::XOR)
9995 return false;
9996 V = peekThroughBitcasts(V.getOperand(1));
9997 unsigned NumBits = V.getScalarValueSizeInBits();
9998 ConstantSDNode *C =
9999 isConstOrConstSplat(V, AllowUndefs, /*AllowTruncation*/ true);
10000 return C && (C->getAPIntValue().countTrailingOnes() >= NumBits);
10001 }
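
// Illustrative examples of what the predicate above accepts (a sketch):
//
//   (xor x, -1)                              -> true, a plain scalar NOT
//   (xor v4i32 x, (splat/build_vector -1))   -> true, vector NOT
//   (xor x, 1)                               -> false, mask is not all ones
//
// Because AllowTruncation is passed to isConstOrConstSplat, a wider all-ones
// splat constant that is implicitly truncated to the element width is also
// accepted.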
10002
isConstOrConstSplat(SDValue N,bool AllowUndefs,bool AllowTruncation)10003 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, bool AllowUndefs,
10004 bool AllowTruncation) {
10005 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
10006 return CN;
10007
10008 // SplatVectors can truncate their operands. Ignore that case here unless
10009 // AllowTruncation is set.
10010 if (N->getOpcode() == ISD::SPLAT_VECTOR) {
10011 EVT VecEltVT = N->getValueType(0).getVectorElementType();
10012 if (auto *CN = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
10013 EVT CVT = CN->getValueType(0);
10014 assert(CVT.bitsGE(VecEltVT) && "Illegal splat_vector element extension");
10015 if (AllowTruncation || CVT == VecEltVT)
10016 return CN;
10017 }
10018 }
10019
10020 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
10021 BitVector UndefElements;
10022 ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements);
10023
10024 // BuildVectors can truncate their operands. Ignore that case here unless
10025 // AllowTruncation is set.
10026 if (CN && (UndefElements.none() || AllowUndefs)) {
10027 EVT CVT = CN->getValueType(0);
10028 EVT NSVT = N.getValueType().getScalarType();
10029 assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension");
10030 if (AllowTruncation || (CVT == NSVT))
10031 return CN;
10032 }
10033 }
10034
10035 return nullptr;
10036 }
10037
10038 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, const APInt &DemandedElts,
10039 bool AllowUndefs,
10040 bool AllowTruncation) {
10041 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
10042 return CN;
10043
10044 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
10045 BitVector UndefElements;
10046 ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements);
10047
10048 // BuildVectors can truncate their operands. Ignore that case here unless
10049 // AllowTruncation is set.
10050 if (CN && (UndefElements.none() || AllowUndefs)) {
10051 EVT CVT = CN->getValueType(0);
10052 EVT NSVT = N.getValueType().getScalarType();
10053 assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension");
10054 if (AllowTruncation || (CVT == NSVT))
10055 return CN;
10056 }
10057 }
10058
10059 return nullptr;
10060 }
10061
10062 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, bool AllowUndefs) {
10063 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
10064 return CN;
10065
10066 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
10067 BitVector UndefElements;
10068 ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements);
10069 if (CN && (UndefElements.none() || AllowUndefs))
10070 return CN;
10071 }
10072
10073 if (N.getOpcode() == ISD::SPLAT_VECTOR)
10074 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N.getOperand(0)))
10075 return CN;
10076
10077 return nullptr;
10078 }
10079
10080 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N,
10081 const APInt &DemandedElts,
10082 bool AllowUndefs) {
10083 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
10084 return CN;
10085
10086 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
10087 BitVector UndefElements;
10088 ConstantFPSDNode *CN =
10089 BV->getConstantFPSplatNode(DemandedElts, &UndefElements);
10090 if (CN && (UndefElements.none() || AllowUndefs))
10091 return CN;
10092 }
10093
10094 return nullptr;
10095 }
10096
10097 bool llvm::isNullOrNullSplat(SDValue N, bool AllowUndefs) {
10098 // TODO: may want to use peekThroughBitcast() here.
10099 ConstantSDNode *C =
10100 isConstOrConstSplat(N, AllowUndefs, /*AllowTruncation=*/true);
10101 return C && C->isZero();
10102 }
10103
10104 bool llvm::isOneOrOneSplat(SDValue N, bool AllowUndefs) {
10105 // TODO: may want to use peekThroughBitcast() here.
10106 unsigned BitWidth = N.getScalarValueSizeInBits();
10107 ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs);
10108 return C && C->isOne() && C->getValueSizeInBits(0) == BitWidth;
10109 }
10110
10111 bool llvm::isAllOnesOrAllOnesSplat(SDValue N, bool AllowUndefs) {
10112 N = peekThroughBitcasts(N);
10113 unsigned BitWidth = N.getScalarValueSizeInBits();
10114 ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs);
10115 return C && C->isAllOnes() && C->getValueSizeInBits(0) == BitWidth;
10116 }
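// Note (illustrative): isOneOrOneSplat / isAllOnesOrAllOnesSplat accept both
// a scalar constant and a constant BUILD_VECTOR splat, e.g.
//   (build_vector i32 -1, i32 -1, i32 -1, i32 -1)  -> all-ones splat
// The additional getValueSizeInBits(0) == BitWidth comparison insists that
// the splatted constant is exactly as wide as the (bitcast-peeked) scalar
// element type, so implicitly truncated constants are not misreported.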
10117
10118 HandleSDNode::~HandleSDNode() {
10119 DropOperands();
10120 }
10121
10122 GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
10123 const DebugLoc &DL,
10124 const GlobalValue *GA, EVT VT,
10125 int64_t o, unsigned TF)
10126 : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
10127 TheGlobal = GA;
10128 }
10129
10130 AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl,
10131 EVT VT, unsigned SrcAS,
10132 unsigned DestAS)
10133 : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)),
10134 SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}
10135
10136 MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
10137 SDVTList VTs, EVT memvt, MachineMemOperand *mmo)
10138 : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
10139 MemSDNodeBits.IsVolatile = MMO->isVolatile();
10140 MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal();
10141 MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable();
10142 MemSDNodeBits.IsInvariant = MMO->isInvariant();
10143
10144 // We check here that the size of the memory operand fits within the size of
10145 // the MMO. This is because the MMO might indicate only a possible address
10146 // range instead of specifying the affected memory addresses precisely.
10147 // TODO: Make MachineMemOperands aware of scalable vectors.
10148 assert(memvt.getStoreSize().getKnownMinSize() <= MMO->getSize() &&
10149 "Size mismatch!");
10150 }
10151
10152 /// Profile - Gather unique data for the node.
10153 ///
10154 void SDNode::Profile(FoldingSetNodeID &ID) const {
10155 AddNodeIDNode(ID, this);
10156 }
10157
10158 namespace {
10159
10160 struct EVTArray {
10161 std::vector<EVT> VTs;
10162
10163 EVTArray() {
10164 VTs.reserve(MVT::VALUETYPE_SIZE);
10165 for (unsigned i = 0; i < MVT::VALUETYPE_SIZE; ++i)
10166 VTs.push_back(MVT((MVT::SimpleValueType)i));
10167 }
10168 };
10169
10170 } // end anonymous namespace
10171
10172 static ManagedStatic<std::set<EVT, EVT::compareRawBits>> EVTs;
10173 static ManagedStatic<EVTArray> SimpleVTArray;
10174 static ManagedStatic<sys::SmartMutex<true>> VTMutex;
10175
10176 /// getValueTypeList - Return a pointer to the specified value type.
10177 ///
10178 const EVT *SDNode::getValueTypeList(EVT VT) {
10179 if (VT.isExtended()) {
10180 sys::SmartScopedLock<true> Lock(*VTMutex);
10181 return &(*EVTs->insert(VT).first);
10182 }
10183 assert(VT.getSimpleVT() < MVT::VALUETYPE_SIZE && "Value type out of range!");
10184 return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
10185 }
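// Note (illustrative): simple value types are served from a lazily built
// table indexed by MVT::SimpleValueType, so repeated queries for, say,
// MVT::i32 hand back the same EVT*. Extended types (e.g. odd-sized integers
// or unusual vectors) are interned in a mutex-guarded global set, so the
// returned pointer stays stable for as long as the ManagedStatic is live.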
10186
10187 /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
10188 /// indicated value. This method ignores uses of other values defined by this
10189 /// operation.
10190 bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
10191 assert(Value < getNumValues() && "Bad value!");
10192
10193 // TODO: Only iterate over uses of a given value of the node
10194 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
10195 if (UI.getUse().getResNo() == Value) {
10196 if (NUses == 0)
10197 return false;
10198 --NUses;
10199 }
10200 }
10201
10202 // Found exactly the right number of uses?
10203 return NUses == 0;
10204 }
10205
10206 /// hasAnyUseOfValue - Return true if there are any uses of the indicated
10207 /// value. This method ignores uses of other values defined by this operation.
10208 bool SDNode::hasAnyUseOfValue(unsigned Value) const {
10209 assert(Value < getNumValues() && "Bad value!");
10210
10211 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
10212 if (UI.getUse().getResNo() == Value)
10213 return true;
10214
10215 return false;
10216 }
10217
10218 /// isOnlyUserOf - Return true if this node is the only use of N.
10219 bool SDNode::isOnlyUserOf(const SDNode *N) const {
10220 bool Seen = false;
10221 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
10222 SDNode *User = *I;
10223 if (User == this)
10224 Seen = true;
10225 else
10226 return false;
10227 }
10228
10229 return Seen;
10230 }
10231
10232 /// Return true if the only users of N are contained in Nodes.
10233 bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) {
10234 bool Seen = false;
10235 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
10236 SDNode *User = *I;
10237 if (llvm::is_contained(Nodes, User))
10238 Seen = true;
10239 else
10240 return false;
10241 }
10242
10243 return Seen;
10244 }
10245
10246 /// isOperandOf - Return true if this node is an operand of N.
10247 bool SDValue::isOperandOf(const SDNode *N) const {
10248 return is_contained(N->op_values(), *this);
10249 }
10250
10251 bool SDNode::isOperandOf(const SDNode *N) const {
10252 return any_of(N->op_values(),
10253 [this](SDValue Op) { return this == Op.getNode(); });
10254 }
10255
10256 /// reachesChainWithoutSideEffects - Return true if this operand (which must
10257 /// be a chain) reaches the specified operand without crossing any
10258 /// side-effecting instructions on any chain path. In practice, this looks
10259 /// through token factors and non-volatile loads. In order to remain efficient,
10260 /// this only looks a couple of nodes in, it does not do an exhaustive search.
10261 ///
10262 /// Note that we only need to examine chains when we're searching for
10263 /// side-effects; SelectionDAG requires that all side-effects are represented
10264 /// by chains, even if another operand would force a specific ordering. This
10265 /// constraint is necessary to allow transformations like splitting loads.
10266 bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
10267 unsigned Depth) const {
10268 if (*this == Dest) return true;
10269
10270 // Don't search too deeply, we just want to be able to see through
10271 // TokenFactor's etc.
10272 if (Depth == 0) return false;
10273
10274 // If this is a token factor, all inputs to the TF happen in parallel.
10275 if (getOpcode() == ISD::TokenFactor) {
10276 // First, try a shallow search.
10277 if (is_contained((*this)->ops(), Dest)) {
10278 // We found the chain we want as an operand of this TokenFactor.
10279 // Essentially, we reach the chain without side-effects if we could
10280 // serialize the TokenFactor into a simple chain of operations with
10281 // Dest as the last operation. This is automatically true if the
10282 // chain has one use: there are no other ordering constraints.
10283 // If the chain has more than one use, we give up: some other
10284 // use of Dest might force a side-effect between Dest and the current
10285 // node.
10286 if (Dest.hasOneUse())
10287 return true;
10288 }
10289 // Next, try a deep search: check whether every operand of the TokenFactor
10290 // reaches Dest.
10291 return llvm::all_of((*this)->ops(), [=](SDValue Op) {
10292 return Op.reachesChainWithoutSideEffects(Dest, Depth - 1);
10293 });
10294 }
10295
10296 // Loads don't have side effects, look through them.
10297 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
10298 if (Ld->isUnordered())
10299 return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
10300 }
10301 return false;
10302 }
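// Worked example (illustrative): given
//   t1: ch = load<(unordered)> t0, ...
//   t2: ch = TokenFactor t0, t1
// t2.reachesChainWithoutSideEffects(t0) holds because every TokenFactor
// operand reaches t0: t0 trivially, and t1 by looking through the unordered
// load to its chain operand. A volatile/atomic load or a store on the path
// would stop the walk and the query would conservatively return false.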
10303
10304 bool SDNode::hasPredecessor(const SDNode *N) const {
10305 SmallPtrSet<const SDNode *, 32> Visited;
10306 SmallVector<const SDNode *, 16> Worklist;
10307 Worklist.push_back(this);
10308 return hasPredecessorHelper(N, Visited, Worklist);
10309 }
10310
10311 void SDNode::intersectFlagsWith(const SDNodeFlags Flags) {
10312 this->Flags.intersectWith(Flags);
10313 }
10314
10315 SDValue
10316 SelectionDAG::matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp,
10317 ArrayRef<ISD::NodeType> CandidateBinOps,
10318 bool AllowPartials) {
10319 // The pattern must end in an extract from index 0.
10320 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
10321 !isNullConstant(Extract->getOperand(1)))
10322 return SDValue();
10323
10324 // Match against one of the candidate binary ops.
10325 SDValue Op = Extract->getOperand(0);
10326 if (llvm::none_of(CandidateBinOps, [Op](ISD::NodeType BinOp) {
10327 return Op.getOpcode() == unsigned(BinOp);
10328 }))
10329 return SDValue();
10330
10331 // Floating-point reductions may require relaxed constraints on the final step
10332 // of the reduction because they may reorder intermediate operations.
10333 unsigned CandidateBinOp = Op.getOpcode();
10334 if (Op.getValueType().isFloatingPoint()) {
10335 SDNodeFlags Flags = Op->getFlags();
10336 switch (CandidateBinOp) {
10337 case ISD::FADD:
10338 if (!Flags.hasNoSignedZeros() || !Flags.hasAllowReassociation())
10339 return SDValue();
10340 break;
10341 default:
10342 llvm_unreachable("Unhandled FP opcode for binop reduction");
10343 }
10344 }
10345
10346 // Matching failed - attempt to see if we did enough stages that a partial
10347 // reduction from a subvector is possible.
10348 auto PartialReduction = [&](SDValue Op, unsigned NumSubElts) {
10349 if (!AllowPartials || !Op)
10350 return SDValue();
10351 EVT OpVT = Op.getValueType();
10352 EVT OpSVT = OpVT.getScalarType();
10353 EVT SubVT = EVT::getVectorVT(*getContext(), OpSVT, NumSubElts);
10354 if (!TLI->isExtractSubvectorCheap(SubVT, OpVT, 0))
10355 return SDValue();
10356 BinOp = (ISD::NodeType)CandidateBinOp;
10357 return getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Op), SubVT, Op,
10358 getVectorIdxConstant(0, SDLoc(Op)));
10359 };
10360
10361 // At each stage, we're looking for something that looks like:
10362 // %s = shufflevector <8 x i32> %op, <8 x i32> undef,
10363 // <8 x i32> <i32 2, i32 3, i32 undef, i32 undef,
10364 // i32 undef, i32 undef, i32 undef, i32 undef>
10365 // %a = binop <8 x i32> %op, %s
10366 // Where the mask changes according to the stage. E.g. for a 3-stage pyramid,
10367 // we expect something like:
10368 // <4,5,6,7,u,u,u,u>
10369 // <2,3,u,u,u,u,u,u>
10370 // <1,u,u,u,u,u,u,u>
10371 // While a partial reduction match would be:
10372 // <2,3,u,u,u,u,u,u>
10373 // <1,u,u,u,u,u,u,u>
10374 unsigned Stages = Log2_32(Op.getValueType().getVectorNumElements());
10375 SDValue PrevOp;
10376 for (unsigned i = 0; i < Stages; ++i) {
10377 unsigned MaskEnd = (1 << i);
10378
10379 if (Op.getOpcode() != CandidateBinOp)
10380 return PartialReduction(PrevOp, MaskEnd);
10381
10382 SDValue Op0 = Op.getOperand(0);
10383 SDValue Op1 = Op.getOperand(1);
10384
10385 ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(Op0);
10386 if (Shuffle) {
10387 Op = Op1;
10388 } else {
10389 Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1);
10390 Op = Op0;
10391 }
10392
10393 // The first operand of the shuffle should be the same as the other operand
10394 // of the binop.
10395 if (!Shuffle || Shuffle->getOperand(0) != Op)
10396 return PartialReduction(PrevOp, MaskEnd);
10397
10398 // Verify the shuffle has the expected (at this stage of the pyramid) mask.
10399 for (int Index = 0; Index < (int)MaskEnd; ++Index)
10400 if (Shuffle->getMaskElt(Index) != (int)(MaskEnd + Index))
10401 return PartialReduction(PrevOp, MaskEnd);
10402
10403 PrevOp = Op;
10404 }
10405
10406 // Handle subvector reductions, which tend to appear after the shuffle
10407 // reduction stages.
10408 while (Op.getOpcode() == CandidateBinOp) {
10409 unsigned NumElts = Op.getValueType().getVectorNumElements();
10410 SDValue Op0 = Op.getOperand(0);
10411 SDValue Op1 = Op.getOperand(1);
10412 if (Op0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
10413 Op1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
10414 Op0.getOperand(0) != Op1.getOperand(0))
10415 break;
10416 SDValue Src = Op0.getOperand(0);
10417 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
10418 if (NumSrcElts != (2 * NumElts))
10419 break;
10420 if (!(Op0.getConstantOperandAPInt(1) == 0 &&
10421 Op1.getConstantOperandAPInt(1) == NumElts) &&
10422 !(Op1.getConstantOperandAPInt(1) == 0 &&
10423 Op0.getConstantOperandAPInt(1) == NumElts))
10424 break;
10425 Op = Src;
10426 }
10427
10428 BinOp = (ISD::NodeType)CandidateBinOp;
10429 return Op;
10430 }
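// Example (illustrative): a full v8i32 ADD reduction that ends in
//   extract_vector_elt (add X2, shuffle<1,u,u,u,u,u,u,u>(X2)), 0
// with X2 = add(X1, shuffle<2,3,u,...>(X1)) and
//      X1 = add(X0, shuffle<4,5,6,7,u,...>(X0))
// matches with BinOp == ISD::ADD and returns X0, the widest vector at the
// base of the pyramid. If the widest stage is missing and AllowPartials is
// set, the match instead returns an extract_subvector of the last value
// that did match (a partial reduction), provided the target reports that
// subvector extraction as cheap via isExtractSubvectorCheap.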
10431
10432 SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
10433 assert(N->getNumValues() == 1 &&
10434 "Can't unroll a vector with multiple results!");
10435
10436 EVT VT = N->getValueType(0);
10437 unsigned NE = VT.getVectorNumElements();
10438 EVT EltVT = VT.getVectorElementType();
10439 SDLoc dl(N);
10440
10441 SmallVector<SDValue, 8> Scalars;
10442 SmallVector<SDValue, 4> Operands(N->getNumOperands());
10443
10444 // If ResNE is 0, fully unroll the vector op.
10445 if (ResNE == 0)
10446 ResNE = NE;
10447 else if (NE > ResNE)
10448 NE = ResNE;
10449
10450 unsigned i;
10451 for (i = 0; i != NE; ++i) {
10452 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
10453 SDValue Operand = N->getOperand(j);
10454 EVT OperandVT = Operand.getValueType();
10455 if (OperandVT.isVector()) {
10456 // A vector operand; extract a single element.
10457 EVT OperandEltVT = OperandVT.getVectorElementType();
10458 Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT,
10459 Operand, getVectorIdxConstant(i, dl));
10460 } else {
10461 // A scalar operand; just use it as is.
10462 Operands[j] = Operand;
10463 }
10464 }
10465
10466 switch (N->getOpcode()) {
10467 default: {
10468 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands,
10469 N->getFlags()));
10470 break;
10471 }
10472 case ISD::VSELECT:
10473 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
10474 break;
10475 case ISD::SHL:
10476 case ISD::SRA:
10477 case ISD::SRL:
10478 case ISD::ROTL:
10479 case ISD::ROTR:
10480 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
10481 getShiftAmountOperand(Operands[0].getValueType(),
10482 Operands[1])));
10483 break;
10484 case ISD::SIGN_EXTEND_INREG: {
10485 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
10486 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
10487 Operands[0],
10488 getValueType(ExtVT)));
10489 }
10490 }
10491 }
10492
10493 for (; i < ResNE; ++i)
10494 Scalars.push_back(getUNDEF(EltVT));
10495
10496 EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE);
10497 return getBuildVector(VecVT, dl, Scalars);
10498 }
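// Example (illustrative): unrolling (add v4i32:A, v4i32:B) with ResNE == 0
// produces four scalar nodes
//   (add (extract_vector_elt A, i), (extract_vector_elt B, i))  for i = 0..3
// glued back together with a BUILD_VECTOR. With ResNE == 8 the remaining
// four lanes are filled with UNDEF; with ResNE == 2 only the first two lanes
// are computed. VSELECT becomes scalar SELECT, and shift amounts are
// re-legalized per element via getShiftAmountOperand.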
10499
10500 std::pair<SDValue, SDValue> SelectionDAG::UnrollVectorOverflowOp(
10501 SDNode *N, unsigned ResNE) {
10502 unsigned Opcode = N->getOpcode();
10503 assert((Opcode == ISD::UADDO || Opcode == ISD::SADDO ||
10504 Opcode == ISD::USUBO || Opcode == ISD::SSUBO ||
10505 Opcode == ISD::UMULO || Opcode == ISD::SMULO) &&
10506 "Expected an overflow opcode");
10507
10508 EVT ResVT = N->getValueType(0);
10509 EVT OvVT = N->getValueType(1);
10510 EVT ResEltVT = ResVT.getVectorElementType();
10511 EVT OvEltVT = OvVT.getVectorElementType();
10512 SDLoc dl(N);
10513
10514 // If ResNE is 0, fully unroll the vector op.
10515 unsigned NE = ResVT.getVectorNumElements();
10516 if (ResNE == 0)
10517 ResNE = NE;
10518 else if (NE > ResNE)
10519 NE = ResNE;
10520
10521 SmallVector<SDValue, 8> LHSScalars;
10522 SmallVector<SDValue, 8> RHSScalars;
10523 ExtractVectorElements(N->getOperand(0), LHSScalars, 0, NE);
10524 ExtractVectorElements(N->getOperand(1), RHSScalars, 0, NE);
10525
10526 EVT SVT = TLI->getSetCCResultType(getDataLayout(), *getContext(), ResEltVT);
10527 SDVTList VTs = getVTList(ResEltVT, SVT);
10528 SmallVector<SDValue, 8> ResScalars;
10529 SmallVector<SDValue, 8> OvScalars;
10530 for (unsigned i = 0; i < NE; ++i) {
10531 SDValue Res = getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]);
10532 SDValue Ov =
10533 getSelect(dl, OvEltVT, Res.getValue(1),
10534 getBoolConstant(true, dl, OvEltVT, ResVT),
10535 getConstant(0, dl, OvEltVT));
10536
10537 ResScalars.push_back(Res);
10538 OvScalars.push_back(Ov);
10539 }
10540
10541 ResScalars.append(ResNE - NE, getUNDEF(ResEltVT));
10542 OvScalars.append(ResNE - NE, getUNDEF(OvEltVT));
10543
10544 EVT NewResVT = EVT::getVectorVT(*getContext(), ResEltVT, ResNE);
10545 EVT NewOvVT = EVT::getVectorVT(*getContext(), OvEltVT, ResNE);
10546 return std::make_pair(getBuildVector(NewResVT, dl, ResScalars),
10547 getBuildVector(NewOvVT, dl, OvScalars));
10548 }
10549
10550 bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD,
10551 LoadSDNode *Base,
10552 unsigned Bytes,
10553 int Dist) const {
10554 if (LD->isVolatile() || Base->isVolatile())
10555 return false;
10556 // TODO: probably too restrictive for atomics, revisit
10557 if (!LD->isSimple())
10558 return false;
10559 if (LD->isIndexed() || Base->isIndexed())
10560 return false;
10561 if (LD->getChain() != Base->getChain())
10562 return false;
10563 EVT VT = LD->getValueType(0);
10564 if (VT.getSizeInBits() / 8 != Bytes)
10565 return false;
10566
10567 auto BaseLocDecomp = BaseIndexOffset::match(Base, *this);
10568 auto LocDecomp = BaseIndexOffset::match(LD, *this);
10569
10570 int64_t Offset = 0;
10571 if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset))
10572 return (Dist * Bytes == Offset);
10573 return false;
10574 }
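// Example (illustrative): two simple i32 loads on the same chain, where
// BaseIndexOffset::match decomposes Base as (FrameIndex #0 + 4) and LD as
// (FrameIndex #0 + 8), are "consecutive" for Bytes == 4 and Dist == 1,
// since Dist * Bytes == 4 equals the byte offset of LD relative to Base.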
10575
10576 /// InferPtrAlign - Infer the alignment of a load / store address. Return None
10577 /// if it cannot be inferred.
10578 MaybeAlign SelectionDAG::InferPtrAlign(SDValue Ptr) const {
10579 // If this is a GlobalAddress + cst, return the alignment.
10580 const GlobalValue *GV = nullptr;
10581 int64_t GVOffset = 0;
10582 if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
10583 unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
10584 KnownBits Known(PtrWidth);
10585 llvm::computeKnownBits(GV, Known, getDataLayout());
10586 unsigned AlignBits = Known.countMinTrailingZeros();
10587 if (AlignBits)
10588 return commonAlignment(Align(1ull << std::min(31U, AlignBits)), GVOffset);
10589 }
10590
10591 // If this is a direct reference to a stack slot, use information about the
10592 // stack slot's alignment.
10593 int FrameIdx = INT_MIN;
10594 int64_t FrameOffset = 0;
10595 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
10596 FrameIdx = FI->getIndex();
10597 } else if (isBaseWithConstantOffset(Ptr) &&
10598 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
10599 // Handle FI+Cst
10600 FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
10601 FrameOffset = Ptr.getConstantOperandVal(1);
10602 }
10603
10604 if (FrameIdx != INT_MIN) {
10605 const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
10606 return commonAlignment(MFI.getObjectAlign(FrameIdx), FrameOffset);
10607 }
10608
10609 return None;
10610 }
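// Example (illustrative): for Ptr == (add (GlobalAddress @g), 4) where @g is
// known to be 16-byte aligned, computeKnownBits reports at least four
// trailing zero bits and commonAlignment(Align(16), 4) yields Align(4).
// For Ptr == (add (FrameIndex #2), 8) the result is the common alignment of
// the stack object's alignment and the constant offset 8.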
10611
10612 /// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
10613 /// which is split (or expanded) into two not necessarily identical pieces.
10614 std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
10615 // Currently all types are split in half.
10616 EVT LoVT, HiVT;
10617 if (!VT.isVector())
10618 LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
10619 else
10620 LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext());
10621
10622 return std::make_pair(LoVT, HiVT);
10623 }
10624
10625 /// GetDependentSplitDestVTs - Compute the VTs needed for the low/hi parts of a
10626 /// type, dependent on an enveloping VT that has been split into two identical
10627 /// pieces. Sets the HiIsEmpty flag when hi type has zero storage size.
10628 std::pair<EVT, EVT>
10629 SelectionDAG::GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT,
10630 bool *HiIsEmpty) const {
10631 EVT EltTp = VT.getVectorElementType();
10632 // Examples:
10633 // custom VL=8 with enveloping VL=8/8 yields 8/0 (hi empty)
10634 // custom VL=9 with enveloping VL=8/8 yields 8/1
10635 // custom VL=10 with enveloping VL=8/8 yields 8/2
10636 // etc.
10637 ElementCount VTNumElts = VT.getVectorElementCount();
10638 ElementCount EnvNumElts = EnvVT.getVectorElementCount();
10639 assert(VTNumElts.isScalable() == EnvNumElts.isScalable() &&
10640 "Mixing fixed width and scalable vectors when enveloping a type");
10641 EVT LoVT, HiVT;
10642 if (VTNumElts.getKnownMinValue() > EnvNumElts.getKnownMinValue()) {
10643 LoVT = EnvVT;
10644 HiVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts - EnvNumElts);
10645 *HiIsEmpty = false;
10646 } else {
10647 // Flag that hi type has zero storage size, but return the split envelope type
10648 // (this would be easier if vector types with zero elements were allowed).
10649 LoVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts);
10650 HiVT = EnvVT;
10651 *HiIsEmpty = true;
10652 }
10653 return std::make_pair(LoVT, HiVT);
10654 }
10655
10656 /// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
10657 /// low/high part.
10658 std::pair<SDValue, SDValue>
10659 SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
10660 const EVT &HiVT) {
10661 assert(LoVT.isScalableVector() == HiVT.isScalableVector() &&
10662 LoVT.isScalableVector() == N.getValueType().isScalableVector() &&
10663 "Splitting vector with an invalid mixture of fixed and scalable "
10664 "vector types");
10665 assert(LoVT.getVectorMinNumElements() + HiVT.getVectorMinNumElements() <=
10666 N.getValueType().getVectorMinNumElements() &&
10667 "More vector elements requested than available!");
10668 SDValue Lo, Hi;
10669 Lo =
10670 getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N, getVectorIdxConstant(0, DL));
10671 // For scalable vectors it is safe to use LoVT.getVectorMinNumElements()
10672 // (rather than having to use ElementCount), because EXTRACT_SUBVECTOR scales
10673 // IDX with the runtime scaling factor of the result vector type. For
10674 // fixed-width result vectors, that runtime scaling factor is 1.
10675 Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
10676 getVectorIdxConstant(LoVT.getVectorMinNumElements(), DL));
10677 return std::make_pair(Lo, Hi);
10678 }
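// Example (illustrative): splitting v8i32:N with LoVT == HiVT == v4i32 yields
//   Lo = extract_subvector N, 0
//   Hi = extract_subvector N, 4
// For a scalable nxv8i32 split into two nxv4i32 halves the same index 4 is
// used, because EXTRACT_SUBVECTOR indices are implicitly scaled by the
// runtime scaling factor of the (scalable) result type.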
10679
10680 /// Widen the vector up to the next power of two using INSERT_SUBVECTOR.
10681 SDValue SelectionDAG::WidenVector(const SDValue &N, const SDLoc &DL) {
10682 EVT VT = N.getValueType();
10683 EVT WideVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
10684 NextPowerOf2(VT.getVectorNumElements()));
10685 return getNode(ISD::INSERT_SUBVECTOR, DL, WideVT, getUNDEF(WideVT), N,
10686 getVectorIdxConstant(0, DL));
10687 }
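// Example (illustrative): widening a v3f32 value produces
//   insert_subvector (undef v4f32), N, 0
// i.e. the original lanes in positions 0..2 and an undefined fourth lane.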
10688
10689 void SelectionDAG::ExtractVectorElements(SDValue Op,
10690 SmallVectorImpl<SDValue> &Args,
10691 unsigned Start, unsigned Count,
10692 EVT EltVT) {
10693 EVT VT = Op.getValueType();
10694 if (Count == 0)
10695 Count = VT.getVectorNumElements();
10696 if (EltVT == EVT())
10697 EltVT = VT.getVectorElementType();
10698 SDLoc SL(Op);
10699 for (unsigned i = Start, e = Start + Count; i != e; ++i) {
10700 Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Op,
10701 getVectorIdxConstant(i, SL)));
10702 }
10703 }
10704
10705 // getAddressSpace - Return the address space this GlobalAddress belongs to.
10706 unsigned GlobalAddressSDNode::getAddressSpace() const {
10707 return getGlobal()->getType()->getAddressSpace();
10708 }
10709
10710 Type *ConstantPoolSDNode::getType() const {
10711 if (isMachineConstantPoolEntry())
10712 return Val.MachineCPVal->getType();
10713 return Val.ConstVal->getType();
10714 }
10715
10716 bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
10717 unsigned &SplatBitSize,
10718 bool &HasAnyUndefs,
10719 unsigned MinSplatBits,
10720 bool IsBigEndian) const {
10721 EVT VT = getValueType(0);
10722 assert(VT.isVector() && "Expected a vector type");
10723 unsigned VecWidth = VT.getSizeInBits();
10724 if (MinSplatBits > VecWidth)
10725 return false;
10726
10727 // FIXME: The widths are based on this node's type, but build vectors can
10728 // truncate their operands.
10729 SplatValue = APInt(VecWidth, 0);
10730 SplatUndef = APInt(VecWidth, 0);
10731
10732 // Get the bits. Bits with undefined values (when the corresponding element
10733 // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
10734 // in SplatValue. If any of the values are not constant, give up and return
10735 // false.
10736 unsigned int NumOps = getNumOperands();
10737 assert(NumOps > 0 && "isConstantSplat has 0-size build vector");
10738 unsigned EltWidth = VT.getScalarSizeInBits();
10739
10740 for (unsigned j = 0; j < NumOps; ++j) {
10741 unsigned i = IsBigEndian ? NumOps - 1 - j : j;
10742 SDValue OpVal = getOperand(i);
10743 unsigned BitPos = j * EltWidth;
10744
10745 if (OpVal.isUndef())
10746 SplatUndef.setBits(BitPos, BitPos + EltWidth);
10747 else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal))
10748 SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
10749 else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
10750 SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
10751 else
10752 return false;
10753 }
10754
10755 // The build_vector is all constants or undefs. Find the smallest element
10756 // size that splats the vector.
10757 HasAnyUndefs = (SplatUndef != 0);
10758
10759 // FIXME: This does not work for vectors with elements less than 8 bits.
10760 while (VecWidth > 8) {
10761 unsigned HalfSize = VecWidth / 2;
10762 APInt HighValue = SplatValue.extractBits(HalfSize, HalfSize);
10763 APInt LowValue = SplatValue.extractBits(HalfSize, 0);
10764 APInt HighUndef = SplatUndef.extractBits(HalfSize, HalfSize);
10765 APInt LowUndef = SplatUndef.extractBits(HalfSize, 0);
10766
10767 // If the two halves do not match (ignoring undef bits), stop here.
10768 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
10769 MinSplatBits > HalfSize)
10770 break;
10771
10772 SplatValue = HighValue | LowValue;
10773 SplatUndef = HighUndef & LowUndef;
10774
10775 VecWidth = HalfSize;
10776 }
10777
10778 SplatBitSize = VecWidth;
10779 return true;
10780 }
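// Worked example (illustrative): for a v4i16 build_vector of 0x5555 on a
// little-endian target, the 64 collected bits are 0x5555555555555555. Each
// halving step finds matching halves, so the loop narrows 64 -> 32 -> 16 -> 8
// and stops; the reported splat is SplatBitSize == 8 with SplatValue == 0x55
// and HasAnyUndefs == false. Passing MinSplatBits == 16 would have stopped
// the narrowing at 16 bits instead.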
10781
10782 SDValue BuildVectorSDNode::getSplatValue(const APInt &DemandedElts,
10783 BitVector *UndefElements) const {
10784 unsigned NumOps = getNumOperands();
10785 if (UndefElements) {
10786 UndefElements->clear();
10787 UndefElements->resize(NumOps);
10788 }
10789 assert(NumOps == DemandedElts.getBitWidth() && "Unexpected vector size");
10790 if (!DemandedElts)
10791 return SDValue();
10792 SDValue Splatted;
10793 for (unsigned i = 0; i != NumOps; ++i) {
10794 if (!DemandedElts[i])
10795 continue;
10796 SDValue Op = getOperand(i);
10797 if (Op.isUndef()) {
10798 if (UndefElements)
10799 (*UndefElements)[i] = true;
10800 } else if (!Splatted) {
10801 Splatted = Op;
10802 } else if (Splatted != Op) {
10803 return SDValue();
10804 }
10805 }
10806
10807 if (!Splatted) {
10808 unsigned FirstDemandedIdx = DemandedElts.countTrailingZeros();
10809 assert(getOperand(FirstDemandedIdx).isUndef() &&
10810 "Can only have a splat without a constant for all undefs.");
10811 return getOperand(FirstDemandedIdx);
10812 }
10813
10814 return Splatted;
10815 }
10816
10817 SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
10818 APInt DemandedElts = APInt::getAllOnes(getNumOperands());
10819 return getSplatValue(DemandedElts, UndefElements);
10820 }
10821
10822 bool BuildVectorSDNode::getRepeatedSequence(const APInt &DemandedElts,
10823 SmallVectorImpl<SDValue> &Sequence,
10824 BitVector *UndefElements) const {
10825 unsigned NumOps = getNumOperands();
10826 Sequence.clear();
10827 if (UndefElements) {
10828 UndefElements->clear();
10829 UndefElements->resize(NumOps);
10830 }
10831 assert(NumOps == DemandedElts.getBitWidth() && "Unexpected vector size");
10832 if (!DemandedElts || NumOps < 2 || !isPowerOf2_32(NumOps))
10833 return false;
10834
10835 // Set the undefs even if we don't find a sequence (like getSplatValue).
10836 if (UndefElements)
10837 for (unsigned I = 0; I != NumOps; ++I)
10838 if (DemandedElts[I] && getOperand(I).isUndef())
10839 (*UndefElements)[I] = true;
10840
10841 // Iteratively widen the sequence length looking for repetitions.
10842 for (unsigned SeqLen = 1; SeqLen < NumOps; SeqLen *= 2) {
10843 Sequence.append(SeqLen, SDValue());
10844 for (unsigned I = 0; I != NumOps; ++I) {
10845 if (!DemandedElts[I])
10846 continue;
10847 SDValue &SeqOp = Sequence[I % SeqLen];
10848 SDValue Op = getOperand(I);
10849 if (Op.isUndef()) {
10850 if (!SeqOp)
10851 SeqOp = Op;
10852 continue;
10853 }
10854 if (SeqOp && !SeqOp.isUndef() && SeqOp != Op) {
10855 Sequence.clear();
10856 break;
10857 }
10858 SeqOp = Op;
10859 }
10860 if (!Sequence.empty())
10861 return true;
10862 }
10863
10864 assert(Sequence.empty() && "Failed to empty non-repeating sequence pattern");
10865 return false;
10866 }
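// Example (illustrative): a v8i32 build_vector (a, b, a, b, a, b, a, b)
// fails the SeqLen == 1 attempt (a != b) but succeeds at SeqLen == 2 with
// Sequence == {a, b}. Undef operands act as wildcards, so (a, undef, a, b)
// still matches {a, b}, with the undef lane reported in UndefElements.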
10867
10868 bool BuildVectorSDNode::getRepeatedSequence(SmallVectorImpl<SDValue> &Sequence,
10869 BitVector *UndefElements) const {
10870 APInt DemandedElts = APInt::getAllOnes(getNumOperands());
10871 return getRepeatedSequence(DemandedElts, Sequence, UndefElements);
10872 }
10873
10874 ConstantSDNode *
10875 BuildVectorSDNode::getConstantSplatNode(const APInt &DemandedElts,
10876 BitVector *UndefElements) const {
10877 return dyn_cast_or_null<ConstantSDNode>(
10878 getSplatValue(DemandedElts, UndefElements));
10879 }
10880
10881 ConstantSDNode *
10882 BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
10883 return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
10884 }
10885
10886 ConstantFPSDNode *
10887 BuildVectorSDNode::getConstantFPSplatNode(const APInt &DemandedElts,
10888 BitVector *UndefElements) const {
10889 return dyn_cast_or_null<ConstantFPSDNode>(
10890 getSplatValue(DemandedElts, UndefElements));
10891 }
10892
10893 ConstantFPSDNode *
10894 BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
10895 return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
10896 }
10897
10898 int32_t
10899 BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
10900 uint32_t BitWidth) const {
10901 if (ConstantFPSDNode *CN =
10902 dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
10903 bool IsExact;
10904 APSInt IntVal(BitWidth);
10905 const APFloat &APF = CN->getValueAPF();
10906 if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) !=
10907 APFloat::opOK ||
10908 !IsExact)
10909 return -1;
10910
10911 return IntVal.exactLogBase2();
10912 }
10913 return -1;
10914 }
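// Example (illustrative): a build_vector splat of 8.0 queried with
// BitWidth == 32 converts exactly to the integer 8 and returns log2(8) == 3;
// a splat of 0.75 or 3.0 is rejected (inexact conversion or not a power of
// two) and the function returns -1.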
10915
10916 bool BuildVectorSDNode::isConstant() const {
10917 for (const SDValue &Op : op_values()) {
10918 unsigned Opc = Op.getOpcode();
10919 if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
10920 return false;
10921 }
10922 return true;
10923 }
10924
10925 bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
10926 // Find the first non-undef value in the shuffle mask.
10927 unsigned i, e;
10928 for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
10929 /* search */;
10930
10931 // If all elements are undefined, this shuffle can be considered a splat
10932 // (although it should eventually get simplified away completely).
10933 if (i == e)
10934 return true;
10935
10936 // Make sure all remaining elements are either undef or the same as the first
10937 // non-undef value.
10938 for (int Idx = Mask[i]; i != e; ++i)
10939 if (Mask[i] >= 0 && Mask[i] != Idx)
10940 return false;
10941 return true;
10942 }
10943
10944 // Returns the SDNode if it is a constant integer BuildVector
10945 // or constant integer.
10946 SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) const {
10947 if (isa<ConstantSDNode>(N))
10948 return N.getNode();
10949 if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
10950 return N.getNode();
10951 // Treat a GlobalAddress supporting constant offset folding as a
10952 // constant integer.
10953 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N))
10954 if (GA->getOpcode() == ISD::GlobalAddress &&
10955 TLI->isOffsetFoldingLegal(GA))
10956 return GA;
10957 if ((N.getOpcode() == ISD::SPLAT_VECTOR) &&
10958 isa<ConstantSDNode>(N.getOperand(0)))
10959 return N.getNode();
10960 return nullptr;
10961 }
10962
10963 // Returns the SDNode if it is a constant float BuildVector
10964 // or constant float.
10965 SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) const {
10966 if (isa<ConstantFPSDNode>(N))
10967 return N.getNode();
10968
10969 if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
10970 return N.getNode();
10971
10972 return nullptr;
10973 }
10974
10975 void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
10976 assert(!Node->OperandList && "Node already has operands");
10977 assert(SDNode::getMaxNumOperands() >= Vals.size() &&
10978 "too many operands to fit into SDNode");
10979 SDUse *Ops = OperandRecycler.allocate(
10980 ArrayRecycler<SDUse>::Capacity::get(Vals.size()), OperandAllocator);
10981
10982 bool IsDivergent = false;
10983 for (unsigned I = 0; I != Vals.size(); ++I) {
10984 Ops[I].setUser(Node);
10985 Ops[I].setInitial(Vals[I]);
10986 if (Ops[I].Val.getValueType() != MVT::Other) // Skip Chain. It does not carry divergence.
10987 IsDivergent |= Ops[I].getNode()->isDivergent();
10988 }
10989 Node->NumOperands = Vals.size();
10990 Node->OperandList = Ops;
10991 if (!TLI->isSDNodeAlwaysUniform(Node)) {
10992 IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, DA);
10993 Node->SDNodeBits.IsDivergent = IsDivergent;
10994 }
10995 checkForCycles(Node);
10996 }
10997
10998 SDValue SelectionDAG::getTokenFactor(const SDLoc &DL,
10999 SmallVectorImpl<SDValue> &Vals) {
11000 size_t Limit = SDNode::getMaxNumOperands();
11001 while (Vals.size() > Limit) {
11002 unsigned SliceIdx = Vals.size() - Limit;
11003 auto ExtractedTFs = ArrayRef<SDValue>(Vals).slice(SliceIdx, Limit);
11004 SDValue NewTF = getNode(ISD::TokenFactor, DL, MVT::Other, ExtractedTFs);
11005 Vals.erase(Vals.begin() + SliceIdx, Vals.end());
11006 Vals.emplace_back(NewTF);
11007 }
11008 return getNode(ISD::TokenFactor, DL, MVT::Other, Vals);
11009 }
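// Example (illustrative, assuming SDNode::getMaxNumOperands() is 65535,
// i.e. std::numeric_limits<uint16_t>::max()): a list of 70000 chains is
// first reduced by folding the trailing 65535 values into one TokenFactor,
// leaving 4465 originals plus the new node; those 4466 operands then fit
// into the final TokenFactor. The slice is taken from the end of Vals so
// the earlier entries keep their relative order.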
11010
11011 SDValue SelectionDAG::getNeutralElement(unsigned Opcode, const SDLoc &DL,
11012 EVT VT, SDNodeFlags Flags) {
11013 switch (Opcode) {
11014 default:
11015 return SDValue();
11016 case ISD::ADD:
11017 case ISD::OR:
11018 case ISD::XOR:
11019 case ISD::UMAX:
11020 return getConstant(0, DL, VT);
11021 case ISD::MUL:
11022 return getConstant(1, DL, VT);
11023 case ISD::AND:
11024 case ISD::UMIN:
11025 return getAllOnesConstant(DL, VT);
11026 case ISD::SMAX:
11027 return getConstant(APInt::getSignedMinValue(VT.getSizeInBits()), DL, VT);
11028 case ISD::SMIN:
11029 return getConstant(APInt::getSignedMaxValue(VT.getSizeInBits()), DL, VT);
11030 case ISD::FADD:
11031 return getConstantFP(-0.0, DL, VT);
11032 case ISD::FMUL:
11033 return getConstantFP(1.0, DL, VT);
11034 case ISD::FMINNUM:
11035 case ISD::FMAXNUM: {
11036 // Neutral element for fminnum is NaN, Inf or FLT_MAX, depending on FMF.
11037 const fltSemantics &Semantics = EVTToAPFloatSemantics(VT);
11038 APFloat NeutralAF = !Flags.hasNoNaNs() ? APFloat::getQNaN(Semantics) :
11039 !Flags.hasNoInfs() ? APFloat::getInf(Semantics) :
11040 APFloat::getLargest(Semantics);
11041 if (Opcode == ISD::FMAXNUM)
11042 NeutralAF.changeSign();
11043
11044 return getConstantFP(NeutralAF, DL, VT);
11045 }
11046 }
11047 }
11048
11049 #ifndef NDEBUG
11050 static void checkForCyclesHelper(const SDNode *N,
11051 SmallPtrSetImpl<const SDNode*> &Visited,
11052 SmallPtrSetImpl<const SDNode*> &Checked,
11053 const llvm::SelectionDAG *DAG) {
11054 // If this node has already been checked, don't check it again.
11055 if (Checked.count(N))
11056 return;
11057
11058 // If a node has already been visited on this depth-first walk, reject it as
11059 // a cycle.
11060 if (!Visited.insert(N).second) {
11061 errs() << "Detected cycle in SelectionDAG\n";
11062 dbgs() << "Offending node:\n";
11063 N->dumprFull(DAG); dbgs() << "\n";
11064 abort();
11065 }
11066
11067 for (const SDValue &Op : N->op_values())
11068 checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);
11069
11070 Checked.insert(N);
11071 Visited.erase(N);
11072 }
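// Note (illustrative): Visited acts as the "grey" set of the depth-first
// walk (nodes on the current path) and Checked as the "black" set (nodes
// whose whole operand subgraph has already been verified). Revisiting a
// grey node means an operand chain eventually feeds back into itself, which
// is a cycle in what must be a DAG, so the check aborts after dumping the
// offending node.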
11073 #endif
11074
11075 void llvm::checkForCycles(const llvm::SDNode *N,
11076 const llvm::SelectionDAG *DAG,
11077 bool force) {
11078 #ifndef NDEBUG
11079 bool check = force;
11080 #ifdef EXPENSIVE_CHECKS
11081 check = true;
11082 #endif // EXPENSIVE_CHECKS
11083 if (check) {
11084 assert(N && "Checking nonexistent SDNode");
11085 SmallPtrSet<const SDNode*, 32> visited;
11086 SmallPtrSet<const SDNode*, 32> checked;
11087 checkForCyclesHelper(N, visited, checked, DAG);
11088 }
11089 #endif // !NDEBUG
11090 }
11091
11092 void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
11093 checkForCycles(DAG->getRoot().getNode(), DAG, force);
11094 }
11095