//===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <set>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeInserted(SDNode *) {}

void SelectionDAG::DAGNodeDeletedListener::anchor() {}

#define DEBUG_TYPE "selectiondag"

static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt",
       cl::Hidden, cl::init(true),
       cl::desc("Gang up loads and stores generated by inlining of memcpy"));

static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max",
       cl::desc("Number limit for gluing ld/st of memcpy."),
       cl::Hidden, cl::init(0));

static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) {
  LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G););
}

//===----------------------------------------------------------------------===//
//                              ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}

bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}
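
// Illustrative example (not part of the original source): checking whether a
// double constant survives narrowing to f32. isValueValidForType(MVT::f32,
// APFloat(1.0E+300)) returns false because the conversion overflows, while
// isValueValidForType(MVT::f32, APFloat(0.5)) returns true since 0.5 is
// exactly representable in single precision.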

//===----------------------------------------------------------------------===//
//                              ISD Namespace
//===----------------------------------------------------------------------===//

bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
  if (N->getOpcode() == ISD::SPLAT_VECTOR) {
    unsigned EltSize =
        N->getValueType(0).getVectorElementType().getSizeInBits();
    if (auto *Op0 = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      SplatVal = Op0->getAPIntValue().truncOrSelf(EltSize);
      return true;
    }
    if (auto *Op0 = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) {
      SplatVal = Op0->getValueAPF().bitcastToAPInt().truncOrSelf(EltSize);
      return true;
    }
  }

  auto *BV = dyn_cast<BuildVectorSDNode>(N);
  if (!BV)
    return false;

  APInt SplatUndef;
  unsigned SplatBitSize;
  bool HasUndefs;
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
                             EltSize) &&
         EltSize == SplatBitSize;
}

// FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
// specializations of the more general isConstantSplatVector()?

bool ISD::isConstantSplatVectorAllOnes(const SDNode *N, bool BuildVectorOnly) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
    APInt SplatVal;
    return isConstantSplatVector(N, SplatVal) && SplatVal.isAllOnesValue();
  }

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).isUndef())
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
      return false;
  return true;
}

bool ISD::isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (!BuildVectorOnly && N->getOpcode() == ISD::SPLAT_VECTOR) {
    APInt SplatVal;
    return isConstantSplatVector(N, SplatVal) && SplatVal.isNullValue();
  }

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  bool IsAllUndef = true;
  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    IsAllUndef = false;
    // Do not accept build_vectors that aren't all constants or which have
    // non-0 elements. We have to be a bit careful here, as the type of the
    // constant may not be the same as the type of the vector elements due to
    // type legalization (the elements are promoted to a legal type for the
    // target and a vector of a type may be legal when the base element type
    // is not).
    // We only want to check enough bits to cover the vector elements, because
    // we care if the resultant vector is all zeros, not whether the individual
    // constants are.
    unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
      if (CN->getAPIntValue().countTrailingZeros() < EltSize)
        return false;
    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
      if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
        return false;
    } else
      return false;
  }

  // Do not accept an all-undef vector.
  if (IsAllUndef)
    return false;
  return true;
}

bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  return isConstantSplatVectorAllOnes(N, /*BuildVectorOnly*/ true);
}

bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  return isConstantSplatVectorAllZeros(N, /*BuildVectorOnly*/ true);
}

bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantFPSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;
  return all_of(N->op_values(), [](SDValue Op) { return Op.isUndef(); });
}

bool ISD::matchUnaryPredicate(SDValue Op,
                              std::function<bool(ConstantSDNode *)> Match,
                              bool AllowUndefs) {
  // FIXME: Add support for scalar UNDEF cases?
  if (auto *Cst = dyn_cast<ConstantSDNode>(Op))
    return Match(Cst);

  // FIXME: Add support for vector UNDEF cases?
  if (ISD::BUILD_VECTOR != Op.getOpcode() &&
      ISD::SPLAT_VECTOR != Op.getOpcode())
    return false;

  EVT SVT = Op.getValueType().getScalarType();
  for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
    if (AllowUndefs && Op.getOperand(i).isUndef()) {
      if (!Match(nullptr))
        return false;
      continue;
    }

    auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(i));
    if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst))
      return false;
  }
  return true;
}
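
// A typical use (illustrative sketch, not from the original file): testing
// whether a scalar constant, or every lane of a constant build_vector, is a
// power of two:
//
//   bool AllPow2 = ISD::matchUnaryPredicate(
//       N->getOperand(1),
//       [](ConstantSDNode *C) { return C->getAPIntValue().isPowerOf2(); });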

bool ISD::matchBinaryPredicate(
    SDValue LHS, SDValue RHS,
    std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
    bool AllowUndefs, bool AllowTypeMismatch) {
  if (!AllowTypeMismatch && LHS.getValueType() != RHS.getValueType())
    return false;

  // TODO: Add support for scalar UNDEF cases?
  if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
    if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
      return Match(LHSCst, RHSCst);

  // TODO: Add support for vector UNDEF cases?
  if (ISD::BUILD_VECTOR != LHS.getOpcode() ||
      ISD::BUILD_VECTOR != RHS.getOpcode())
    return false;

  EVT SVT = LHS.getValueType().getScalarType();
  for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
    SDValue LHSOp = LHS.getOperand(i);
    SDValue RHSOp = RHS.getOperand(i);
    bool LHSUndef = AllowUndefs && LHSOp.isUndef();
    bool RHSUndef = AllowUndefs && RHSOp.isUndef();
    auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
    auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
    if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
      return false;
    if (!AllowTypeMismatch && (LHSOp.getValueType() != SVT ||
                               LHSOp.getValueType() != RHSOp.getValueType()))
      return false;
    if (!Match(LHSCst, RHSCst))
      return false;
  }
  return true;
}

ISD::NodeType ISD::getVecReduceBaseOpcode(unsigned VecReduceOpcode) {
  switch (VecReduceOpcode) {
  default:
    llvm_unreachable("Expected VECREDUCE opcode");
  case ISD::VECREDUCE_FADD:
  case ISD::VECREDUCE_SEQ_FADD:
    return ISD::FADD;
  case ISD::VECREDUCE_FMUL:
  case ISD::VECREDUCE_SEQ_FMUL:
    return ISD::FMUL;
  case ISD::VECREDUCE_ADD:
    return ISD::ADD;
  case ISD::VECREDUCE_MUL:
    return ISD::MUL;
  case ISD::VECREDUCE_AND:
    return ISD::AND;
  case ISD::VECREDUCE_OR:
    return ISD::OR;
  case ISD::VECREDUCE_XOR:
    return ISD::XOR;
  case ISD::VECREDUCE_SMAX:
    return ISD::SMAX;
  case ISD::VECREDUCE_SMIN:
    return ISD::SMIN;
  case ISD::VECREDUCE_UMAX:
    return ISD::UMAX;
  case ISD::VECREDUCE_UMIN:
    return ISD::UMIN;
  case ISD::VECREDUCE_FMAX:
    return ISD::FMAXNUM;
  case ISD::VECREDUCE_FMIN:
    return ISD::FMINNUM;
  }
}

bool ISD::isVPOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    return false;
#define BEGIN_REGISTER_VP_SDNODE(SDOPC, ...)                                   \
  case ISD::SDOPC:                                                             \
    return true;
#include "llvm/IR/VPIntrinsics.def"
  }
}

/// The operand position of the vector mask.
Optional<unsigned> ISD::getVPMaskIdx(unsigned Opcode) {
  switch (Opcode) {
  default:
    return None;
#define BEGIN_REGISTER_VP_SDNODE(SDOPC, LEGALPOS, TDNAME, MASKPOS, ...)        \
  case ISD::SDOPC:                                                             \
    return MASKPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}

/// The operand position of the explicit vector length parameter.
Optional<unsigned> ISD::getVPExplicitVectorLengthIdx(unsigned Opcode) {
  switch (Opcode) {
  default:
    return None;
#define BEGIN_REGISTER_VP_SDNODE(SDOPC, LEGALPOS, TDNAME, MASKPOS, EVLPOS)     \
  case ISD::SDOPC:                                                             \
    return EVLPOS;
#include "llvm/IR/VPIntrinsics.def"
  }
}

ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}

ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}

static ISD::CondCode getSetCCInverseImpl(ISD::CondCode Op, bool isIntegerLike) {
  unsigned Operation = Op;
  if (isIntegerLike)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}
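
// For example, the integer inverse of SETLT is SETGE (the L, G and E bits all
// flip), while the floating-point inverse of SETOLT is SETUGE: the U bit flips
// as well, so the inverted comparison is true when either operand is a NaN.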

ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, EVT Type) {
  return getSetCCInverseImpl(Op, Type.isInteger());
}

ISD::CondCode ISD::GlobalISel::getSetCCInverse(ISD::CondCode Op,
                                               bool isIntegerLike) {
  return getSetCCInverseImpl(Op, isIntegerLike);
}

/// For an integer comparison, return 1 if the comparison is a signed operation
/// and 2 if the result is an unsigned comparison. Return zero if the operation
/// does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       EVT Type) {
  bool IsInteger = Type.isInteger();
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set, then the resultant comparison DOES suddenly
  // care about orderedness, and it is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;  // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (IsInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}
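
// For example, OR'ing SETGT with SETEQ sets both the G and E bits and yields
// SETGE, and OR'ing SETOLT with SETUO sets the U bit on top of L, yielding
// SETULT.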

ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        EVT Type) {
  bool IsInteger = Type.isInteger();
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (IsInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}
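
// For example, AND'ing SETGE with SETNE keeps only the G bit and yields
// SETGT, while AND'ing SETUGE with SETULE keeps the U and E bits, giving
// SETUEQ, which the integer canonicalization above rewrites to SETEQ.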

//===----------------------------------------------------------------------===//
//                           SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDValue> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDUse> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}
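
// Note: the two overloads above must hash an operand list identically. A
// lookup keyed by an ArrayRef<SDValue> has to land in the same FoldingSet
// bucket as an existing node whose operands are stored as SDUse objects;
// otherwise CSE would silently create duplicate nodes.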

static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
                          SDVTList VTList, ArrayRef<SDValue> OpList) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList);
}

/// If this is an SDNode with special info, add this info to the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
  case ISD::MCSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP:
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::LIFETIME_START:
  case ISD::LIFETIME_END:
    if (cast<LifetimeSDNode>(N)->hasOffset()) {
      ID.AddInteger(cast<LifetimeSDNode>(N)->getSize());
      ID.AddInteger(cast<LifetimeSDNode>(N)->getOffset());
    }
    break;
  case ISD::PSEUDO_PROBE:
    ID.AddInteger(cast<PseudoProbeSDNode>(N)->getGuid());
    ID.AddInteger(cast<PseudoProbeSDNode>(N)->getIndex());
    ID.AddInteger(cast<PseudoProbeSDNode>(N)->getAttributes());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlign().value());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MLOAD: {
    const MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N);
    ID.AddInteger(MLD->getMemoryVT().getRawBits());
    ID.AddInteger(MLD->getRawSubclassData());
    ID.AddInteger(MLD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MSTORE: {
    const MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
    ID.AddInteger(MST->getMemoryVT().getRawBits());
    ID.AddInteger(MST->getRawSubclassData());
    ID.AddInteger(MST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MGATHER: {
    const MaskedGatherSDNode *MG = cast<MaskedGatherSDNode>(N);
    ID.AddInteger(MG->getMemoryVT().getRawBits());
    ID.AddInteger(MG->getRawSubclassData());
    ID.AddInteger(MG->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MSCATTER: {
    const MaskedScatterSDNode *MS = cast<MaskedScatterSDNode>(N);
    ID.AddInteger(MS->getMemoryVT().getRawBits());
    ID.AddInteger(MS->getRawSubclassData());
    ID.AddInteger(MS->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_CLR:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}

/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->ops());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}

//===----------------------------------------------------------------------===//
//                              SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true;  // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true;  // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true;  // Never CSE anything that produces a flag.

  return false;
}

/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes), that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (SDNode &Node : allnodes())
    if (Node.use_empty())
      DeadNodes.push_back(&Node);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();
    // Skip to the next node if we've already managed to delete this one. This
    // could happen if replacing a node causes a node previously added to the
    // worklist to be deleted.
    if (N->getOpcode() == ISD::DELETED_NODE)
      continue;

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N){
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted. (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N->getIterator() != AllNodes.begin() &&
         "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SDDbgInfo::add(SDDbgValue *V, bool isParameter) {
  assert(!(V->isVariadic() && isParameter));
  if (isParameter)
    ByvalParmDbgValues.push_back(V);
  else
    DbgValues.push_back(V);
  for (const SDNode *Node : V->getSDNodes())
    if (Node)
      DbgValMap[Node].push_back(V);
}

void SDDbgInfo::erase(const SDNode *Node) {
  DbgValMapType::iterator I = DbgValMap.find(Node);
  if (I == DbgValMap.end())
    return;
  for (auto &Val: I->second)
    Val->setIsInvalidated();
  DbgValMap.erase(I);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  // If we have operands, deallocate them.
  removeOperands(N);

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  // FIXME: There are places in SDag that have grown a dependency on the opcode
  // value in the released node.
  __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
  N->NodeType = ISD::DELETED_NODE;

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate
  // them and forget about that node.
  DbgInfo->erase(N);
}

#ifndef NDEBUG
/// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (const SDUse &Op : N->ops()) {
      assert((Op.getValueType() == EltVT ||
              (EltVT.isInteger() && Op.getValueType().isInteger() &&
               EltVT.bitsLE(Op.getValueType()))) &&
             "Wrong operand type!");
      assert(Op.getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}
#endif // NDEBUG

/// Insert a newly allocated node into the DAG.
///
/// Handles insertion into the all nodes list and CSE map, as well as
/// verification and other common operations when a new node is allocated.
void SelectionDAG::InsertNode(SDNode *N) {
  AllNodes.push_back(N);
#ifndef NDEBUG
  N->PersistentId = NextPersistentId++;
  VerifySDNode(N);
#endif
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeInserted(N);
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
/// correspond to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future request for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>(
        ESN->getSymbol(), ESN->getTargetFlags()));
    break;
  }
  case ISD::MCSymbol: {
    auto *MCSN = cast<MCSymbolSDNode>(N);
    Erased = MCSymbols.erase(MCSN->getMCSymbol());
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

Align SelectionDAG::getEVTAlign(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return getDataLayout().getABITypeAlign(Ty);
}

// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
    : TM(tm), OptLevel(OL),
      EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
      Root(getEntryNode()) {
  InsertNode(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &NewMF,
                        OptimizationRemarkEmitter &NewORE,
                        Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
                        LegacyDivergenceAnalysis * Divergence,
                        ProfileSummaryInfo *PSIin,
                        BlockFrequencyInfo *BFIin) {
  MF = &NewMF;
  SDAGISelPass = PassPtr;
  ORE = &NewORE;
  TLI = getSubtarget().getTargetLowering();
  TSI = getSubtarget().getSelectionDAGInfo();
  LibInfo = LibraryInfo;
  Context = &MF->getFunction().getContext();
  DA = Divergence;
  PSI = PSIin;
  BFI = BFIin;
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  delete DbgInfo;
}

bool SelectionDAG::shouldOptForSize() const {
  return MF->getFunction().hasOptSize() ||
      llvm::shouldOptimizeForSize(FLI->MBB->getBasicBlock(), PSI, BFI);
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(&AllNodes.front());
#ifndef NDEBUG
  NextPersistentId = 0;
#endif
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    default: break;
    case ISD::Constant:
    case ISD::ConstantFP:
      llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
                       "debug location. Use another overload.");
    }
  }
  return N;
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          const SDLoc &DL, void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    case ISD::Constant:
    case ISD::ConstantFP:
      // Erase debug location from the node if the node is used at several
      // different places. Do not propagate one location to all uses as it
      // will cause a worse single stepping debugging experience.
      if (N->getDebugLoc() != DL.getDebugLoc())
        N->setDebugLoc(DebugLoc());
      break;
    default:
      // When the node's point of use is located earlier in the instruction
      // sequence than its prior point of use, update its debug info to the
      // earlier location.
      if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
        N->setDebugLoc(DL.getDebugLoc());
      break;
    }
  }
  return N;
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  MCSymbols.clear();
  SDCallSiteDbgInfo.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(nullptr));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(nullptr));

  EntryNode.UseList = nullptr;
  InsertNode(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}

SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType())
             ? getNode(ISD::FP_EXTEND, DL, VT, Op)
             : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL));
}
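
// Explanatory note (not in the original): the trailing getIntPtrConstant(0,
// DL) operand on the FP_ROUND path is the TRUNC flag. A value of 0 means the
// rounding step is not known to be exact, so optimizers may not assume the
// value is unchanged by the round.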

std::pair<SDValue, SDValue>
SelectionDAG::getStrictFPExtendOrRound(SDValue Op, SDValue Chain,
                                       const SDLoc &DL, EVT VT) {
  assert(!VT.bitsEq(Op.getValueType()) &&
         "Strict no-op FP extend/round not allowed.");
  SDValue Res =
      VT.bitsGT(Op.getValueType())
          ? getNode(ISD::STRICT_FP_EXTEND, DL, {VT, MVT::Other}, {Chain, Op})
          : getNode(ISD::STRICT_FP_ROUND, DL, {VT, MVT::Other},
                    {Chain, Op, getIntPtrConstant(0, DL)});

  return std::pair<SDValue, SDValue>(Res, SDValue(Res.getNode(), 1));
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
                                        EVT OpVT) {
  if (VT.bitsLE(Op.getValueType()))
    return getNode(ISD::TRUNCATE, SL, VT, Op);

  TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
  return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  EVT OpVT = Op.getValueType();
  assert(VT.isInteger() && OpVT.isInteger() &&
         "Cannot getZeroExtendInReg FP types");
  assert(VT.isVector() == OpVT.isVector() &&
         "getZeroExtendInReg type should be vector iff the operand "
         "type is vector!");
  assert((!VT.isVector() ||
          VT.getVectorElementCount() == OpVT.getVectorElementCount()) &&
         "Vector element counts must match in getZeroExtendInReg");
  assert(VT.bitsLE(OpVT) && "Not extending!");
  if (OpVT == VT)
    return Op;
  APInt Imm = APInt::getLowBitsSet(OpVT.getScalarSizeInBits(),
                                   VT.getScalarSizeInBits());
  return getNode(ISD::AND, DL, OpVT, Op, getConstant(Imm, DL, OpVT));
}
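
// For example: zero-extending the low 16 bits in-register within an i32
// materializes as (AND Op, 0xFFFF) rather than a truncate/extend pair.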

SDValue SelectionDAG::getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  // Only unsigned pointer semantics are supported right now. In the future
  // this might delegate to TLI to check pointer signedness.
  return getZExtOrTrunc(Op, DL, VT);
}

SDValue SelectionDAG::getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  // Only unsigned pointer semantics are supported right now. In the future
  // this might delegate to TLI to check pointer signedness.
  return getZeroExtendInReg(Op, DL, VT);
}

/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
      getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  SDValue TrueValue = getBoolConstant(true, DL, VT, VT);
  return getNode(ISD::XOR, DL, VT, Val, TrueValue);
}

SDValue SelectionDAG::getBoolConstant(bool V, const SDLoc &DL, EVT VT,
                                      EVT OpVT) {
  if (!V)
    return getConstant(0, DL, VT);

  switch (TLI->getBooleanContents(OpVT)) {
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::UndefinedBooleanContent:
    return getConstant(1, DL, VT);
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return getAllOnesConstant(DL, VT);
  }
  llvm_unreachable("Unexpected boolean content enum!");
}

SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
                                  EVT VT, bool isT, bool isO) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM. In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
                           TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
           TLI->getTypeAction(*getContext(), EltVT) ==
               TargetLowering::TypeExpandInteger) {
    const APInt &NewVal = Elt->getValue();
    EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();

    // For scalable vectors, try to use a SPLAT_VECTOR_PARTS node.
    if (VT.isScalableVector()) {
      assert(EltVT.getSizeInBits() % ViaEltSizeInBits == 0 &&
             "Can only handle an even split!");
      unsigned Parts = EltVT.getSizeInBits() / ViaEltSizeInBits;

      SmallVector<SDValue, 2> ScalarParts;
      for (unsigned i = 0; i != Parts; ++i)
        ScalarParts.push_back(getConstant(
            NewVal.lshr(i * ViaEltSizeInBits).trunc(ViaEltSizeInBits), DL,
            ViaEltVT, isT, isO));

      return getNode(ISD::SPLAT_VECTOR_PARTS, DL, VT, ScalarParts);
    }

    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
    EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);

    // Check the temporary vector is the correct size. If this fails then
    // getTypeToTransformTo() probably returned a type whose size (in bits)
    // isn't a power-of-2 factor of the requested type size.
    assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());

    SmallVector<SDValue, 2> EltParts;
    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
      EltParts.push_back(getConstant(
          NewVal.lshr(i * ViaEltSizeInBits).zextOrTrunc(ViaEltSizeInBits), DL,
          ViaEltVT, isT, isO));
    }

    // EltParts is currently in little endian order. If we actually want
    // big-endian order then reverse it now.
    if (getDataLayout().isBigEndian())
      std::reverse(EltParts.begin(), EltParts.end());

    // The elements must be reversed when the element order is different
    // to the endianness of the elements (because the BITCAST is itself a
    // vector shuffle in this situation). However, we do not need any code to
    // perform this reversal because getConstant() is producing a vector
    // splat.
    // This situation occurs in MIPS MSA.

    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
      llvm::append_range(Ops, EltParts);

    SDValue V =
        getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
    return V;
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
  ID.AddPointer(Elt);
  ID.AddBoolean(isO);
  void *IP = nullptr;
  SDNode *N = nullptr;
  if ((N = FindNodeOrInsertPos(ID, DL, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = newSDNode<ConstantSDNode>(isT, isO, Elt, EltVT);
    CSEMap.InsertNode(N, IP);
    InsertNode(N);
    NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this);
  }

  SDValue Result(N, 0);
  if (VT.isScalableVector())
    Result = getSplatVector(VT, DL, Result);
  else if (VT.isVector())
    Result = getSplatBuildVector(VT, DL, Result);

  return Result;
}
1470
1471 SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
1472 bool isTarget) {
1473 return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
1474 }
1475
1476 SDValue SelectionDAG::getShiftAmountConstant(uint64_t Val, EVT VT,
1477 const SDLoc &DL, bool LegalTypes) {
1478 assert(VT.isInteger() && "Shift amount is not an integer type!");
1479 EVT ShiftVT = TLI->getShiftAmountTy(VT, getDataLayout(), LegalTypes);
1480 return getConstant(Val, DL, ShiftVT);
1481 }
1482
1483 SDValue SelectionDAG::getVectorIdxConstant(uint64_t Val, const SDLoc &DL,
1484 bool isTarget) {
1485 return getConstant(Val, DL, TLI->getVectorIdxTy(getDataLayout()), isTarget);
1486 }
1487
1488 SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
1489 bool isTarget) {
1490 return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
1491 }
1492
1493 SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
1494 EVT VT, bool isTarget) {
1495 assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");
1496
1497 EVT EltVT = VT.getScalarType();
1498
1499 // Do the map lookup using the actual bit pattern for the floating point
1500 // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
1501 // we don't have issues with SNaNs.
1502 unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
1503 FoldingSetNodeID ID;
1504 AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1505 ID.AddPointer(&V);
1506 void *IP = nullptr;
1507 SDNode *N = nullptr;
1508 if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1509 if (!VT.isVector())
1510 return SDValue(N, 0);
1511
1512 if (!N) {
1513 N = newSDNode<ConstantFPSDNode>(isTarget, &V, EltVT);
1514 CSEMap.InsertNode(N, IP);
1515 InsertNode(N);
1516 }
1517
1518 SDValue Result(N, 0);
1519 if (VT.isScalableVector())
1520 Result = getSplatVector(VT, DL, Result);
1521 else if (VT.isVector())
1522 Result = getSplatBuildVector(VT, DL, Result);
1523 NewSDValueDbgMsg(Result, "Creating fp constant: ", this);
1524 return Result;
1525 }
1526
1527 SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
1528 bool isTarget) {
1529 EVT EltVT = VT.getScalarType();
1530 if (EltVT == MVT::f32)
1531 return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
1532 if (EltVT == MVT::f64)
1533 return getConstantFP(APFloat(Val), DL, VT, isTarget);
1534 if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
1535 EltVT == MVT::f16 || EltVT == MVT::bf16) {
1536 bool Ignored;
1537 APFloat APF = APFloat(Val);
1538 APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
1539 &Ignored);
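// (Illustrative: requesting 0.1 as f16 yields the nearest representable
// value, ~0.0999756.)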
1540 return getConstantFP(APF, DL, VT, isTarget);
1541 }
1542 llvm_unreachable("Unsupported type in getConstantFP");
1543 }
1544
1545 SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
1546 EVT VT, int64_t Offset, bool isTargetGA,
1547 unsigned TargetFlags) {
1548 assert((TargetFlags == 0 || isTargetGA) &&
1549 "Cannot set target flags on target-independent globals");
1550
1551 // Truncate (with sign-extension) the offset value to the pointer size.
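// (Illustrative: with 32-bit pointers, an offset of 0xFFFFFFFF is treated as
// -1 rather than 4294967295.)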
1552 unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
1553 if (BitWidth < 64)
1554 Offset = SignExtend64(Offset, BitWidth);
1555
1556 unsigned Opc;
1557 if (GV->isThreadLocal())
1558 Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
1559 else
1560 Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;
1561
1562 FoldingSetNodeID ID;
1563 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1564 ID.AddPointer(GV);
1565 ID.AddInteger(Offset);
1566 ID.AddInteger(TargetFlags);
1567 void *IP = nullptr;
1568 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
1569 return SDValue(E, 0);
1570
1571 auto *N = newSDNode<GlobalAddressSDNode>(
1572 Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
1573 CSEMap.InsertNode(N, IP);
1574 InsertNode(N);
1575 return SDValue(N, 0);
1576 }
1577
1578 SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
1579 unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
1580 FoldingSetNodeID ID;
1581 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1582 ID.AddInteger(FI);
1583 void *IP = nullptr;
1584 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1585 return SDValue(E, 0);
1586
1587 auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
1588 CSEMap.InsertNode(N, IP);
1589 InsertNode(N);
1590 return SDValue(N, 0);
1591 }
1592
1593 SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
1594 unsigned TargetFlags) {
1595 assert((TargetFlags == 0 || isTarget) &&
1596 "Cannot set target flags on target-independent jump tables");
1597 unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
1598 FoldingSetNodeID ID;
1599 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1600 ID.AddInteger(JTI);
1601 ID.AddInteger(TargetFlags);
1602 void *IP = nullptr;
1603 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1604 return SDValue(E, 0);
1605
1606 auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
1607 CSEMap.InsertNode(N, IP);
1608 InsertNode(N);
1609 return SDValue(N, 0);
1610 }
1611
1612 SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
1613 MaybeAlign Alignment, int Offset,
1614 bool isTarget, unsigned TargetFlags) {
1615 assert((TargetFlags == 0 || isTarget) &&
1616 "Cannot set target flags on target-independent globals");
1617 if (!Alignment)
1618 Alignment = shouldOptForSize()
1619 ? getDataLayout().getABITypeAlign(C->getType())
1620 : getDataLayout().getPrefTypeAlign(C->getType());
1621 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1622 FoldingSetNodeID ID;
1623 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1624 ID.AddInteger(Alignment->value());
1625 ID.AddInteger(Offset);
1626 ID.AddPointer(C);
1627 ID.AddInteger(TargetFlags);
1628 void *IP = nullptr;
1629 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1630 return SDValue(E, 0);
1631
1632 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment,
1633 TargetFlags);
1634 CSEMap.InsertNode(N, IP);
1635 InsertNode(N);
1636 SDValue V = SDValue(N, 0);
1637 NewSDValueDbgMsg(V, "Creating new constant pool: ", this);
1638 return V;
1639 }
1640
1641 SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
1642 MaybeAlign Alignment, int Offset,
1643 bool isTarget, unsigned TargetFlags) {
1644 assert((TargetFlags == 0 || isTarget) &&
1645 "Cannot set target flags on target-independent globals");
1646 if (!Alignment)
1647 Alignment = getDataLayout().getPrefTypeAlign(C->getType());
1648 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1649 FoldingSetNodeID ID;
1650 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1651 ID.AddInteger(Alignment->value());
1652 ID.AddInteger(Offset);
1653 C->addSelectionDAGCSEId(ID);
1654 ID.AddInteger(TargetFlags);
1655 void *IP = nullptr;
1656 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1657 return SDValue(E, 0);
1658
1659 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, *Alignment,
1660 TargetFlags);
1661 CSEMap.InsertNode(N, IP);
1662 InsertNode(N);
1663 return SDValue(N, 0);
1664 }
1665
1666 SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
1667 unsigned TargetFlags) {
1668 FoldingSetNodeID ID;
1669 AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
1670 ID.AddInteger(Index);
1671 ID.AddInteger(Offset);
1672 ID.AddInteger(TargetFlags);
1673 void *IP = nullptr;
1674 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1675 return SDValue(E, 0);
1676
1677 auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
1678 CSEMap.InsertNode(N, IP);
1679 InsertNode(N);
1680 return SDValue(N, 0);
1681 }
1682
1683 SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
1684 FoldingSetNodeID ID;
1685 AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
1686 ID.AddPointer(MBB);
1687 void *IP = nullptr;
1688 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1689 return SDValue(E, 0);
1690
1691 auto *N = newSDNode<BasicBlockSDNode>(MBB);
1692 CSEMap.InsertNode(N, IP);
1693 InsertNode(N);
1694 return SDValue(N, 0);
1695 }
1696
1697 SDValue SelectionDAG::getValueType(EVT VT) {
1698 if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
1699 ValueTypeNodes.size())
1700 ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);
1701
1702 SDNode *&N = VT.isExtended() ?
1703 ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];
1704
1705 if (N) return SDValue(N, 0);
1706 N = newSDNode<VTSDNode>(VT);
1707 InsertNode(N);
1708 return SDValue(N, 0);
1709 }
1710
1711 SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
1712 SDNode *&N = ExternalSymbols[Sym];
1713 if (N) return SDValue(N, 0);
1714 N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
1715 InsertNode(N);
1716 return SDValue(N, 0);
1717 }
1718
1719 SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
1720 SDNode *&N = MCSymbols[Sym];
1721 if (N)
1722 return SDValue(N, 0);
1723 N = newSDNode<MCSymbolSDNode>(Sym, VT);
1724 InsertNode(N);
1725 return SDValue(N, 0);
1726 }
1727
1728 SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
1729 unsigned TargetFlags) {
1730 SDNode *&N =
1731 TargetExternalSymbols[std::pair<std::string, unsigned>(Sym, TargetFlags)];
1732 if (N) return SDValue(N, 0);
1733 N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
1734 InsertNode(N);
1735 return SDValue(N, 0);
1736 }
1737
1738 SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
1739 if ((unsigned)Cond >= CondCodeNodes.size())
1740 CondCodeNodes.resize(Cond+1);
1741
1742 if (!CondCodeNodes[Cond]) {
1743 auto *N = newSDNode<CondCodeSDNode>(Cond);
1744 CondCodeNodes[Cond] = N;
1745 InsertNode(N);
1746 }
1747
1748 return SDValue(CondCodeNodes[Cond], 0);
1749 }
1750
1751 SDValue SelectionDAG::getStepVector(const SDLoc &DL, EVT ResVT, SDValue Step) {
1752 if (ResVT.isScalableVector())
1753 return getNode(ISD::STEP_VECTOR, DL, ResVT, Step);
1754
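// For fixed-width vectors the sequence is materialized directly; e.g.
// (illustrative) getStepVector(DL, v4i32, Step=3) builds
// BUILD_VECTOR <0, 3, 6, 9>.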
1755 EVT OpVT = Step.getValueType();
1756 APInt StepVal = cast<ConstantSDNode>(Step)->getAPIntValue();
1757 SmallVector<SDValue, 16> OpsStepConstants;
1758 for (uint64_t i = 0; i < ResVT.getVectorNumElements(); i++)
1759 OpsStepConstants.push_back(getConstant(StepVal * i, DL, OpVT));
1760 return getBuildVector(ResVT, DL, OpsStepConstants);
1761 }
1762
1763 /// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
1764 /// point at N1 to point at N2 and indices that point at N2 to point at N1.
1765 static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
1766 std::swap(N1, N2);
1767 ShuffleVectorSDNode::commuteMask(M);
1768 }
1769
1770 SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
1771 SDValue N2, ArrayRef<int> Mask) {
1772 assert(VT.getVectorNumElements() == Mask.size() &&
1773 "Must have the same number of vector elements as mask elements!");
1774 assert(VT == N1.getValueType() && VT == N2.getValueType() &&
1775 "Invalid VECTOR_SHUFFLE");
1776
1777 // Canonicalize shuffle undef, undef -> undef
1778 if (N1.isUndef() && N2.isUndef())
1779 return getUNDEF(VT);
1780
1781 // Validate that all indices in Mask are within the range of the elements
1782 // input to the shuffle.
1783 int NElts = Mask.size();
1784 assert(llvm::all_of(Mask,
1785 [&](int M) { return M < (NElts * 2) && M >= -1; }) &&
1786 "Index out of range");
1787
1788 // Copy the mask so we can do any needed cleanup.
1789 SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end());
1790
1791 // Canonicalize shuffle v, v -> v, undef
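// (Illustrative: with four elements, shuffle(v, v, <0,5,2,7>) becomes
// shuffle(v, undef, <0,1,2,3>).)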
1792 if (N1 == N2) {
1793 N2 = getUNDEF(VT);
1794 for (int i = 0; i != NElts; ++i)
1795 if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
1796 }
1797
1798 // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
1799 if (N1.isUndef())
1800 commuteShuffle(N1, N2, MaskVec);
1801
1802 if (TLI->hasVectorBlend()) {
1803 // If shuffling a splat, try to blend the splat instead. We do this here so
1804 // that even when this arises during lowering we don't have to re-handle it.
1805 auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
1806 BitVector UndefElements;
1807 SDValue Splat = BV->getSplatValue(&UndefElements);
1808 if (!Splat)
1809 return;
1810
1811 for (int i = 0; i < NElts; ++i) {
1812 if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
1813 continue;
1814
1815 // If this input comes from undef, mark it as such.
1816 if (UndefElements[MaskVec[i] - Offset]) {
1817 MaskVec[i] = -1;
1818 continue;
1819 }
1820
1821 // If we can blend a non-undef lane, use that instead.
1822 if (!UndefElements[i])
1823 MaskVec[i] = i + Offset;
1824 }
1825 };
1826 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
1827 BlendSplat(N1BV, 0);
1828 if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
1829 BlendSplat(N2BV, NElts);
1830 }
1831
1832 // Canonicalize all index into lhs, -> shuffle lhs, undef
1833 // Canonicalize all index into rhs, -> shuffle rhs, undef
1834 bool AllLHS = true, AllRHS = true;
1835 bool N2Undef = N2.isUndef();
1836 for (int i = 0; i != NElts; ++i) {
1837 if (MaskVec[i] >= NElts) {
1838 if (N2Undef)
1839 MaskVec[i] = -1;
1840 else
1841 AllLHS = false;
1842 } else if (MaskVec[i] >= 0) {
1843 AllRHS = false;
1844 }
1845 }
1846 if (AllLHS && AllRHS)
1847 return getUNDEF(VT);
1848 if (AllLHS && !N2Undef)
1849 N2 = getUNDEF(VT);
1850 if (AllRHS) {
1851 N1 = getUNDEF(VT);
1852 commuteShuffle(N1, N2, MaskVec);
1853 }
1854 // Reset our undef status after accounting for the mask.
1855 N2Undef = N2.isUndef();
1856 // Re-check whether both sides ended up undef.
1857 if (N1.isUndef() && N2Undef)
1858 return getUNDEF(VT);
1859
1860 // If this is an identity shuffle, return the (first) input operand.
1861 bool Identity = true, AllSame = true;
1862 for (int i = 0; i != NElts; ++i) {
1863 if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
1864 if (MaskVec[i] != MaskVec[0]) AllSame = false;
1865 }
1866 if (Identity && NElts)
1867 return N1;
1868
1869 // Shuffling a constant splat doesn't change the result.
1870 if (N2Undef) {
1871 SDValue V = N1;
1872
1873 // Look through any bitcasts. We check that these don't change the number
1874 // (and size) of elements, only their types.
1875 while (V.getOpcode() == ISD::BITCAST)
1876 V = V->getOperand(0);
1877
1878 // A splat should always show up as a build vector node.
1879 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
1880 BitVector UndefElements;
1881 SDValue Splat = BV->getSplatValue(&UndefElements);
1882 // If this is a splat of an undef, shuffling it is also undef.
1883 if (Splat && Splat.isUndef())
1884 return getUNDEF(VT);
1885
1886 bool SameNumElts =
1887 V.getValueType().getVectorNumElements() == VT.getVectorNumElements();
1888
1889 // We only have a splat which can skip shuffles if there is a splatted
1890 // value and no undef lanes rearranged by the shuffle.
1891 if (Splat && UndefElements.none()) {
1892 // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
1893 // number of elements match or the value splatted is a zero constant.
1894 if (SameNumElts)
1895 return N1;
1896 if (auto *C = dyn_cast<ConstantSDNode>(Splat))
1897 if (C->isNullValue())
1898 return N1;
1899 }
1900
1901 // If the shuffle itself creates a splat, build the vector directly.
1902 if (AllSame && SameNumElts) {
1903 EVT BuildVT = BV->getValueType(0);
1904 const SDValue &Splatted = BV->getOperand(MaskVec[0]);
1905 SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);
1906
1907 // We may have jumped through bitcasts, so the type of the
1908 // BUILD_VECTOR may not match the type of the shuffle.
1909 if (BuildVT != VT)
1910 NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
1911 return NewBV;
1912 }
1913 }
1914 }
1915
1916 FoldingSetNodeID ID;
1917 SDValue Ops[2] = { N1, N2 };
1918 AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
1919 for (int i = 0; i != NElts; ++i)
1920 ID.AddInteger(MaskVec[i]);
1921
1922 void* IP = nullptr;
1923 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1924 return SDValue(E, 0);
1925
1926 // Allocate the mask array for the node out of the BumpPtrAllocator, since
1927 // SDNode doesn't have access to it. This memory will be "leaked" when
1928 // the node is deallocated, but recovered when the NodeAllocator is released.
1929 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
1930 llvm::copy(MaskVec, MaskAlloc);
1931
1932 auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(),
1933 dl.getDebugLoc(), MaskAlloc);
1934 createOperands(N, Ops);
1935
1936 CSEMap.InsertNode(N, IP);
1937 InsertNode(N);
1938 SDValue V = SDValue(N, 0);
1939 NewSDValueDbgMsg(V, "Creating new node: ", this);
1940 return V;
1941 }
1942
1943 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) {
1944 EVT VT = SV.getValueType(0);
1945 SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end());
1946 ShuffleVectorSDNode::commuteMask(MaskVec);
1947
1948 SDValue Op0 = SV.getOperand(0);
1949 SDValue Op1 = SV.getOperand(1);
1950 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec);
1951 }
1952
1953 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
1954 FoldingSetNodeID ID;
1955 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None);
1956 ID.AddInteger(RegNo);
1957 void *IP = nullptr;
1958 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1959 return SDValue(E, 0);
1960
1961 auto *N = newSDNode<RegisterSDNode>(RegNo, VT);
1962 N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA);
1963 CSEMap.InsertNode(N, IP);
1964 InsertNode(N);
1965 return SDValue(N, 0);
1966 }
1967
1968 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
1969 FoldingSetNodeID ID;
1970 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None);
1971 ID.AddPointer(RegMask);
1972 void *IP = nullptr;
1973 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1974 return SDValue(E, 0);
1975
1976 auto *N = newSDNode<RegisterMaskSDNode>(RegMask);
1977 CSEMap.InsertNode(N, IP);
1978 InsertNode(N);
1979 return SDValue(N, 0);
1980 }
1981
1982 SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root,
1983 MCSymbol *Label) {
1984 return getLabelNode(ISD::EH_LABEL, dl, Root, Label);
1985 }
1986
1987 SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl,
1988 SDValue Root, MCSymbol *Label) {
1989 FoldingSetNodeID ID;
1990 SDValue Ops[] = { Root };
1991 AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops);
1992 ID.AddPointer(Label);
1993 void *IP = nullptr;
1994 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1995 return SDValue(E, 0);
1996
1997 auto *N =
1998 newSDNode<LabelSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), Label);
1999 createOperands(N, Ops);
2000
2001 CSEMap.InsertNode(N, IP);
2002 InsertNode(N);
2003 return SDValue(N, 0);
2004 }
2005
2006 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
2007 int64_t Offset, bool isTarget,
2008 unsigned TargetFlags) {
2009 unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
2010
2011 FoldingSetNodeID ID;
2012 AddNodeIDNode(ID, Opc, getVTList(VT), None);
2013 ID.AddPointer(BA);
2014 ID.AddInteger(Offset);
2015 ID.AddInteger(TargetFlags);
2016 void *IP = nullptr;
2017 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2018 return SDValue(E, 0);
2019
2020 auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags);
2021 CSEMap.InsertNode(N, IP);
2022 InsertNode(N);
2023 return SDValue(N, 0);
2024 }
2025
2026 SDValue SelectionDAG::getSrcValue(const Value *V) {
2027 FoldingSetNodeID ID;
2028 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None);
2029 ID.AddPointer(V);
2030
2031 void *IP = nullptr;
2032 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2033 return SDValue(E, 0);
2034
2035 auto *N = newSDNode<SrcValueSDNode>(V);
2036 CSEMap.InsertNode(N, IP);
2037 InsertNode(N);
2038 return SDValue(N, 0);
2039 }
2040
2041 SDValue SelectionDAG::getMDNode(const MDNode *MD) {
2042 FoldingSetNodeID ID;
2043 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None);
2044 ID.AddPointer(MD);
2045
2046 void *IP = nullptr;
2047 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
2048 return SDValue(E, 0);
2049
2050 auto *N = newSDNode<MDNodeSDNode>(MD);
2051 CSEMap.InsertNode(N, IP);
2052 InsertNode(N);
2053 return SDValue(N, 0);
2054 }
2055
2056 SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) {
2057 if (VT == V.getValueType())
2058 return V;
2059
2060 return getNode(ISD::BITCAST, SDLoc(V), VT, V);
2061 }
2062
2063 SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr,
2064 unsigned SrcAS, unsigned DestAS) {
2065 SDValue Ops[] = {Ptr};
2066 FoldingSetNodeID ID;
2067 AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops);
2068 ID.AddInteger(SrcAS);
2069 ID.AddInteger(DestAS);
2070
2071 void *IP = nullptr;
2072 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
2073 return SDValue(E, 0);
2074
2075 auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(),
2076 VT, SrcAS, DestAS);
2077 createOperands(N, Ops);
2078
2079 CSEMap.InsertNode(N, IP);
2080 InsertNode(N);
2081 return SDValue(N, 0);
2082 }
2083
2084 SDValue SelectionDAG::getFreeze(SDValue V) {
2085 return getNode(ISD::FREEZE, SDLoc(V), V.getValueType(), V);
2086 }
2087
2088 /// getShiftAmountOperand - Return the specified value casted to
2089 /// the target's desired shift amount type.
2090 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
2091 EVT OpTy = Op.getValueType();
2092 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout());
2093 if (OpTy == ShTy || OpTy.isVector()) return Op;
2094
2095 return getZExtOrTrunc(Op, SDLoc(Op), ShTy);
2096 }
2097
2098 SDValue SelectionDAG::expandVAArg(SDNode *Node) {
2099 SDLoc dl(Node);
2100 const TargetLowering &TLI = getTargetLoweringInfo();
2101 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2102 EVT VT = Node->getValueType(0);
2103 SDValue Tmp1 = Node->getOperand(0);
2104 SDValue Tmp2 = Node->getOperand(1);
2105 const MaybeAlign MA(Node->getConstantOperandVal(3));
2106
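// In outline, the expansion built below is (illustrative sketch):
//   t0 = load ap                      ; current argument pointer
//   t0 = (t0 + align - 1) & -align    ; only for over-aligned arguments
//   t1 = t0 + sizeof(argument)
//   store t1 -> ap                    ; bump the pointer for the next va_arg
//   result = load t0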
2107 SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1,
2108 Tmp2, MachinePointerInfo(V));
2109 SDValue VAList = VAListLoad;
2110
2111 if (MA && *MA > TLI.getMinStackArgumentAlignment()) {
2112 VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
2113 getConstant(MA->value() - 1, dl, VAList.getValueType()));
2114
2115 VAList =
2116 getNode(ISD::AND, dl, VAList.getValueType(), VAList,
2117 getConstant(-(int64_t)MA->value(), dl, VAList.getValueType()));
2118 }
2119
2120 // Increment the pointer, VAList, to the next vaarg
2121 Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
2122 getConstant(getDataLayout().getTypeAllocSize(
2123 VT.getTypeForEVT(*getContext())),
2124 dl, VAList.getValueType()));
2125 // Store the incremented VAList to the legalized pointer
2126 Tmp1 =
2127 getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V));
2128 // Load the actual argument out of the pointer VAList
2129 return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo());
2130 }
2131
2132 SDValue SelectionDAG::expandVACopy(SDNode *Node) {
2133 SDLoc dl(Node);
2134 const TargetLowering &TLI = getTargetLoweringInfo();
2135 // This defaults to loading a pointer from the input and storing it to the
2136 // output, returning the chain.
2137 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
2138 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
2139 SDValue Tmp1 =
2140 getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0),
2141 Node->getOperand(2), MachinePointerInfo(VS));
2142 return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
2143 MachinePointerInfo(VD));
2144 }
2145
2146 Align SelectionDAG::getReducedAlign(EVT VT, bool UseABI) {
2147 const DataLayout &DL = getDataLayout();
2148 Type *Ty = VT.getTypeForEVT(*getContext());
2149 Align RedAlign = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);
2150
2151 if (TLI->isTypeLegal(VT) || !VT.isVector())
2152 return RedAlign;
2153
2154 const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
2155 const Align StackAlign = TFI->getStackAlign();
2156
2157 // See if we can choose a smaller ABI alignment in cases where it's an
2158 // illegal vector type that will get broken down.
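// (Illustrative, assumed register sizes: an illegal v64i8 that breaks down
// into four v16i8 registers can be given v16i8's 16-byte alignment instead
// of a larger preferred alignment for the full type.)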
2159 if (RedAlign > StackAlign) {
2160 EVT IntermediateVT;
2161 MVT RegisterVT;
2162 unsigned NumIntermediates;
2163 TLI->getVectorTypeBreakdown(*getContext(), VT, IntermediateVT,
2164 NumIntermediates, RegisterVT);
2165 Ty = IntermediateVT.getTypeForEVT(*getContext());
2166 Align RedAlign2 = UseABI ? DL.getABITypeAlign(Ty) : DL.getPrefTypeAlign(Ty);
2167 if (RedAlign2 < RedAlign)
2168 RedAlign = RedAlign2;
2169 }
2170
2171 return RedAlign;
2172 }
2173
2174 SDValue SelectionDAG::CreateStackTemporary(TypeSize Bytes, Align Alignment) {
2175 MachineFrameInfo &MFI = MF->getFrameInfo();
2176 const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
2177 int StackID = 0;
2178 if (Bytes.isScalable())
2179 StackID = TFI->getStackIDForScalableVectors();
2180 // The stack id gives an indication of whether the object is scalable or
2181 // not, so it's safe to pass in the minimum size here.
2182 int FrameIdx = MFI.CreateStackObject(Bytes.getKnownMinSize(), Alignment,
2183 false, nullptr, StackID);
2184 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
2185 }
2186
2187 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
2188 Type *Ty = VT.getTypeForEVT(*getContext());
2189 Align StackAlign =
2190 std::max(getDataLayout().getPrefTypeAlign(Ty), Align(minAlign));
2191 return CreateStackTemporary(VT.getStoreSize(), StackAlign);
2192 }
2193
2194 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
2195 TypeSize VT1Size = VT1.getStoreSize();
2196 TypeSize VT2Size = VT2.getStoreSize();
2197 assert(VT1Size.isScalable() == VT2Size.isScalable() &&
2198 "Don't know how to choose the maximum size when creating a stack "
2199 "temporary");
2200 TypeSize Bytes =
2201 VT1Size.getKnownMinSize() > VT2Size.getKnownMinSize() ? VT1Size : VT2Size;
2202
2203 Type *Ty1 = VT1.getTypeForEVT(*getContext());
2204 Type *Ty2 = VT2.getTypeForEVT(*getContext());
2205 const DataLayout &DL = getDataLayout();
2206 Align Align = std::max(DL.getPrefTypeAlign(Ty1), DL.getPrefTypeAlign(Ty2));
2207 return CreateStackTemporary(Bytes, Align);
2208 }
2209
2210 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
2211 ISD::CondCode Cond, const SDLoc &dl) {
2212 EVT OpVT = N1.getValueType();
2213
2214 // These setcc operations always fold.
2215 switch (Cond) {
2216 default: break;
2217 case ISD::SETFALSE:
2218 case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT);
2219 case ISD::SETTRUE:
2220 case ISD::SETTRUE2: return getBoolConstant(true, dl, VT, OpVT);
2221
2222 case ISD::SETOEQ:
2223 case ISD::SETOGT:
2224 case ISD::SETOGE:
2225 case ISD::SETOLT:
2226 case ISD::SETOLE:
2227 case ISD::SETONE:
2228 case ISD::SETO:
2229 case ISD::SETUO:
2230 case ISD::SETUEQ:
2231 case ISD::SETUNE:
2232 assert(!OpVT.isInteger() && "Illegal setcc for integer!");
2233 break;
2234 }
2235
2236 if (OpVT.isInteger()) {
2237 // For EQ and NE, we can always pick a value for the undef to make the
2238 // predicate pass or fail, so we can return undef.
2239 // Matches behavior in llvm::ConstantFoldCompareInstruction.
2240 // icmp eq/ne X, undef -> undef.
2241 if ((N1.isUndef() || N2.isUndef()) &&
2242 (Cond == ISD::SETEQ || Cond == ISD::SETNE))
2243 return getUNDEF(VT);
2244
2245 // If both operands are undef, we can return undef for int comparison.
2246 // icmp undef, undef -> undef.
2247 if (N1.isUndef() && N2.isUndef())
2248 return getUNDEF(VT);
2249
2250 // icmp X, X -> true/false
2251 // icmp X, undef -> true/false because undef could be X.
2252 if (N1 == N2)
2253 return getBoolConstant(ISD::isTrueWhenEqual(Cond), dl, VT, OpVT);
2254 }
2255
2256 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) {
2257 const APInt &C2 = N2C->getAPIntValue();
2258 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
2259 const APInt &C1 = N1C->getAPIntValue();
2260
2261 switch (Cond) {
2262 default: llvm_unreachable("Unknown integer setcc!");
2263 case ISD::SETEQ: return getBoolConstant(C1 == C2, dl, VT, OpVT);
2264 case ISD::SETNE: return getBoolConstant(C1 != C2, dl, VT, OpVT);
2265 case ISD::SETULT: return getBoolConstant(C1.ult(C2), dl, VT, OpVT);
2266 case ISD::SETUGT: return getBoolConstant(C1.ugt(C2), dl, VT, OpVT);
2267 case ISD::SETULE: return getBoolConstant(C1.ule(C2), dl, VT, OpVT);
2268 case ISD::SETUGE: return getBoolConstant(C1.uge(C2), dl, VT, OpVT);
2269 case ISD::SETLT: return getBoolConstant(C1.slt(C2), dl, VT, OpVT);
2270 case ISD::SETGT: return getBoolConstant(C1.sgt(C2), dl, VT, OpVT);
2271 case ISD::SETLE: return getBoolConstant(C1.sle(C2), dl, VT, OpVT);
2272 case ISD::SETGE: return getBoolConstant(C1.sge(C2), dl, VT, OpVT);
2273 }
2274 }
2275 }
2276
2277 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
2278 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
2279
2280 if (N1CFP && N2CFP) {
2281 APFloat::cmpResult R = N1CFP->getValueAPF().compare(N2CFP->getValueAPF());
2282 switch (Cond) {
2283 default: break;
2284 case ISD::SETEQ: if (R==APFloat::cmpUnordered)
2285 return getUNDEF(VT);
2286 LLVM_FALLTHROUGH;
2287 case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT,
2288 OpVT);
2289 case ISD::SETNE: if (R==APFloat::cmpUnordered)
2290 return getUNDEF(VT);
2291 LLVM_FALLTHROUGH;
2292 case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2293 R==APFloat::cmpLessThan, dl, VT,
2294 OpVT);
2295 case ISD::SETLT: if (R==APFloat::cmpUnordered)
2296 return getUNDEF(VT);
2297 LLVM_FALLTHROUGH;
2298 case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT,
2299 OpVT);
2300 case ISD::SETGT: if (R==APFloat::cmpUnordered)
2301 return getUNDEF(VT);
2302 LLVM_FALLTHROUGH;
2303 case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl,
2304 VT, OpVT);
2305 case ISD::SETLE: if (R==APFloat::cmpUnordered)
2306 return getUNDEF(VT);
2307 LLVM_FALLTHROUGH;
2308 case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan ||
2309 R==APFloat::cmpEqual, dl, VT,
2310 OpVT);
2311 case ISD::SETGE: if (R==APFloat::cmpUnordered)
2312 return getUNDEF(VT);
2313 LLVM_FALLTHROUGH;
2314 case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2315 R==APFloat::cmpEqual, dl, VT, OpVT);
2316 case ISD::SETO: return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT,
2317 OpVT);
2318 case ISD::SETUO: return getBoolConstant(R==APFloat::cmpUnordered, dl, VT,
2319 OpVT);
2320 case ISD::SETUEQ: return getBoolConstant(R==APFloat::cmpUnordered ||
2321 R==APFloat::cmpEqual, dl, VT,
2322 OpVT);
2323 case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT,
2324 OpVT);
2325 case ISD::SETULT: return getBoolConstant(R==APFloat::cmpUnordered ||
2326 R==APFloat::cmpLessThan, dl, VT,
2327 OpVT);
2328 case ISD::SETUGT: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2329 R==APFloat::cmpUnordered, dl, VT,
2330 OpVT);
2331 case ISD::SETULE: return getBoolConstant(R!=APFloat::cmpGreaterThan, dl,
2332 VT, OpVT);
2333 case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT,
2334 OpVT);
2335 }
2336 } else if (N1CFP && OpVT.isSimple() && !N2.isUndef()) {
2337 // Ensure that the constant occurs on the RHS.
2338 ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
2339 if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT()))
2340 return SDValue();
2341 return getSetCC(dl, VT, N2, N1, SwappedCond);
2342 } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
2343 (OpVT.isFloatingPoint() && (N1.isUndef() || N2.isUndef()))) {
2344 // If an operand is known to be a nan (or undef that could be a nan), we can
2345 // fold it.
2346 // Choosing NaN for the undef will always make unordered comparison succeed
2347 // and ordered comparison fail.
2348 // Matches behavior in llvm::ConstantFoldCompareInstruction.
2349 switch (ISD::getUnorderedFlavor(Cond)) {
2350 default:
2351 llvm_unreachable("Unknown flavor!");
2352 case 0: // Known false.
2353 return getBoolConstant(false, dl, VT, OpVT);
2354 case 1: // Known true.
2355 return getBoolConstant(true, dl, VT, OpVT);
2356 case 2: // Undefined.
2357 return getUNDEF(VT);
2358 }
2359 }
2360
2361 // Could not fold it.
2362 return SDValue();
2363 }
2364
2365 /// See if the specified operand can be simplified with the knowledge that only
2366 /// the bits specified by DemandedBits are used.
2367 /// TODO: really we should be making this into the DAG equivalent of
2368 /// SimplifyMultipleUseDemandedBits and not generate any new nodes.
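/// (Illustrative sketch: demanding only the low byte of the constant 0x1234
/// folds it to a new constant 0x0034 via the ISD::Constant case below.)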
2369 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits) {
2370 EVT VT = V.getValueType();
2371
2372 if (VT.isScalableVector())
2373 return SDValue();
2374
2375 APInt DemandedElts = VT.isVector()
2376 ? APInt::getAllOnesValue(VT.getVectorNumElements())
2377 : APInt(1, 1);
2378 return GetDemandedBits(V, DemandedBits, DemandedElts);
2379 }
2380
2381 /// See if the specified operand can be simplified with the knowledge that only
2382 /// the bits specified by DemandedBits are used in the elements specified by
2383 /// DemandedElts.
2384 /// TODO: really we should be making this into the DAG equivalent of
2385 /// SimplifyMultipleUseDemandedBits and not generate any new nodes.
2386 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits,
2387 const APInt &DemandedElts) {
2388 switch (V.getOpcode()) {
2389 default:
2390 return TLI->SimplifyMultipleUseDemandedBits(V, DemandedBits, DemandedElts,
2391 *this, 0);
2392 case ISD::Constant: {
2393 const APInt &CVal = cast<ConstantSDNode>(V)->getAPIntValue();
2394 APInt NewVal = CVal & DemandedBits;
2395 if (NewVal != CVal)
2396 return getConstant(NewVal, SDLoc(V), V.getValueType());
2397 break;
2398 }
2399 case ISD::SRL:
2400 // Only look at single-use SRLs.
2401 if (!V.getNode()->hasOneUse())
2402 break;
2403 if (auto *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
2404 // See if we can recursively simplify the LHS.
2405 unsigned Amt = RHSC->getZExtValue();
2406
2407 // Watch out for shift count overflow though.
2408 if (Amt >= DemandedBits.getBitWidth())
2409 break;
2410 APInt SrcDemandedBits = DemandedBits << Amt;
2411 if (SDValue SimplifyLHS =
2412 GetDemandedBits(V.getOperand(0), SrcDemandedBits))
2413 return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS,
2414 V.getOperand(1));
2415 }
2416 break;
2417 }
2418 return SDValue();
2419 }
2420
2421 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
2422 /// use this predicate to simplify operations downstream.
2423 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
2424 unsigned BitWidth = Op.getScalarValueSizeInBits();
2425 return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth);
2426 }
2427
2428 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
2429 /// this predicate to simplify operations downstream. Mask is known to be zero
2430 /// for bits that V cannot have.
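/// (Illustrative: if V is (and X, 0xF0), its low four bits are known zero, so
/// MaskedValueIsZero(V, 0x0F) returns true.)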
2431 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
2432 unsigned Depth) const {
2433 return Mask.isSubsetOf(computeKnownBits(V, Depth).Zero);
2434 }
2435
2436 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero in
2437 /// DemandedElts. We use this predicate to simplify operations downstream.
2438 /// Mask is known to be zero for bits that V cannot have.
2439 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
2440 const APInt &DemandedElts,
2441 unsigned Depth) const {
2442 return Mask.isSubsetOf(computeKnownBits(V, DemandedElts, Depth).Zero);
2443 }
2444
2445 /// Return true if the DemandedElts of the vector Op are all zero. We
2446 /// use this predicate to simplify operations downstream.
2447 bool SelectionDAG::MaskedElementsAreZero(SDValue Op, const APInt &DemandedElts,
2448 unsigned Depth) const {
2449 unsigned BitWidth = Op.getScalarValueSizeInBits();
2450 APInt DemandedBits = APInt::getAllOnesValue(BitWidth);
2451 return MaskedValueIsZero(Op, DemandedBits, DemandedElts, Depth);
2452 }
2453
2454 /// MaskedValueIsAllOnes - Return true if '(Op & Mask) == Mask'.
2455 bool SelectionDAG::MaskedValueIsAllOnes(SDValue V, const APInt &Mask,
2456 unsigned Depth) const {
2457 return Mask.isSubsetOf(computeKnownBits(V, Depth).One);
2458 }
2459
2460 /// isSplatValue - Return true if the vector V has the same value
2461 /// across all DemandedElts. For scalable vectors it does not make
2462 /// sense to specify which elements are demanded or undefined, therefore
2463 /// they are simply ignored.
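/// (Illustrative: a BUILD_VECTOR <x, x, undef, x> is reported as a splat of x
/// with element 2 marked in UndefElts.)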
2464 bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts,
2465 APInt &UndefElts, unsigned Depth) {
2466 EVT VT = V.getValueType();
2467 assert(VT.isVector() && "Vector type expected");
2468
2469 if (!VT.isScalableVector() && !DemandedElts)
2470 return false; // No demanded elts, better to assume we don't know anything.
2471
2472 if (Depth >= MaxRecursionDepth)
2473 return false; // Limit search depth.
2474
2475 // Deal with some common cases here that work for both fixed and scalable
2476 // vector types.
2477 switch (V.getOpcode()) {
2478 case ISD::SPLAT_VECTOR:
2479 UndefElts = V.getOperand(0).isUndef()
2480 ? APInt::getAllOnesValue(DemandedElts.getBitWidth())
2481 : APInt(DemandedElts.getBitWidth(), 0);
2482 return true;
2483 case ISD::ADD:
2484 case ISD::SUB:
2485 case ISD::AND:
2486 case ISD::XOR:
2487 case ISD::OR: {
2488 APInt UndefLHS, UndefRHS;
2489 SDValue LHS = V.getOperand(0);
2490 SDValue RHS = V.getOperand(1);
2491 if (isSplatValue(LHS, DemandedElts, UndefLHS, Depth + 1) &&
2492 isSplatValue(RHS, DemandedElts, UndefRHS, Depth + 1)) {
2493 UndefElts = UndefLHS | UndefRHS;
2494 return true;
2495 }
2496 return false;
2497 }
2498 case ISD::ABS:
2499 case ISD::TRUNCATE:
2500 case ISD::SIGN_EXTEND:
2501 case ISD::ZERO_EXTEND:
2502 return isSplatValue(V.getOperand(0), DemandedElts, UndefElts, Depth + 1);
2503 }
2504
2505 // We don't support other cases than those above for scalable vectors at
2506 // the moment.
2507 if (VT.isScalableVector())
2508 return false;
2509
2510 unsigned NumElts = VT.getVectorNumElements();
2511 assert(NumElts == DemandedElts.getBitWidth() && "Vector size mismatch");
2512 UndefElts = APInt::getNullValue(NumElts);
2513
2514 switch (V.getOpcode()) {
2515 case ISD::BUILD_VECTOR: {
2516 SDValue Scl;
2517 for (unsigned i = 0; i != NumElts; ++i) {
2518 SDValue Op = V.getOperand(i);
2519 if (Op.isUndef()) {
2520 UndefElts.setBit(i);
2521 continue;
2522 }
2523 if (!DemandedElts[i])
2524 continue;
2525 if (Scl && Scl != Op)
2526 return false;
2527 Scl = Op;
2528 }
2529 return true;
2530 }
2531 case ISD::VECTOR_SHUFFLE: {
2532 // Check if this is a shuffle node doing a splat.
2533 // TODO: Do we need to handle shuffle(splat, undef, mask)?
2534 int SplatIndex = -1;
2535 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask();
2536 for (int i = 0; i != (int)NumElts; ++i) {
2537 int M = Mask[i];
2538 if (M < 0) {
2539 UndefElts.setBit(i);
2540 continue;
2541 }
2542 if (!DemandedElts[i])
2543 continue;
2544 if (0 <= SplatIndex && SplatIndex != M)
2545 return false;
2546 SplatIndex = M;
2547 }
2548 return true;
2549 }
2550 case ISD::EXTRACT_SUBVECTOR: {
2551 // Offset the demanded elts by the subvector index.
2552 SDValue Src = V.getOperand(0);
2553 // We don't support scalable vectors at the moment.
2554 if (Src.getValueType().isScalableVector())
2555 return false;
2556 uint64_t Idx = V.getConstantOperandVal(1);
2557 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2558 APInt UndefSrcElts;
2559 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
2560 if (isSplatValue(Src, DemandedSrcElts, UndefSrcElts, Depth + 1)) {
2561 UndefElts = UndefSrcElts.extractBits(NumElts, Idx);
2562 return true;
2563 }
2564 break;
2565 }
2566 }
2567
2568 return false;
2569 }
2570
2571 /// Helper wrapper to main isSplatValue function.
2572 bool SelectionDAG::isSplatValue(SDValue V, bool AllowUndefs) {
2573 EVT VT = V.getValueType();
2574 assert(VT.isVector() && "Vector type expected");
2575
2576 APInt UndefElts;
2577 APInt DemandedElts;
2578
2579 // For now we don't support this with scalable vectors.
2580 if (!VT.isScalableVector())
2581 DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
2582 return isSplatValue(V, DemandedElts, UndefElts) &&
2583 (AllowUndefs || !UndefElts);
2584 }
2585
2586 SDValue SelectionDAG::getSplatSourceVector(SDValue V, int &SplatIdx) {
2587 V = peekThroughExtractSubvectors(V);
2588
2589 EVT VT = V.getValueType();
2590 unsigned Opcode = V.getOpcode();
2591 switch (Opcode) {
2592 default: {
2593 APInt UndefElts;
2594 APInt DemandedElts;
2595
2596 if (!VT.isScalableVector())
2597 DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
2598
2599 if (isSplatValue(V, DemandedElts, UndefElts)) {
2600 if (VT.isScalableVector()) {
2601 // DemandedElts and UndefElts are ignored for scalable vectors, since
2602 // the only supported cases are SPLAT_VECTOR nodes.
2603 SplatIdx = 0;
2604 } else {
2605 // Handle case where all demanded elements are UNDEF.
2606 if (DemandedElts.isSubsetOf(UndefElts)) {
2607 SplatIdx = 0;
2608 return getUNDEF(VT);
2609 }
2610 SplatIdx = (UndefElts & DemandedElts).countTrailingOnes();
2611 }
2612 return V;
2613 }
2614 break;
2615 }
2616 case ISD::SPLAT_VECTOR:
2617 SplatIdx = 0;
2618 return V;
2619 case ISD::VECTOR_SHUFFLE: {
2620 if (VT.isScalableVector())
2621 return SDValue();
2622
2623 // Check if this is a shuffle node doing a splat.
2624 // TODO: remove this and rely purely on SelectionDAG::isSplatValue;
2625 // getTargetVShiftNode currently struggles without the splat source.
2626 auto *SVN = cast<ShuffleVectorSDNode>(V);
2627 if (!SVN->isSplat())
2628 break;
2629 int Idx = SVN->getSplatIndex();
2630 int NumElts = V.getValueType().getVectorNumElements();
2631 SplatIdx = Idx % NumElts;
2632 return V.getOperand(Idx / NumElts);
2633 }
2634 }
2635
2636 return SDValue();
2637 }
2638
2639 SDValue SelectionDAG::getSplatValue(SDValue V, bool LegalTypes) {
2640 int SplatIdx;
2641 if (SDValue SrcVector = getSplatSourceVector(V, SplatIdx)) {
2642 EVT SVT = SrcVector.getValueType().getScalarType();
2643 EVT LegalSVT = SVT;
2644 if (LegalTypes && !TLI->isTypeLegal(SVT)) {
2645 if (!SVT.isInteger())
2646 return SDValue();
2647 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
2648 if (LegalSVT.bitsLT(SVT))
2649 return SDValue();
2650 }
2651 return getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), LegalSVT, SrcVector,
2652 getVectorIdxConstant(SplatIdx, SDLoc(V)));
2653 }
2654 return SDValue();
2655 }
2656
2657 const APInt *
2658 SelectionDAG::getValidShiftAmountConstant(SDValue V,
2659 const APInt &DemandedElts) const {
2660 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
2661 V.getOpcode() == ISD::SRA) &&
2662 "Unknown shift node");
2663 unsigned BitWidth = V.getScalarValueSizeInBits();
2664 if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1), DemandedElts)) {
2665 // Shifting more than the bitwidth is not valid.
2666 const APInt &ShAmt = SA->getAPIntValue();
2667 if (ShAmt.ult(BitWidth))
2668 return &ShAmt;
2669 }
2670 return nullptr;
2671 }
2672
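/// Illustrative sketch for the helpers below: for a vector shift whose amount
/// operand is the BUILD_VECTOR <1, 3, 2> (hypothetical mask) with all elements
/// demanded, the per-element minimum is 1 and the maximum is 3; nullptr is
/// returned if any demanded amount is non-constant or out of range.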
2673 const APInt *SelectionDAG::getValidMinimumShiftAmountConstant(
2674 SDValue V, const APInt &DemandedElts) const {
2675 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
2676 V.getOpcode() == ISD::SRA) &&
2677 "Unknown shift node");
2678 if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts))
2679 return ValidAmt;
2680 unsigned BitWidth = V.getScalarValueSizeInBits();
2681 auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1));
2682 if (!BV)
2683 return nullptr;
2684 const APInt *MinShAmt = nullptr;
2685 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
2686 if (!DemandedElts[i])
2687 continue;
2688 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
2689 if (!SA)
2690 return nullptr;
2691 // Shifting more than the bitwidth is not valid.
2692 const APInt &ShAmt = SA->getAPIntValue();
2693 if (ShAmt.uge(BitWidth))
2694 return nullptr;
2695 if (MinShAmt && MinShAmt->ule(ShAmt))
2696 continue;
2697 MinShAmt = &ShAmt;
2698 }
2699 return MinShAmt;
2700 }
2701
2702 const APInt *SelectionDAG::getValidMaximumShiftAmountConstant(
2703 SDValue V, const APInt &DemandedElts) const {
2704 assert((V.getOpcode() == ISD::SHL || V.getOpcode() == ISD::SRL ||
2705 V.getOpcode() == ISD::SRA) &&
2706 "Unknown shift node");
2707 if (const APInt *ValidAmt = getValidShiftAmountConstant(V, DemandedElts))
2708 return ValidAmt;
2709 unsigned BitWidth = V.getScalarValueSizeInBits();
2710 auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1));
2711 if (!BV)
2712 return nullptr;
2713 const APInt *MaxShAmt = nullptr;
2714 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
2715 if (!DemandedElts[i])
2716 continue;
2717 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
2718 if (!SA)
2719 return nullptr;
2720 // Shifting more than the bitwidth is not valid.
2721 const APInt &ShAmt = SA->getAPIntValue();
2722 if (ShAmt.uge(BitWidth))
2723 return nullptr;
2724 if (MaxShAmt && MaxShAmt->uge(ShAmt))
2725 continue;
2726 MaxShAmt = &ShAmt;
2727 }
2728 return MaxShAmt;
2729 }
2730
2731 /// Determine which bits of Op are known to be either zero or one and return
2732 /// them in Known. For vectors, the known bits are those that are shared by
2733 /// every vector element.
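/// (Illustrative: for a BUILD_VECTOR of the i8 constants 0x10, 0x30 and 0x70,
/// bit 4 is known one and bits 0-3 and 7 are known zero, i.e. Known.One ==
/// 0x10 and Known.Zero == 0x8F.)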
2734 KnownBits SelectionDAG::computeKnownBits(SDValue Op, unsigned Depth) const {
2735 EVT VT = Op.getValueType();
2736
2737 // TODO: Until we have a plan for how to represent demanded elements for
2738 // scalable vectors, we can just bail out for now.
2739 if (Op.getValueType().isScalableVector()) {
2740 unsigned BitWidth = Op.getScalarValueSizeInBits();
2741 return KnownBits(BitWidth);
2742 }
2743
2744 APInt DemandedElts = VT.isVector()
2745 ? APInt::getAllOnesValue(VT.getVectorNumElements())
2746 : APInt(1, 1);
2747 return computeKnownBits(Op, DemandedElts, Depth);
2748 }
2749
2750 /// Determine which bits of Op are known to be either zero or one and return
2751 /// them in Known. The DemandedElts argument allows us to only collect the known
2752 /// bits that are shared by the requested vector elements.
2753 KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
2754 unsigned Depth) const {
2755 unsigned BitWidth = Op.getScalarValueSizeInBits();
2756
2757 KnownBits Known(BitWidth); // Don't know anything.
2758
2759 // TODO: Until we have a plan for how to represent demanded elements for
2760 // scalable vectors, we can just bail out for now.
2761 if (Op.getValueType().isScalableVector())
2762 return Known;
2763
2764 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
2765 // We know all of the bits for a constant!
2766 return KnownBits::makeConstant(C->getAPIntValue());
2767 }
2768 if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) {
2769 // We know all of the bits for a constant fp!
2770 return KnownBits::makeConstant(C->getValueAPF().bitcastToAPInt());
2771 }
2772
2773 if (Depth >= MaxRecursionDepth)
2774 return Known; // Limit search depth.
2775
2776 KnownBits Known2;
2777 unsigned NumElts = DemandedElts.getBitWidth();
2778 assert((!Op.getValueType().isVector() ||
2779 NumElts == Op.getValueType().getVectorNumElements()) &&
2780 "Unexpected vector size");
2781
2782 if (!DemandedElts)
2783 return Known; // No demanded elts, better to assume we don't know anything.
2784
2785 unsigned Opcode = Op.getOpcode();
2786 switch (Opcode) {
2787 case ISD::BUILD_VECTOR:
2788 // Collect the known bits that are shared by every demanded vector element.
2789 Known.Zero.setAllBits(); Known.One.setAllBits();
2790 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
2791 if (!DemandedElts[i])
2792 continue;
2793
2794 SDValue SrcOp = Op.getOperand(i);
2795 Known2 = computeKnownBits(SrcOp, Depth + 1);
2796
2797 // BUILD_VECTOR can implicitly truncate sources, we must handle this.
2798 if (SrcOp.getValueSizeInBits() != BitWidth) {
2799 assert(SrcOp.getValueSizeInBits() > BitWidth &&
2800 "Expected BUILD_VECTOR implicit truncation");
2801 Known2 = Known2.trunc(BitWidth);
2802 }
2803
2804 // Known bits are the values that are shared by every demanded element.
2805 Known = KnownBits::commonBits(Known, Known2);
2806
2807 // If we don't know any bits, early out.
2808 if (Known.isUnknown())
2809 break;
2810 }
2811 break;
2812 case ISD::VECTOR_SHUFFLE: {
2813 // Collect the known bits that are shared by every vector element referenced
2814 // by the shuffle.
2815 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
2816 Known.Zero.setAllBits(); Known.One.setAllBits();
2817 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
2818 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
2819 for (unsigned i = 0; i != NumElts; ++i) {
2820 if (!DemandedElts[i])
2821 continue;
2822
2823 int M = SVN->getMaskElt(i);
2824 if (M < 0) {
2825 // For UNDEF elements, we don't know anything about the common state of
2826 // the shuffle result.
2827 Known.resetAll();
2828 DemandedLHS.clearAllBits();
2829 DemandedRHS.clearAllBits();
2830 break;
2831 }
2832
2833 if ((unsigned)M < NumElts)
2834 DemandedLHS.setBit((unsigned)M % NumElts);
2835 else
2836 DemandedRHS.setBit((unsigned)M % NumElts);
2837 }
2838 // Known bits are the values that are shared by every demanded element.
2839 if (!!DemandedLHS) {
2840 SDValue LHS = Op.getOperand(0);
2841 Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1);
2842 Known = KnownBits::commonBits(Known, Known2);
2843 }
2844 // If we don't know any bits, early out.
2845 if (Known.isUnknown())
2846 break;
2847 if (!!DemandedRHS) {
2848 SDValue RHS = Op.getOperand(1);
2849 Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1);
2850 Known = KnownBits::commonBits(Known, Known2);
2851 }
2852 break;
2853 }
2854 case ISD::CONCAT_VECTORS: {
2855 // Split DemandedElts and test each of the demanded subvectors.
2856 Known.Zero.setAllBits(); Known.One.setAllBits();
2857 EVT SubVectorVT = Op.getOperand(0).getValueType();
2858 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
2859 unsigned NumSubVectors = Op.getNumOperands();
2860 for (unsigned i = 0; i != NumSubVectors; ++i) {
2861 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
2862 DemandedSub = DemandedSub.trunc(NumSubVectorElts);
2863 if (!!DemandedSub) {
2864 SDValue Sub = Op.getOperand(i);
2865 Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1);
2866 Known = KnownBits::commonBits(Known, Known2);
2867 }
2868 // If we don't know any bits, early out.
2869 if (Known.isUnknown())
2870 break;
2871 }
2872 break;
2873 }
2874 case ISD::INSERT_SUBVECTOR: {
2875 // Demand any elements from the subvector and the remainder from the src it
2876 // is inserted into.
2877 SDValue Src = Op.getOperand(0);
2878 SDValue Sub = Op.getOperand(1);
2879 uint64_t Idx = Op.getConstantOperandVal(2);
2880 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
2881 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
2882 APInt DemandedSrcElts = DemandedElts;
2883 DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx);
2884
2885 Known.One.setAllBits();
2886 Known.Zero.setAllBits();
2887 if (!!DemandedSubElts) {
2888 Known = computeKnownBits(Sub, DemandedSubElts, Depth + 1);
2889 if (Known.isUnknown())
2890 break; // early-out.
2891 }
2892 if (!!DemandedSrcElts) {
2893 Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
2894 Known = KnownBits::commonBits(Known, Known2);
2895 }
2896 break;
2897 }
2898 case ISD::EXTRACT_SUBVECTOR: {
2899 // Offset the demanded elts by the subvector index.
2900 SDValue Src = Op.getOperand(0);
2901 // Bail until we can represent demanded elements for scalable vectors.
2902 if (Src.getValueType().isScalableVector())
2903 break;
2904 uint64_t Idx = Op.getConstantOperandVal(1);
2905 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2906 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
2907 Known = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
2908 break;
2909 }
2910 case ISD::SCALAR_TO_VECTOR: {
2911 // We know as much about scalar_to_vector as we know about its source,
2912 // which becomes the first element of an otherwise unknown vector.
2913 if (DemandedElts != 1)
2914 break;
2915
2916 SDValue N0 = Op.getOperand(0);
2917 Known = computeKnownBits(N0, Depth + 1);
2918 if (N0.getValueSizeInBits() != BitWidth)
2919 Known = Known.trunc(BitWidth);
2920
2921 break;
2922 }
2923 case ISD::BITCAST: {
2924 SDValue N0 = Op.getOperand(0);
2925 EVT SubVT = N0.getValueType();
2926 unsigned SubBitWidth = SubVT.getScalarSizeInBits();
2927
2928 // Ignore bitcasts from unsupported types.
2929 if (!(SubVT.isInteger() || SubVT.isFloatingPoint()))
2930 break;
2931
2932 // Fast handling of 'identity' bitcasts.
2933 if (BitWidth == SubBitWidth) {
2934 Known = computeKnownBits(N0, DemandedElts, Depth + 1);
2935 break;
2936 }
2937
2938 bool IsLE = getDataLayout().isLittleEndian();
2939
2940 // Bitcast 'small element' vector to 'large element' scalar/vector.
2941 if ((BitWidth % SubBitWidth) == 0) {
2942 assert(N0.getValueType().isVector() && "Expected bitcast from vector");
2943
2944 // Collect known bits for the (larger) output by collecting the known
2945 // bits from each set of sub elements and shift these into place.
2946 // We need to separately call computeKnownBits for each set of
2947 // sub elements as the knownbits for each is likely to be different.
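      // Illustrative example (hypothetical values): bitcasting v4i8 to v2i16
      // on a little-endian target gives SubScale = 2; pass i gathers the
      // known bits of byte lanes {0, 2} (i == 0) or {1, 3} (i == 1) and
      // shifts them to bit offset 0 or 8 of each i16 result element.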
      unsigned SubScale = BitWidth / SubBitWidth;
      APInt SubDemandedElts(NumElts * SubScale, 0);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SubDemandedElts.setBit(i * SubScale);

      for (unsigned i = 0; i != SubScale; ++i) {
        Known2 = computeKnownBits(N0, SubDemandedElts.shl(i),
                                  Depth + 1);
        unsigned Shifts = IsLE ? i : SubScale - 1 - i;
        Known.One |= Known2.One.zext(BitWidth).shl(SubBitWidth * Shifts);
        Known.Zero |= Known2.Zero.zext(BitWidth).shl(SubBitWidth * Shifts);
      }
    }

    // Bitcast 'large element' scalar/vector to 'small element' vector.
    if ((SubBitWidth % BitWidth) == 0) {
      assert(Op.getValueType().isVector() && "Expected bitcast to vector");

      // Collect known bits for the (smaller) output by collecting the known
      // bits from the overlapping larger input elements and extracting the
      // sub sections we actually care about.
      unsigned SubScale = SubBitWidth / BitWidth;
      APInt SubDemandedElts(NumElts / SubScale, 0);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SubDemandedElts.setBit(i / SubScale);

      Known2 = computeKnownBits(N0, SubDemandedElts, Depth + 1);

      Known.Zero.setAllBits(); Known.One.setAllBits();
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          unsigned Shifts = IsLE ? i : NumElts - 1 - i;
          unsigned Offset = (Shifts % SubScale) * BitWidth;
          Known.One &= Known2.One.lshr(Offset).trunc(BitWidth);
          Known.Zero &= Known2.Zero.lshr(Offset).trunc(BitWidth);
          // If we don't know any bits, early out.
          if (Known.isUnknown())
            break;
        }
    }
    break;
  }
  case ISD::AND:
    Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    Known &= Known2;
    break;
  case ISD::OR:
    Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    Known |= Known2;
    break;
  case ISD::XOR:
    Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    Known ^= Known2;
    break;
  case ISD::MUL: {
    Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known = KnownBits::mul(Known, Known2);
    break;
  }
  case ISD::MULHU: {
    Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known = KnownBits::mulhu(Known, Known2);
    break;
  }
  case ISD::MULHS: {
    Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known = KnownBits::mulhs(Known, Known2);
    break;
  }
  case ISD::UMUL_LOHI: {
    assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result");
    Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    if (Op.getResNo() == 0)
      Known = KnownBits::mul(Known, Known2);
    else
      Known = KnownBits::mulhu(Known, Known2);
    break;
  }
  case ISD::SMUL_LOHI: {
    assert((Op.getResNo() == 0 || Op.getResNo() == 1) && "Unknown result");
    Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    if (Op.getResNo() == 0)
      Known = KnownBits::mul(Known, Known2);
    else
      Known = KnownBits::mulhs(Known, Known2);
    break;
  }
  case ISD::UDIV: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known = KnownBits::udiv(Known, Known2);
    break;
  }
  case ISD::SELECT:
  case ISD::VSELECT:
    Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
    // If we don't know any bits, early out.
    if (Known.isUnknown())
      break;
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // Only known if known in both the LHS and RHS.
    Known = KnownBits::commonBits(Known, Known2);
    break;
  case ISD::SELECT_CC:
    Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth + 1);
    // If we don't know any bits, early out.
    if (Known.isUnknown())
      break;
    Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);

    // Only known if known in both the LHS and RHS.
    Known = KnownBits::commonBits(Known, Known2);
    break;
  case ISD::SMULO:
  case ISD::UMULO:
    if (Op.getResNo() != 1)
      break;
    // The boolean result conforms to getBooleanContents.
    // If we know the result of a setcc has the top bits zero, use this info.
    // We know that we have an integer-based boolean since these operations
    // are only available for integers.
    if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
            TargetLowering::ZeroOrOneBooleanContent &&
        BitWidth > 1)
      Known.Zero.setBitsFrom(1);
    break;
  case ISD::SETCC:
  case ISD::STRICT_FSETCC:
  case ISD::STRICT_FSETCCS: {
    unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
    // If we know the result of a setcc has the top bits zero, use this info.
    if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
            TargetLowering::ZeroOrOneBooleanContent &&
        BitWidth > 1)
      Known.Zero.setBitsFrom(1);
    break;
  }
  case ISD::SHL:
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known = KnownBits::shl(Known, Known2);

    // Minimum shift low bits are known zero.
    if (const APInt *ShMinAmt =
            getValidMinimumShiftAmountConstant(Op, DemandedElts))
      Known.Zero.setLowBits(ShMinAmt->getZExtValue());
    break;
  case ISD::SRL:
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known = KnownBits::lshr(Known, Known2);

    // Minimum shift high bits are known zero.
    if (const APInt *ShMinAmt =
            getValidMinimumShiftAmountConstant(Op, DemandedElts))
      Known.Zero.setHighBits(ShMinAmt->getZExtValue());
    break;
  case ISD::SRA:
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known = KnownBits::ashr(Known, Known2);
    // TODO: Add minimum shift high known sign bits.
    break;
  case ISD::FSHL:
  case ISD::FSHR:
    if (ConstantSDNode *C =
            isConstOrConstSplat(Op.getOperand(2), DemandedElts)) {
      unsigned Amt = C->getAPIntValue().urem(BitWidth);

      // For fshl, 0-shift returns the 1st arg.
      // For fshr, 0-shift returns the 2nd arg.
      if (Amt == 0) {
        Known = computeKnownBits(Op.getOperand(Opcode == ISD::FSHL ? 0 : 1),
                                 DemandedElts, Depth + 1);
        break;
      }

      // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
      // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
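      // Illustrative example (hypothetical values): fshl i8 X, Y, 3 computes
      // (X << 3) | (Y >> 5), so the result's low 3 bits come from Y's top 3
      // bits and the upper 5 bits come from X's low 5 bits; the OR of the
      // shifted known-bit masks below mirrors exactly that decomposition.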
      Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
      Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
      if (Opcode == ISD::FSHL) {
        Known.One <<= Amt;
        Known.Zero <<= Amt;
        Known2.One.lshrInPlace(BitWidth - Amt);
        Known2.Zero.lshrInPlace(BitWidth - Amt);
      } else {
        Known.One <<= BitWidth - Amt;
        Known.Zero <<= BitWidth - Amt;
        Known2.One.lshrInPlace(Amt);
        Known2.Zero.lshrInPlace(Amt);
      }
      Known.One |= Known2.One;
      Known.Zero |= Known2.Zero;
    }
    break;
  case ISD::SIGN_EXTEND_INREG: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    Known = Known.sextInReg(EVT.getScalarSizeInBits());
    break;
  }
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF: {
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // If we have a known 1, its position is our upper bound.
    unsigned PossibleTZ = Known2.countMaxTrailingZeros();
    unsigned LowBits = Log2_32(PossibleTZ) + 1;
    Known.Zero.setBitsFrom(LowBits);
    break;
  }
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF: {
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // If we have a known 1, its position is our upper bound.
    unsigned PossibleLZ = Known2.countMaxLeadingZeros();
    unsigned LowBits = Log2_32(PossibleLZ) + 1;
    Known.Zero.setBitsFrom(LowBits);
    break;
  }
  case ISD::CTPOP: {
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // If we know some of the bits are zero, they can't be one.
    unsigned PossibleOnes = Known2.countMaxPopulation();
    Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1);
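    // Illustrative example (hypothetical values): if at most 7 of the
    // operand's bits can be one, the popcount is at most 7 and fits in
    // Log2_32(7) + 1 == 3 bits, so every result bit from position 3 upwards
    // is known zero.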
    break;
  }
  case ISD::PARITY: {
    // Parity returns 0 everywhere but the LSB.
    Known.Zero.setBitsFrom(1);
    break;
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    const Constant *Cst = TLI->getTargetConstantFromLoad(LD);
    if (ISD::isNON_EXTLoad(LD) && Cst) {
      // Determine any common known bits from the loaded constant pool value.
      Type *CstTy = Cst->getType();
      if ((NumElts * BitWidth) == CstTy->getPrimitiveSizeInBits()) {
        // If it's a vector splat, then we can (quickly) reuse the scalar path.
        // NOTE: We assume all elements match and none are UNDEF.
        if (CstTy->isVectorTy()) {
          if (const Constant *Splat = Cst->getSplatValue()) {
            Cst = Splat;
            CstTy = Cst->getType();
          }
        }
        // TODO - do we need to handle different bitwidths?
        if (CstTy->isVectorTy() && BitWidth == CstTy->getScalarSizeInBits()) {
          // Iterate across all vector elements finding common known bits.
          Known.One.setAllBits();
          Known.Zero.setAllBits();
          for (unsigned i = 0; i != NumElts; ++i) {
            if (!DemandedElts[i])
              continue;
            if (Constant *Elt = Cst->getAggregateElement(i)) {
              if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
                const APInt &Value = CInt->getValue();
                Known.One &= Value;
                Known.Zero &= ~Value;
                continue;
              }
              if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
                APInt Value = CFP->getValueAPF().bitcastToAPInt();
                Known.One &= Value;
                Known.Zero &= ~Value;
                continue;
              }
            }
            Known.One.clearAllBits();
            Known.Zero.clearAllBits();
            break;
          }
        } else if (BitWidth == CstTy->getPrimitiveSizeInBits()) {
          if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
            Known = KnownBits::makeConstant(CInt->getValue());
          } else if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
            Known =
                KnownBits::makeConstant(CFP->getValueAPF().bitcastToAPInt());
          }
        }
      }
    } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
      // If this is a ZEXTLoad and we are looking at the loaded value.
      EVT VT = LD->getMemoryVT();
      unsigned MemBits = VT.getScalarSizeInBits();
      Known.Zero.setBitsFrom(MemBits);
    } else if (const MDNode *Ranges = LD->getRanges()) {
      if (LD->getExtensionType() == ISD::NON_EXTLOAD)
        computeKnownBitsFromRangeMetadata(*Ranges, Known);
    }
    break;
  }
  case ISD::ZERO_EXTEND_VECTOR_INREG: {
    EVT InVT = Op.getOperand(0).getValueType();
    APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
    Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
    Known = Known.zext(BitWidth);
    break;
  }
  case ISD::ZERO_EXTEND: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known = Known.zext(BitWidth);
    break;
  }
  case ISD::SIGN_EXTEND_VECTOR_INREG: {
    EVT InVT = Op.getOperand(0).getValueType();
    APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
    Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
    // If the sign bit is known to be zero or one, then sext will extend
    // it to the top bits, else it will just zext.
    Known = Known.sext(BitWidth);
    break;
  }
  case ISD::SIGN_EXTEND: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // If the sign bit is known to be zero or one, then sext will extend
    // it to the top bits, else it will just zext.
    Known = Known.sext(BitWidth);
    break;
  }
  case ISD::ANY_EXTEND_VECTOR_INREG: {
    EVT InVT = Op.getOperand(0).getValueType();
    APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
    Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
    Known = Known.anyext(BitWidth);
    break;
  }
  case ISD::ANY_EXTEND: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known = Known.anyext(BitWidth);
    break;
  }
  case ISD::TRUNCATE: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known = Known.trunc(BitWidth);
    break;
  }
  case ISD::AssertZext: {
    EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
    Known = computeKnownBits(Op.getOperand(0), Depth + 1);
    Known.Zero |= (~InMask);
    Known.One &= (~Known.Zero);
    break;
  }
  case ISD::AssertAlign: {
    unsigned LogOfAlign = Log2(cast<AssertAlignSDNode>(Op)->getAlign());
    assert(LogOfAlign != 0);
    // If a node is guaranteed to be aligned, set low zero bits accordingly as
    // well as clearing one bits.
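    // Illustrative example (hypothetical values): an AssertAlign of Align(8)
    // has LogOfAlign == 3, so bits [0, 3) are known zero and any stale one
    // bits in that range are cleared.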
    Known.Zero.setLowBits(LogOfAlign);
    Known.One.clearLowBits(LogOfAlign);
    break;
  }
  case ISD::FGETSIGN:
    // All bits are zero except the low bit.
    Known.Zero.setBitsFrom(1);
    break;
  case ISD::USUBO:
  case ISD::SSUBO:
    if (Op.getResNo() == 1) {
      // If we know the result of a setcc has the top bits zero, use this info.
      if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
              TargetLowering::ZeroOrOneBooleanContent &&
          BitWidth > 1)
        Known.Zero.setBitsFrom(1);
      break;
    }
    LLVM_FALLTHROUGH;
  case ISD::SUB:
  case ISD::SUBC: {
    assert(Op.getResNo() == 0 &&
           "We only compute knownbits for the difference here.");

    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known = KnownBits::computeForAddSub(/* Add */ false, /* NSW */ false,
                                        Known, Known2);
    break;
  }
  case ISD::UADDO:
  case ISD::SADDO:
  case ISD::ADDCARRY:
    if (Op.getResNo() == 1) {
      // If we know the result of a setcc has the top bits zero, use this info.
      if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
              TargetLowering::ZeroOrOneBooleanContent &&
          BitWidth > 1)
        Known.Zero.setBitsFrom(1);
      break;
    }
    LLVM_FALLTHROUGH;
  case ISD::ADD:
  case ISD::ADDC:
  case ISD::ADDE: {
    assert(Op.getResNo() == 0 && "We only compute knownbits for the sum here.");

    // With ADDE and ADDCARRY, a carry bit may be added in.
    KnownBits Carry(1);
    if (Opcode == ISD::ADDE)
      // Can't track carry from glue, set carry to unknown.
      Carry.resetAll();
    else if (Opcode == ISD::ADDCARRY)
      // TODO: Compute known bits for the carry operand. It's unclear whether
      // this is worth the trouble (how often would we find a known carry
      // bit?), and it is untested, but something like this might work:
      // Carry = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
      // Carry = Carry.zextOrTrunc(1, false);
      Carry.resetAll();
    else
      Carry.setAllZero();

    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known = KnownBits::computeForAddCarry(Known, Known2, Carry);
    break;
  }
  case ISD::SREM: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known = KnownBits::srem(Known, Known2);
    break;
  }
  case ISD::UREM: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known = KnownBits::urem(Known, Known2);
    break;
  }
  case ISD::EXTRACT_ELEMENT: {
    Known = computeKnownBits(Op.getOperand(0), Depth + 1);
    const unsigned Index = Op.getConstantOperandVal(1);
    const unsigned EltBitWidth = Op.getValueSizeInBits();

    // Remove low part of known bits mask.
    Known.Zero = Known.Zero.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
    Known.One = Known.One.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
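    // Illustrative example (hypothetical values): extracting element 1 of an
    // i64 as an i32 gives Index * EltBitWidth == 32, so the low 32 known bits
    // are dropped here and the truncation below keeps only the high half.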

    // Remove high part of known bits mask.
    Known = Known.trunc(EltBitWidth);
    break;
  }
  case ISD::EXTRACT_VECTOR_ELT: {
    SDValue InVec = Op.getOperand(0);
    SDValue EltNo = Op.getOperand(1);
    EVT VecVT = InVec.getValueType();
    // computeKnownBits not yet implemented for scalable vectors.
    if (VecVT.isScalableVector())
      break;
    const unsigned EltBitWidth = VecVT.getScalarSizeInBits();
    const unsigned NumSrcElts = VecVT.getVectorNumElements();

    // If BitWidth > EltBitWidth the value is any-extended, so we do not know
    // anything about the extended bits.
    if (BitWidth > EltBitWidth)
      Known = Known.trunc(EltBitWidth);

    // If we know the element index, just demand that vector element, else for
    // an unknown element index, ignore DemandedElts and demand them all.
    APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
    auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
    if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
      DemandedSrcElts =
          APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());

    Known = computeKnownBits(InVec, DemandedSrcElts, Depth + 1);
    if (BitWidth > EltBitWidth)
      Known = Known.anyext(BitWidth);
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    // If we know the element index, split the demand between the
    // source vector and the inserted element, otherwise assume we need
    // the original demanded vector elements and the value.
    SDValue InVec = Op.getOperand(0);
    SDValue InVal = Op.getOperand(1);
    SDValue EltNo = Op.getOperand(2);
    bool DemandedVal = true;
    APInt DemandedVecElts = DemandedElts;
    auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
    if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
      unsigned EltIdx = CEltNo->getZExtValue();
      DemandedVal = !!DemandedElts[EltIdx];
      DemandedVecElts.clearBit(EltIdx);
    }
    Known.One.setAllBits();
    Known.Zero.setAllBits();
    if (DemandedVal) {
      Known2 = computeKnownBits(InVal, Depth + 1);
      Known = KnownBits::commonBits(Known, Known2.zextOrTrunc(BitWidth));
    }
    if (!!DemandedVecElts) {
      Known2 = computeKnownBits(InVec, DemandedVecElts, Depth + 1);
      Known = KnownBits::commonBits(Known, Known2);
    }
    break;
  }
  case ISD::BITREVERSE: {
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known = Known2.reverseBits();
    break;
  }
  case ISD::BSWAP: {
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known = Known2.byteSwap();
    break;
  }
  case ISD::ABS: {
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known = Known2.abs();
    break;
  }
  case ISD::USUBSAT: {
    // The result of usubsat will never be larger than the LHS.
    Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known.Zero.setHighBits(Known2.countMinLeadingZeros());
    break;
  }
  case ISD::UMIN: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known = KnownBits::umin(Known, Known2);
    break;
  }
  case ISD::UMAX: {
    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known = KnownBits::umax(Known, Known2);
    break;
  }
  case ISD::SMIN:
  case ISD::SMAX: {
    // If we have a clamp pattern, we know that the number of sign bits will be
    // the minimum of the clamp min/max range.
    bool IsMax = (Opcode == ISD::SMAX);
    ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
    if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
      if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
        CstHigh =
            isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
    if (CstLow && CstHigh) {
      if (!IsMax)
        std::swap(CstLow, CstHigh);

      const APInt &ValueLow = CstLow->getAPIntValue();
      const APInt &ValueHigh = CstHigh->getAPIntValue();
      if (ValueLow.sle(ValueHigh)) {
        unsigned LowSignBits = ValueLow.getNumSignBits();
        unsigned HighSignBits = ValueHigh.getNumSignBits();
        unsigned MinSignBits = std::min(LowSignBits, HighSignBits);
        if (ValueLow.isNegative() && ValueHigh.isNegative()) {
          Known.One.setHighBits(MinSignBits);
          break;
        }
        if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) {
          Known.Zero.setHighBits(MinSignBits);
          break;
        }
      }
    }

    Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    if (IsMax)
      Known = KnownBits::smax(Known, Known2);
    else
      Known = KnownBits::smin(Known, Known2);
    break;
  }
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
    if (Op.getResNo() == 1) {
      // The boolean result conforms to getBooleanContents.
      // If we know the result of a setcc has the top bits zero, use this info.
      // We know that we have an integer-based boolean since these operations
      // are only available for integers.
      if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
              TargetLowering::ZeroOrOneBooleanContent &&
          BitWidth > 1)
        Known.Zero.setBitsFrom(1);
      break;
    }
    LLVM_FALLTHROUGH;
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_CLR:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD: {
    unsigned MemBits =
        cast<AtomicSDNode>(Op)->getMemoryVT().getScalarSizeInBits();
    // If we are looking at the loaded value.
    if (Op.getResNo() == 0) {
      if (TLI->getExtendForAtomicOps() == ISD::ZERO_EXTEND)
        Known.Zero.setBitsFrom(MemBits);
    }
    break;
  }
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    TLI->computeKnownBitsForFrameIndex(cast<FrameIndexSDNode>(Op)->getIndex(),
                                       Known, getMachineFunction());
    break;

  default:
    if (Opcode < ISD::BUILTIN_OP_END)
      break;
    LLVM_FALLTHROUGH;
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_VOID:
    // Allow the target to implement this method for its nodes.
    TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth);
    break;
  }

  assert(!Known.hasConflict() && "Bits known to be one AND zero?");
  return Known;
}

SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0,
                                                             SDValue N1) const {
  // X + 0 never overflows.
  if (isNullConstant(N1))
    return OFK_Never;

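  // If the maximum possible values of both operands sum without unsigned
  // wrap-around, the add can never overflow. Illustrative example
  // (hypothetical values): with i8 operands where N1's set bits are confined
  // to 0x0F and N0's to 0xF0, the max values 0x0F + 0xF0 = 0xFF do not wrap.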
  KnownBits N1Known = computeKnownBits(N1);
  if (N1Known.Zero.getBoolValue()) {
    KnownBits N0Known = computeKnownBits(N0);

    bool overflow;
    (void)N0Known.getMaxValue().uadd_ov(N1Known.getMaxValue(), overflow);
    if (!overflow)
      return OFK_Never;
  }

  // mulhi + 1 never overflows.
  if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 &&
      (N1Known.getMaxValue() & 0x01) == N1Known.getMaxValue())
    return OFK_Never;

  if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) {
    KnownBits N0Known = computeKnownBits(N0);

    if ((N0Known.getMaxValue() & 0x01) == N0Known.getMaxValue())
      return OFK_Never;
  }

  return OFK_Sometime;
}

bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
  EVT OpVT = Val.getValueType();
  unsigned BitWidth = OpVT.getScalarSizeInBits();

  // Is the constant a known power of 2?
  if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val))
    return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();

  // A left-shift of a constant one will have exactly one bit set because
  // shifting the bit off the end is undefined.
  if (Val.getOpcode() == ISD::SHL) {
    auto *C = isConstOrConstSplat(Val.getOperand(0));
    if (C && C->getAPIntValue() == 1)
      return true;
  }

  // Similarly, a logical right-shift of a constant sign-bit will have exactly
  // one bit set.
  if (Val.getOpcode() == ISD::SRL) {
    auto *C = isConstOrConstSplat(Val.getOperand(0));
    if (C && C->getAPIntValue().isSignMask())
      return true;
  }

  // Are all operands of a build vector constant powers of two?
  if (Val.getOpcode() == ISD::BUILD_VECTOR)
    if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) {
          if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E))
            return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
          return false;
        }))
      return true;

  // More could be done here, though the above checks are enough
  // to handle some common cases.

  // Fall back to computeKnownBits to catch other known cases.
  KnownBits Known = computeKnownBits(Val);
  return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
}

unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
  EVT VT = Op.getValueType();

  // TODO: Assume we don't know anything for now.
  if (VT.isScalableVector())
    return 1;

  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return ComputeNumSignBits(Op, DemandedElts, Depth);
}

unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
                                          unsigned Depth) const {
  EVT VT = Op.getValueType();
  assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!");
  unsigned VTBits = VT.getScalarSizeInBits();
  unsigned NumElts = DemandedElts.getBitWidth();
  unsigned Tmp, Tmp2;
  unsigned FirstAnswer = 1;

  if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
    const APInt &Val = C->getAPIntValue();
    return Val.getNumSignBits();
  }

  if (Depth >= MaxRecursionDepth)
    return 1; // Limit search depth.

  if (!DemandedElts || VT.isScalableVector())
    return 1; // No demanded elts, better to assume we don't know anything.

  unsigned Opcode = Op.getOpcode();
  switch (Opcode) {
  default: break;
  case ISD::AssertSext:
    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
    return VTBits - Tmp + 1;
  case ISD::AssertZext:
    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
    return VTBits - Tmp;

  case ISD::BUILD_VECTOR:
    Tmp = VTBits;
    for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) {
      if (!DemandedElts[i])
        continue;

      SDValue SrcOp = Op.getOperand(i);
      Tmp2 = ComputeNumSignBits(SrcOp, Depth + 1);

      // BUILD_VECTOR can implicitly truncate sources, we must handle this.
      if (SrcOp.getValueSizeInBits() != VTBits) {
        assert(SrcOp.getValueSizeInBits() > VTBits &&
               "Expected BUILD_VECTOR implicit truncation");
        unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits;
        Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1);
      }
      Tmp = std::min(Tmp, Tmp2);
    }
    return Tmp;

  case ISD::VECTOR_SHUFFLE: {
    // Collect the minimum number of sign bits that are shared by every vector
    // element referenced by the shuffle.
    APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
    assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
    for (unsigned i = 0; i != NumElts; ++i) {
      int M = SVN->getMaskElt(i);
      if (!DemandedElts[i])
        continue;
      // For UNDEF elements, we don't know anything about the common state of
      // the shuffle result.
      if (M < 0)
        return 1;
      if ((unsigned)M < NumElts)
        DemandedLHS.setBit((unsigned)M % NumElts);
      else
        DemandedRHS.setBit((unsigned)M % NumElts);
    }
    Tmp = std::numeric_limits<unsigned>::max();
    if (!!DemandedLHS)
      Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
    if (!!DemandedRHS) {
      Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
      Tmp = std::min(Tmp, Tmp2);
    }
    // If we don't know anything, early out and try computeKnownBits fall-back.
    if (Tmp == 1)
      break;
    assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
    return Tmp;
  }

  case ISD::BITCAST: {
    SDValue N0 = Op.getOperand(0);
    EVT SrcVT = N0.getValueType();
    unsigned SrcBits = SrcVT.getScalarSizeInBits();

    // Ignore bitcasts from unsupported types.
    if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint()))
      break;

    // Fast handling of 'identity' bitcasts.
    if (VTBits == SrcBits)
      return ComputeNumSignBits(N0, DemandedElts, Depth + 1);

    bool IsLE = getDataLayout().isLittleEndian();

    // Bitcast 'large element' scalar/vector to 'small element' vector.
    if ((SrcBits % VTBits) == 0) {
      assert(VT.isVector() && "Expected bitcast to vector");

      unsigned Scale = SrcBits / VTBits;
      APInt SrcDemandedElts(NumElts / Scale, 0);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i])
          SrcDemandedElts.setBit(i / Scale);

      // Fast case - sign splat can be simply split across the small elements.
      Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1);
      if (Tmp == SrcBits)
        return VTBits;

      // Slow case - determine how far the sign extends into each sub-element.
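      // Illustrative example (hypothetical values): bitcasting v2i32 to v4i16
      // on a little-endian target gives Scale = 2. If the i32 source has
      // Tmp = 20 sign bits, the high i16 of each pair (SubOffset = 0) retains
      // up to 16 of them, but the low i16 (SubOffset = 16) retains only
      // 20 - 16 = 4, so the minimum across demanded elements is 4.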
      Tmp2 = VTBits;
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          unsigned SubOffset = i % Scale;
          SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset);
          SubOffset = SubOffset * VTBits;
          if (Tmp <= SubOffset)
            return 1;
          Tmp2 = std::min(Tmp2, Tmp - SubOffset);
        }
      return Tmp2;
    }
    break;
  }

  case ISD::SIGN_EXTEND:
    Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits();
    return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1) + Tmp;
  case ISD::SIGN_EXTEND_INREG:
    // Max of the input and what this extends.
    Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
    Tmp = VTBits - Tmp + 1;
    Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
    return std::max(Tmp, Tmp2);
  case ISD::SIGN_EXTEND_VECTOR_INREG: {
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();
    APInt DemandedSrcElts = DemandedElts.zextOrSelf(SrcVT.getVectorNumElements());
    Tmp = VTBits - SrcVT.getScalarSizeInBits();
    return ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1) + Tmp;
  }
  case ISD::SRA:
    Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
    // SRA X, C -> adds C sign bits.
    if (const APInt *ShAmt =
            getValidMinimumShiftAmountConstant(Op, DemandedElts))
      Tmp = std::min<uint64_t>(Tmp + ShAmt->getZExtValue(), VTBits);
    return Tmp;
  case ISD::SHL:
    if (const APInt *ShAmt =
            getValidMaximumShiftAmountConstant(Op, DemandedElts)) {
      // shl destroys sign bits, ensure it doesn't shift out all sign bits.
      Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
      if (ShAmt->ult(Tmp))
        return Tmp - ShAmt->getZExtValue();
    }
    break;
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR: // NOT is handled here.
    // Logical binary ops preserve the number of sign bits at the worst.
    Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
    if (Tmp != 1) {
      Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
      FirstAnswer = std::min(Tmp, Tmp2);
      // We computed what we know about the sign bits as our first
      // answer. Now proceed to the generic code that uses
      // computeKnownBits, and pick whichever answer is better.
    }
    break;

  case ISD::SELECT:
  case ISD::VSELECT:
    Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
    if (Tmp == 1) return 1; // Early out.
    Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth + 1);
    return std::min(Tmp, Tmp2);
  case ISD::SELECT_CC:
    Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth + 1);
    if (Tmp == 1) return 1; // Early out.
    Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth + 1);
    return std::min(Tmp, Tmp2);

  case ISD::SMIN:
  case ISD::SMAX: {
    // If we have a clamp pattern, we know that the number of sign bits will be
    // the minimum of the clamp min/max range.
    bool IsMax = (Opcode == ISD::SMAX);
    ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
    if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
      if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
        CstHigh =
            isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
    if (CstLow && CstHigh) {
      if (!IsMax)
        std::swap(CstLow, CstHigh);
      if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) {
        Tmp = CstLow->getAPIntValue().getNumSignBits();
        Tmp2 = CstHigh->getAPIntValue().getNumSignBits();
        return std::min(Tmp, Tmp2);
      }
    }

    // Fallback - just get the minimum number of sign bits of the operands.
    Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
    if (Tmp == 1)
      return 1; // Early out.
    Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
    return std::min(Tmp, Tmp2);
  }
  case ISD::UMIN:
  case ISD::UMAX:
    Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
    if (Tmp == 1)
      return 1; // Early out.
    Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
    return std::min(Tmp, Tmp2);
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:
    if (Op.getResNo() != 1)
      break;
    // The boolean result conforms to getBooleanContents. Fall through.
    // If setcc returns 0/-1, all bits are sign bits.
    // We know that we have an integer-based boolean since these operations
    // are only available for integers.
    if (TLI->getBooleanContents(VT.isVector(), false) ==
        TargetLowering::ZeroOrNegativeOneBooleanContent)
      return VTBits;
    break;
  case ISD::SETCC:
  case ISD::STRICT_FSETCC:
  case ISD::STRICT_FSETCCS: {
    unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
    // If setcc returns 0/-1, all bits are sign bits.
    if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
        TargetLowering::ZeroOrNegativeOneBooleanContent)
      return VTBits;
    break;
  }
  case ISD::ROTL:
  case ISD::ROTR:
    Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);

    // If we're rotating a 0/-1 value, then it stays a 0/-1 value.
    if (Tmp == VTBits)
      return VTBits;

    if (ConstantSDNode *C =
            isConstOrConstSplat(Op.getOperand(1), DemandedElts)) {
      unsigned RotAmt = C->getAPIntValue().urem(VTBits);

      // Handle rotate right by N like a rotate left by 32-N.
      if (Opcode == ISD::ROTR)
        RotAmt = (VTBits - RotAmt) % VTBits;

      // If we aren't rotating out all of the known-in sign bits, return the
      // number that are left. This handles rotl(sext(x), 1) for example.
      if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt);
    }
    break;
  case ISD::ADD:
  case ISD::ADDC:
    // Add can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
    if (Tmp == 1) return 1; // Early out.

    // Special case decrementing a value (ADD X, -1):
    if (ConstantSDNode *CRHS =
            isConstOrConstSplat(Op.getOperand(1), DemandedElts))
      if (CRHS->isAllOnesValue()) {
        KnownBits Known =
            computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((Known.Zero | 1).isAllOnesValue())
          return VTBits;

        // If we are subtracting one from a positive number, there is no carry
        // out of the result.
        if (Known.isNonNegative())
          return Tmp;
      }

    Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
    if (Tmp2 == 1) return 1; // Early out.
    return std::min(Tmp, Tmp2) - 1;
  case ISD::SUB:
    Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
    if (Tmp2 == 1) return 1; // Early out.

    // Handle NEG.
    if (ConstantSDNode *CLHS =
            isConstOrConstSplat(Op.getOperand(0), DemandedElts))
      if (CLHS->isNullValue()) {
        KnownBits Known =
            computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((Known.Zero | 1).isAllOnesValue())
          return VTBits;

        // If the input is known to be positive (the sign bit is known clear),
        // the output of the NEG has the same number of sign bits as the input.
        if (Known.isNonNegative())
          return Tmp2;

        // Otherwise, we treat this like a SUB.
      }

    // Sub can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
    if (Tmp == 1) return 1; // Early out.
    return std::min(Tmp, Tmp2) - 1;
  case ISD::MUL: {
    // The output of the Mul can be at most twice the valid bits in the inputs.
    unsigned SignBitsOp0 = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
    if (SignBitsOp0 == 1)
      break;
    unsigned SignBitsOp1 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
    if (SignBitsOp1 == 1)
      break;
    unsigned OutValidBits =
        (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1);
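    // Illustrative example (hypothetical values): for an i32 multiply with 20
    // sign bits on one operand and 26 on the other, the operands occupy 13
    // and 7 value bits, OutValidBits = 20, and the product is known to keep
    // 32 - 20 + 1 = 13 sign bits.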
    return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1;
  }
  case ISD::SREM:
    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero. The magnitude of the result should be less than or
    // equal to the magnitude of the LHS. Therefore, the result should have
    // at least as many sign bits as the left hand side.
    return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
  case ISD::TRUNCATE: {
    // Check if the sign bits of source go down as far as the truncated value.
    unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
    unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
    if (NumSrcSignBits > (NumSrcBits - VTBits))
      return NumSrcSignBits - (NumSrcBits - VTBits);
    break;
  }
  case ISD::EXTRACT_ELEMENT: {
    const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
    const int BitWidth = Op.getValueSizeInBits();
    const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;

    // Get the reverse index (starting from 1); the Op1 value indexes elements
    // from the little end, while the sign starts at the big end.
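    // Illustrative example (hypothetical values): extracting the i32 halves
    // of an i64 with KnownSign = 40 gives rIndex 1 for element 0
    // (40 - 32 = 8 sign bits survive) and rIndex 0 for element 1 (clamped to
    // the full 32).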
    const int rIndex = Items - 1 - Op.getConstantOperandVal(1);

    // If the sign portion ends in our element the subtraction gives the
    // correct result. Otherwise it gives either a negative or an
    // out-of-range (> bitwidth) result.
    return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
  }
  case ISD::INSERT_VECTOR_ELT: {
    // If we know the element index, split the demand between the
    // source vector and the inserted element, otherwise assume we need
    // the original demanded vector elements and the value.
    SDValue InVec = Op.getOperand(0);
    SDValue InVal = Op.getOperand(1);
    SDValue EltNo = Op.getOperand(2);
    bool DemandedVal = true;
    APInt DemandedVecElts = DemandedElts;
    auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
    if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
      unsigned EltIdx = CEltNo->getZExtValue();
      DemandedVal = !!DemandedElts[EltIdx];
      DemandedVecElts.clearBit(EltIdx);
    }
    Tmp = std::numeric_limits<unsigned>::max();
    if (DemandedVal) {
      // TODO - handle implicit truncation of inserted elements.
      if (InVal.getScalarValueSizeInBits() != VTBits)
        break;
      Tmp2 = ComputeNumSignBits(InVal, Depth + 1);
      Tmp = std::min(Tmp, Tmp2);
    }
    if (!!DemandedVecElts) {
      Tmp2 = ComputeNumSignBits(InVec, DemandedVecElts, Depth + 1);
      Tmp = std::min(Tmp, Tmp2);
    }
    assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
    return Tmp;
  }
  case ISD::EXTRACT_VECTOR_ELT: {
    SDValue InVec = Op.getOperand(0);
    SDValue EltNo = Op.getOperand(1);
    EVT VecVT = InVec.getValueType();
    // ComputeNumSignBits not yet implemented for scalable vectors.
    if (VecVT.isScalableVector())
      break;
    const unsigned BitWidth = Op.getValueSizeInBits();
    const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
    const unsigned NumSrcElts = VecVT.getVectorNumElements();

    // If BitWidth > EltBitWidth the value is any-extended, and we do not know
    // anything about sign bits. But if the sizes match we can derive knowledge
    // about sign bits from the vector operand.
    if (BitWidth != EltBitWidth)
      break;

    // If we know the element index, just demand that vector element, else for
    // an unknown element index, ignore DemandedElts and demand them all.
    APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
    auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
    if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
      DemandedSrcElts =
          APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());

    return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1);
  }
  case ISD::EXTRACT_SUBVECTOR: {
    // Offset the demanded elts by the subvector index.
    SDValue Src = Op.getOperand(0);
    // Bail until we can represent demanded elements for scalable vectors.
    if (Src.getValueType().isScalableVector())
      break;
    uint64_t Idx = Op.getConstantOperandVal(1);
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
    return ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
  }
  case ISD::CONCAT_VECTORS: {
    // Determine the minimum number of sign bits across all demanded
    // elts of the input vectors. Early out if the result is already 1.
    Tmp = std::numeric_limits<unsigned>::max();
    EVT SubVectorVT = Op.getOperand(0).getValueType();
    unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
    unsigned NumSubVectors = Op.getNumOperands();
    for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
      APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
      DemandedSub = DemandedSub.trunc(NumSubVectorElts);
      if (!DemandedSub)
        continue;
      Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1);
      Tmp = std::min(Tmp, Tmp2);
    }
    assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
    return Tmp;
  }
  case ISD::INSERT_SUBVECTOR: {
    // Demand any elements from the subvector and the remainder from the src
    // it's inserted into.
    SDValue Src = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    uint64_t Idx = Op.getConstantOperandVal(2);
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
    APInt DemandedSrcElts = DemandedElts;
    DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx);

    Tmp = std::numeric_limits<unsigned>::max();
    if (!!DemandedSubElts) {
      Tmp = ComputeNumSignBits(Sub, DemandedSubElts, Depth + 1);
      if (Tmp == 1)
        return 1; // early-out
    }
    if (!!DemandedSrcElts) {
      Tmp2 = ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
      Tmp = std::min(Tmp, Tmp2);
    }
    assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
    return Tmp;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_CLR:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD: {
    Tmp = cast<AtomicSDNode>(Op)->getMemoryVT().getScalarSizeInBits();
    // If we are looking at the loaded value.
    if (Op.getResNo() == 0) {
      if (Tmp == VTBits)
        return 1; // early-out
      if (TLI->getExtendForAtomicOps() == ISD::SIGN_EXTEND)
        return VTBits - Tmp + 1;
      if (TLI->getExtendForAtomicOps() == ISD::ZERO_EXTEND)
        return VTBits - Tmp;
    }
    break;
  }
  }

  // If we are looking at the loaded value of the SDNode.
  if (Op.getResNo() == 0) {
    // Handle LOADX separately here. EXTLOAD case will fallthrough.
    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
      unsigned ExtType = LD->getExtensionType();
      switch (ExtType) {
      default: break;
      case ISD::SEXTLOAD: // e.g. i16->i32 = '17' bits known.
        Tmp = LD->getMemoryVT().getScalarSizeInBits();
        return VTBits - Tmp + 1;
      case ISD::ZEXTLOAD: // e.g. i16->i32 = '16' bits known.
        Tmp = LD->getMemoryVT().getScalarSizeInBits();
        return VTBits - Tmp;
      case ISD::NON_EXTLOAD:
        if (const Constant *Cst = TLI->getTargetConstantFromLoad(LD)) {
          // We only need to handle vectors - computeKnownBits should handle
          // scalar cases.
          Type *CstTy = Cst->getType();
          if (CstTy->isVectorTy() &&
              (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits()) {
            Tmp = VTBits;
            for (unsigned i = 0; i != NumElts; ++i) {
              if (!DemandedElts[i])
                continue;
              if (Constant *Elt = Cst->getAggregateElement(i)) {
                if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
                  const APInt &Value = CInt->getValue();
                  Tmp = std::min(Tmp, Value.getNumSignBits());
                  continue;
                }
                if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
                  APInt Value = CFP->getValueAPF().bitcastToAPInt();
                  Tmp = std::min(Tmp, Value.getNumSignBits());
                  continue;
                }
              }
              // Unknown type. Conservatively assume no bits match sign bit.
              return 1;
            }
            return Tmp;
          }
        }
        break;
      }
    }
  }

  // Allow the target to implement this method for its nodes.
  if (Opcode >= ISD::BUILTIN_OP_END ||
      Opcode == ISD::INTRINSIC_WO_CHAIN ||
      Opcode == ISD::INTRINSIC_W_CHAIN ||
      Opcode == ISD::INTRINSIC_VOID) {
    unsigned NumBits =
        TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth);
    if (NumBits > 1)
      FirstAnswer = std::max(FirstAnswer, NumBits);
  }

  // Finally, if we can prove that the top bits of the result are 0's or 1's,
  // use this information.
  KnownBits Known = computeKnownBits(Op, DemandedElts, Depth);

  APInt Mask;
  if (Known.isNonNegative()) { // sign bit is 0
    Mask = Known.Zero;
  } else if (Known.isNegative()) { // sign bit is 1;
    Mask = Known.One;
  } else {
    // Nothing known.
    return FirstAnswer;
  }

  // Okay, we know that the sign bit in Mask is set. Use CLO to determine
  // the number of identical bits in the top of the input value.
  Mask <<= Mask.getBitWidth() - VTBits;
  return std::max(FirstAnswer, Mask.countLeadingOnes());
}

bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
  if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
      !isa<ConstantSDNode>(Op.getOperand(1)))
    return false;

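  // An OR only behaves like an ADD when the constant's set bits cannot
  // overlap the base's possibly-set bits. Illustrative example (hypothetical
  // values): (or (shl X, 4), 3) adds 3 to the shifted base, but (or X, 3)
  // may not if X's low bits can be nonzero.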
4259 if (Op.getOpcode() == ISD::OR &&
4260 !MaskedValueIsZero(Op.getOperand(0), Op.getConstantOperandAPInt(1)))
4261 return false;
4262
4263 return true;
4264 }
4265
isKnownNeverNaN(SDValue Op,bool SNaN,unsigned Depth) const4266 bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN, unsigned Depth) const {
4267 // If we're told that NaNs won't happen, assume they won't.
4268 if (getTarget().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs())
4269 return true;
4270
4271 if (Depth >= MaxRecursionDepth)
4272 return false; // Limit search depth.
4273
4274 // TODO: Handle vectors.
4275 // If the value is a constant, we can obviously see if it is a NaN or not.
4276 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) {
4277 return !C->getValueAPF().isNaN() ||
4278 (SNaN && !C->getValueAPF().isSignaling());
4279 }
4280
4281 unsigned Opcode = Op.getOpcode();
4282 switch (Opcode) {
4283 case ISD::FADD:
4284 case ISD::FSUB:
4285 case ISD::FMUL:
4286 case ISD::FDIV:
4287 case ISD::FREM:
4288 case ISD::FSIN:
4289 case ISD::FCOS: {
4290 if (SNaN)
4291 return true;
4292 // TODO: Need isKnownNeverInfinity
4293 return false;
4294 }
4295 case ISD::FCANONICALIZE:
4296 case ISD::FEXP:
4297 case ISD::FEXP2:
4298 case ISD::FTRUNC:
4299 case ISD::FFLOOR:
4300 case ISD::FCEIL:
4301 case ISD::FROUND:
4302 case ISD::FROUNDEVEN:
4303 case ISD::FRINT:
4304 case ISD::FNEARBYINT: {
4305 if (SNaN)
4306 return true;
4307 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4308 }
4309 case ISD::FABS:
4310 case ISD::FNEG:
4311 case ISD::FCOPYSIGN: {
4312 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4313 }
4314 case ISD::SELECT:
4315 return isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4316 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4317 case ISD::FP_EXTEND:
4318 case ISD::FP_ROUND: {
4319 if (SNaN)
4320 return true;
4321 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4322 }
4323 case ISD::SINT_TO_FP:
4324 case ISD::UINT_TO_FP:
4325 return true;
4326 case ISD::FMA:
4327 case ISD::FMAD: {
4328 if (SNaN)
4329 return true;
4330 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4331 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4332 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4333 }
4334 case ISD::FSQRT: // Need is known positive
4335 case ISD::FLOG:
4336 case ISD::FLOG2:
4337 case ISD::FLOG10:
4338 case ISD::FPOWI:
4339 case ISD::FPOW: {
4340 if (SNaN)
4341 return true;
4342 // TODO: Refine on operand
4343 return false;
4344 }
4345 case ISD::FMINNUM:
4346 case ISD::FMAXNUM: {
4347 // Only one needs to be known not-nan, since it will be returned if the
4348 // other ends up being one.
4349 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) ||
4350 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4351 }
4352 case ISD::FMINNUM_IEEE:
4353 case ISD::FMAXNUM_IEEE: {
4354 if (SNaN)
4355 return true;
4356 // This can return a NaN if either operand is an sNaN, or if both operands
4357 // are NaN.
4358 return (isKnownNeverNaN(Op.getOperand(0), false, Depth + 1) &&
4359 isKnownNeverSNaN(Op.getOperand(1), Depth + 1)) ||
4360 (isKnownNeverNaN(Op.getOperand(1), false, Depth + 1) &&
4361 isKnownNeverSNaN(Op.getOperand(0), Depth + 1));
4362 }
4363 case ISD::FMINIMUM:
4364 case ISD::FMAXIMUM: {
4365 // TODO: Does this quiet or return the origina NaN as-is?
4366 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4367 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4368 }
4369 case ISD::EXTRACT_VECTOR_ELT: {
4370 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4371 }
4372 default:
4373 if (Opcode >= ISD::BUILTIN_OP_END ||
4374 Opcode == ISD::INTRINSIC_WO_CHAIN ||
4375 Opcode == ISD::INTRINSIC_W_CHAIN ||
4376 Opcode == ISD::INTRINSIC_VOID) {
4377 return TLI->isKnownNeverNaNForTargetNode(Op, *this, SNaN, Depth);
4378 }
4379
4380 return false;
4381 }
4382 }
4383
isKnownNeverZeroFloat(SDValue Op) const4384 bool SelectionDAG::isKnownNeverZeroFloat(SDValue Op) const {
4385 assert(Op.getValueType().isFloatingPoint() &&
4386 "Floating point type expected");
4387
4388 // If the value is a constant, we can obviously see if it is a zero or not.
4389 // TODO: Add BuildVector support.
4390 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
4391 return !C->isZero();
4392 return false;
4393 }
4394
isKnownNeverZero(SDValue Op) const4395 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
4396 assert(!Op.getValueType().isFloatingPoint() &&
4397 "Floating point types unsupported - use isKnownNeverZeroFloat");
4398
4399 // If the value is a constant, we can obviously see if it is a zero or not.
4400 if (ISD::matchUnaryPredicate(
4401 Op, [](ConstantSDNode *C) { return !C->isNullValue(); }))
4402 return true;
4403
4404 // TODO: Recognize more cases here.
4405 switch (Op.getOpcode()) {
4406 default: break;
4407 case ISD::OR:
4408 if (isKnownNeverZero(Op.getOperand(1)) ||
4409 isKnownNeverZero(Op.getOperand(0)))
4410 return true;
4411 break;
4412 }
4413
4414 return false;
4415 }
4416
isEqualTo(SDValue A,SDValue B) const4417 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
4418 // Check the obvious case.
4419 if (A == B) return true;
4420
4421 // For for negative and positive zero.
4422 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
4423 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
4424 if (CA->isZero() && CB->isZero()) return true;
4425
4426 // Otherwise they may not be equal.
4427 return false;
4428 }
4429
4430 // FIXME: unify with llvm::haveNoCommonBitsSet.
4431 // FIXME: could also handle masked merge pattern (X & ~M) op (Y & M)
haveNoCommonBitsSet(SDValue A,SDValue B) const4432 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
4433 assert(A.getValueType() == B.getValueType() &&
4434 "Values must have the same type");
4435 return KnownBits::haveNoCommonBitsSet(computeKnownBits(A),
4436 computeKnownBits(B));
4437 }
4438
FoldSTEP_VECTOR(const SDLoc & DL,EVT VT,SDValue Step,SelectionDAG & DAG)4439 static SDValue FoldSTEP_VECTOR(const SDLoc &DL, EVT VT, SDValue Step,
4440 SelectionDAG &DAG) {
4441 if (cast<ConstantSDNode>(Step)->isNullValue())
4442 return DAG.getConstant(0, DL, VT);
4443
4444 return SDValue();
4445 }
4446
FoldBUILD_VECTOR(const SDLoc & DL,EVT VT,ArrayRef<SDValue> Ops,SelectionDAG & DAG)4447 static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT,
4448 ArrayRef<SDValue> Ops,
4449 SelectionDAG &DAG) {
4450 int NumOps = Ops.size();
4451 assert(NumOps != 0 && "Can't build an empty vector!");
4452 assert(!VT.isScalableVector() &&
4453 "BUILD_VECTOR cannot be used with scalable types");
4454 assert(VT.getVectorNumElements() == (unsigned)NumOps &&
4455 "Incorrect element count in BUILD_VECTOR!");
4456
4457 // BUILD_VECTOR of UNDEFs is UNDEF.
4458 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4459 return DAG.getUNDEF(VT);
4460
4461 // A BUILD_VECTOR of sequential extracts from a single source vector of the same type folds to that source vector (identity).
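// e.g. (build_vector (extract_elt X, 0), (extract_elt X, 1),
// (extract_elt X, 2), (extract_elt X, 3)) folds to X for X : v4i32.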
4462 SDValue IdentitySrc;
4463 bool IsIdentity = true;
4464 for (int i = 0; i != NumOps; ++i) {
4465 if (Ops[i].getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
4466 Ops[i].getOperand(0).getValueType() != VT ||
4467 (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) ||
4468 !isa<ConstantSDNode>(Ops[i].getOperand(1)) ||
4469 cast<ConstantSDNode>(Ops[i].getOperand(1))->getAPIntValue() != i) {
4470 IsIdentity = false;
4471 break;
4472 }
4473 IdentitySrc = Ops[i].getOperand(0);
4474 }
4475 if (IsIdentity)
4476 return IdentitySrc;
4477
4478 return SDValue();
4479 }
4480
4481 /// Try to simplify vector concatenation to an input value, undef, or build
4482 /// vector.
4483 static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
4484 ArrayRef<SDValue> Ops,
4485 SelectionDAG &DAG) {
4486 assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
4487 assert(llvm::all_of(Ops,
4488 [Ops](SDValue Op) {
4489 return Ops[0].getValueType() == Op.getValueType();
4490 }) &&
4491 "Concatenation of vectors with inconsistent value types!");
4492 assert((Ops[0].getValueType().getVectorElementCount() * Ops.size()) ==
4493 VT.getVectorElementCount() &&
4494 "Incorrect element count in vector concatenation!");
4495
4496 if (Ops.size() == 1)
4497 return Ops[0];
4498
4499 // Concat of UNDEFs is UNDEF.
4500 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4501 return DAG.getUNDEF(VT);
4502
4503 // Scan the operands and look for extract operations from a single source
4504 // that correspond to insertion at the same location via this concatenation:
4505 // concat (extract X, 0*subvec_elts), (extract X, 1*subvec_elts), ...
4506 SDValue IdentitySrc;
4507 bool IsIdentity = true;
4508 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
4509 SDValue Op = Ops[i];
4510 unsigned IdentityIndex = i * Op.getValueType().getVectorMinNumElements();
4511 if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
4512 Op.getOperand(0).getValueType() != VT ||
4513 (IdentitySrc && Op.getOperand(0) != IdentitySrc) ||
4514 Op.getConstantOperandVal(1) != IdentityIndex) {
4515 IsIdentity = false;
4516 break;
4517 }
4518 assert((!IdentitySrc || IdentitySrc == Op.getOperand(0)) &&
4519 "Unexpected identity source vector for concat of extracts");
4520 IdentitySrc = Op.getOperand(0);
4521 }
4522 if (IsIdentity) {
4523 assert(IdentitySrc && "Failed to set source vector of extracts");
4524 return IdentitySrc;
4525 }
4526
4527 // The code below this point is only designed to work for fixed width
4528 // vectors, so we bail out for now.
4529 if (VT.isScalableVector())
4530 return SDValue();
4531
4532 // A CONCAT_VECTOR with all UNDEF/BUILD_VECTOR operands can be
4533 // simplified to one big BUILD_VECTOR.
4534 // FIXME: Add support for SCALAR_TO_VECTOR as well.
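// e.g. concat (build_vector a, b), undef, (build_vector c, d) can become a
// single (build_vector a, b, undef, undef, c, d).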
4535 EVT SVT = VT.getScalarType();
4536 SmallVector<SDValue, 16> Elts;
4537 for (SDValue Op : Ops) {
4538 EVT OpVT = Op.getValueType();
4539 if (Op.isUndef())
4540 Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
4541 else if (Op.getOpcode() == ISD::BUILD_VECTOR)
4542 Elts.append(Op->op_begin(), Op->op_end());
4543 else
4544 return SDValue();
4545 }
4546
4547 // BUILD_VECTOR requires all inputs to be of the same type, find the
4548 // maximum type and extend them all.
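// e.g. if the result's scalar type is i8 but one input BUILD_VECTOR carries
// promoted i16 operands, every element is widened to i16 (zext when the
// target reports it as free, otherwise sext) so the single BUILD_VECTOR
// emitted below has uniformly typed, implicitly truncated operands.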
4549 for (SDValue Op : Elts)
4550 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
4551
4552 if (SVT.bitsGT(VT.getScalarType())) {
4553 for (SDValue &Op : Elts) {
4554 if (Op.isUndef())
4555 Op = DAG.getUNDEF(SVT);
4556 else
4557 Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
4558 ? DAG.getZExtOrTrunc(Op, DL, SVT)
4559 : DAG.getSExtOrTrunc(Op, DL, SVT);
4560 }
4561 }
4562
4563 SDValue V = DAG.getBuildVector(VT, DL, Elts);
4564 NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG);
4565 return V;
4566 }
4567
4568 /// Gets or creates the specified node.
4569 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
4570 FoldingSetNodeID ID;
4571 AddNodeIDNode(ID, Opcode, getVTList(VT), None);
4572 void *IP = nullptr;
4573 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
4574 return SDValue(E, 0);
4575
4576 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4577 getVTList(VT));
4578 CSEMap.InsertNode(N, IP);
4579
4580 InsertNode(N);
4581 SDValue V = SDValue(N, 0);
4582 NewSDValueDbgMsg(V, "Creating new node: ", this);
4583 return V;
4584 }
4585
4586 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4587 SDValue Operand) {
4588 SDNodeFlags Flags;
4589 if (Inserter)
4590 Flags = Inserter->getFlags();
4591 return getNode(Opcode, DL, VT, Operand, Flags);
4592 }
4593
4594 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4595 SDValue Operand, const SDNodeFlags Flags) {
4596 assert(Operand.getOpcode() != ISD::DELETED_NODE &&
4597 "Operand is DELETED_NODE!");
4598 // Constant fold unary operations with an integer constant operand. Even
4599 // opaque constants are folded, because folding a unary operation doesn't
4600 // create new constants with different values. Nevertheless, the opaque
4601 // flag is preserved during folding to prevent future folding with
4602 // other constants.
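// e.g. (zext (i8 opaque constant 5) to i32) still folds to an i32 constant
// 5, but the result keeps the opaque flag so later combines won't merge it
// with neighbouring constants.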
4603 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) {
4604 const APInt &Val = C->getAPIntValue();
4605 switch (Opcode) {
4606 default: break;
4607 case ISD::SIGN_EXTEND:
4608 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
4609 C->isTargetOpcode(), C->isOpaque());
4610 case ISD::TRUNCATE:
4611 if (C->isOpaque())
4612 break;
4613 LLVM_FALLTHROUGH;
4614 case ISD::ANY_EXTEND:
4615 case ISD::ZERO_EXTEND:
4616 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
4617 C->isTargetOpcode(), C->isOpaque());
4618 case ISD::UINT_TO_FP:
4619 case ISD::SINT_TO_FP: {
4620 APFloat apf(EVTToAPFloatSemantics(VT),
4621 APInt::getNullValue(VT.getSizeInBits()));
4622 (void)apf.convertFromAPInt(Val,
4623 Opcode==ISD::SINT_TO_FP,
4624 APFloat::rmNearestTiesToEven);
4625 return getConstantFP(apf, DL, VT);
4626 }
4627 case ISD::BITCAST:
4628 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16)
4629 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT);
4630 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
4631 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT);
4632 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
4633 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT);
4634 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128)
4635 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT);
4636 break;
4637 case ISD::ABS:
4638 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(),
4639 C->isOpaque());
4640 case ISD::BITREVERSE:
4641 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(),
4642 C->isOpaque());
4643 case ISD::BSWAP:
4644 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(),
4645 C->isOpaque());
4646 case ISD::CTPOP:
4647 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(),
4648 C->isOpaque());
4649 case ISD::CTLZ:
4650 case ISD::CTLZ_ZERO_UNDEF:
4651 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(),
4652 C->isOpaque());
4653 case ISD::CTTZ:
4654 case ISD::CTTZ_ZERO_UNDEF:
4655 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(),
4656 C->isOpaque());
4657 case ISD::FP16_TO_FP: {
4658 bool Ignored;
4659 APFloat FPV(APFloat::IEEEhalf(),
4660 (Val.getBitWidth() == 16) ? Val : Val.trunc(16));
4661
4662 // This can return overflow, underflow, or inexact; we don't care.
4663 // FIXME need to be more flexible about rounding mode.
4664 (void)FPV.convert(EVTToAPFloatSemantics(VT),
4665 APFloat::rmNearestTiesToEven, &Ignored);
4666 return getConstantFP(FPV, DL, VT);
4667 }
4668 case ISD::STEP_VECTOR: {
4669 if (SDValue V = FoldSTEP_VECTOR(DL, VT, Operand, *this))
4670 return V;
4671 break;
4672 }
4673 }
4674 }
4675
4676 // Constant fold unary operations with a floating point constant operand.
4677 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) {
4678 APFloat V = C->getValueAPF(); // make copy
4679 switch (Opcode) {
4680 case ISD::FNEG:
4681 V.changeSign();
4682 return getConstantFP(V, DL, VT);
4683 case ISD::FABS:
4684 V.clearSign();
4685 return getConstantFP(V, DL, VT);
4686 case ISD::FCEIL: {
4687 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
4688 if (fs == APFloat::opOK || fs == APFloat::opInexact)
4689 return getConstantFP(V, DL, VT);
4690 break;
4691 }
4692 case ISD::FTRUNC: {
4693 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
4694 if (fs == APFloat::opOK || fs == APFloat::opInexact)
4695 return getConstantFP(V, DL, VT);
4696 break;
4697 }
4698 case ISD::FFLOOR: {
4699 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
4700 if (fs == APFloat::opOK || fs == APFloat::opInexact)
4701 return getConstantFP(V, DL, VT);
4702 break;
4703 }
4704 case ISD::FP_EXTEND: {
4705 bool ignored;
4706 // This can return overflow, underflow, or inexact; we don't care.
4707 // FIXME need to be more flexible about rounding mode.
4708 (void)V.convert(EVTToAPFloatSemantics(VT),
4709 APFloat::rmNearestTiesToEven, &ignored);
4710 return getConstantFP(V, DL, VT);
4711 }
4712 case ISD::FP_TO_SINT:
4713 case ISD::FP_TO_UINT: {
4714 bool ignored;
4715 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT);
4716 // FIXME need to be more flexible about rounding mode.
4717 APFloat::opStatus s =
4718 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored);
4719 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual
4720 break;
4721 return getConstant(IntVal, DL, VT);
4722 }
4723 case ISD::BITCAST:
4724 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16)
4725 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
4726 if (VT == MVT::i16 && C->getValueType(0) == MVT::bf16)
4727 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
4728 if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
4729 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
4730 if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
4731 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
4732 break;
4733 case ISD::FP_TO_FP16: {
4734 bool Ignored;
4735 // This can return overflow, underflow, or inexact; we don't care.
4736 // FIXME need to be more flexible about rounding mode.
4737 (void)V.convert(APFloat::IEEEhalf(),
4738 APFloat::rmNearestTiesToEven, &Ignored);
4739 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
4740 }
4741 }
4742 }
4743
4744 // Constant fold unary operations with a vector integer or float operand.
4745 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) {
4746 if (BV->isConstant()) {
4747 switch (Opcode) {
4748 default:
4749 // FIXME: Entirely reasonable to perform folding of other unary
4750 // operations here as the need arises.
4751 break;
4752 case ISD::FNEG:
4753 case ISD::FABS:
4754 case ISD::FCEIL:
4755 case ISD::FTRUNC:
4756 case ISD::FFLOOR:
4757 case ISD::FP_EXTEND:
4758 case ISD::FP_TO_SINT:
4759 case ISD::FP_TO_UINT:
4760 case ISD::TRUNCATE:
4761 case ISD::ANY_EXTEND:
4762 case ISD::ZERO_EXTEND:
4763 case ISD::SIGN_EXTEND:
4764 case ISD::UINT_TO_FP:
4765 case ISD::SINT_TO_FP:
4766 case ISD::ABS:
4767 case ISD::BITREVERSE:
4768 case ISD::BSWAP:
4769 case ISD::CTLZ:
4770 case ISD::CTLZ_ZERO_UNDEF:
4771 case ISD::CTTZ:
4772 case ISD::CTTZ_ZERO_UNDEF:
4773 case ISD::CTPOP: {
4774 SDValue Ops[] = {Operand};
4775 if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
4776 return Fold;
4777 }
4778 }
4779 }
4780 }
4781
4782 unsigned OpOpcode = Operand.getNode()->getOpcode();
4783 switch (Opcode) {
4784 case ISD::STEP_VECTOR:
4785 assert(VT.isScalableVector() &&
4786 "STEP_VECTOR can only be used with scalable types");
4787 assert(VT.getScalarSizeInBits() >= 8 &&
4788 "STEP_VECTOR can only be used with vectors of integers that are at "
4789 "least 8 bits wide");
4790 assert(isa<ConstantSDNode>(Operand) &&
4791 cast<ConstantSDNode>(Operand)->getAPIntValue().isSignedIntN(
4792 VT.getScalarSizeInBits()) &&
4793 "Expected STEP_VECTOR integer constant to fit in "
4794 "the vector element type");
4795 break;
4796 case ISD::FREEZE:
4797 assert(VT == Operand.getValueType() && "Unexpected VT!");
4798 break;
4799 case ISD::TokenFactor:
4800 case ISD::MERGE_VALUES:
4801 case ISD::CONCAT_VECTORS:
4802 return Operand; // Factor, merge or concat of one node? No need.
4803 case ISD::BUILD_VECTOR: {
4804 // Attempt to simplify BUILD_VECTOR.
4805 SDValue Ops[] = {Operand};
4806 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
4807 return V;
4808 break;
4809 }
4810 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
4811 case ISD::FP_EXTEND:
4812 assert(VT.isFloatingPoint() &&
4813 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
4814 if (Operand.getValueType() == VT) return Operand; // noop conversion.
4815 assert((!VT.isVector() ||
4816 VT.getVectorElementCount() ==
4817 Operand.getValueType().getVectorElementCount()) &&
4818 "Vector element count mismatch!");
4819 assert(Operand.getValueType().bitsLT(VT) &&
4820 "Invalid fpext node, dst < src!");
4821 if (Operand.isUndef())
4822 return getUNDEF(VT);
4823 break;
4824 case ISD::FP_TO_SINT:
4825 case ISD::FP_TO_UINT:
4826 if (Operand.isUndef())
4827 return getUNDEF(VT);
4828 break;
4829 case ISD::SINT_TO_FP:
4830 case ISD::UINT_TO_FP:
4831 // [us]itofp(undef) = 0, because the result value is bounded.
4832 if (Operand.isUndef())
4833 return getConstantFP(0.0, DL, VT);
4834 break;
4835 case ISD::SIGN_EXTEND:
4836 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4837 "Invalid SIGN_EXTEND!");
4838 assert(VT.isVector() == Operand.getValueType().isVector() &&
4839 "SIGN_EXTEND result type type should be vector iff the operand "
4840 "type is vector!");
4841 if (Operand.getValueType() == VT) return Operand; // noop extension
4842 assert((!VT.isVector() ||
4843 VT.getVectorElementCount() ==
4844 Operand.getValueType().getVectorElementCount()) &&
4845 "Vector element count mismatch!");
4846 assert(Operand.getValueType().bitsLT(VT) &&
4847 "Invalid sext node, dst < src!");
4848 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
4849 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4850 if (OpOpcode == ISD::UNDEF)
4851 // sext(undef) = 0, because the top bits will all be the same.
4852 return getConstant(0, DL, VT);
4853 break;
4854 case ISD::ZERO_EXTEND:
4855 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4856 "Invalid ZERO_EXTEND!");
4857 assert(VT.isVector() == Operand.getValueType().isVector() &&
4858 "ZERO_EXTEND result type type should be vector iff the operand "
4859 "type is vector!");
4860 if (Operand.getValueType() == VT) return Operand; // noop extension
4861 assert((!VT.isVector() ||
4862 VT.getVectorElementCount() ==
4863 Operand.getValueType().getVectorElementCount()) &&
4864 "Vector element count mismatch!");
4865 assert(Operand.getValueType().bitsLT(VT) &&
4866 "Invalid zext node, dst < src!");
4867 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x)
4868 return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0));
4869 if (OpOpcode == ISD::UNDEF)
4870 // zext(undef) = 0, because the top bits will be zero.
4871 return getConstant(0, DL, VT);
4872 break;
4873 case ISD::ANY_EXTEND:
4874 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4875 "Invalid ANY_EXTEND!");
4876 assert(VT.isVector() == Operand.getValueType().isVector() &&
4877 "ANY_EXTEND result type type should be vector iff the operand "
4878 "type is vector!");
4879 if (Operand.getValueType() == VT) return Operand; // noop extension
4880 assert((!VT.isVector() ||
4881 VT.getVectorElementCount() ==
4882 Operand.getValueType().getVectorElementCount()) &&
4883 "Vector element count mismatch!");
4884 assert(Operand.getValueType().bitsLT(VT) &&
4885 "Invalid anyext node, dst < src!");
4886
4887 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4888 OpOpcode == ISD::ANY_EXTEND)
4889 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
4890 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4891 if (OpOpcode == ISD::UNDEF)
4892 return getUNDEF(VT);
4893
4894 // (ext (trunc x)) -> x
4895 if (OpOpcode == ISD::TRUNCATE) {
4896 SDValue OpOp = Operand.getOperand(0);
4897 if (OpOp.getValueType() == VT) {
4898 transferDbgValues(Operand, OpOp);
4899 return OpOp;
4900 }
4901 }
4902 break;
4903 case ISD::TRUNCATE:
4904 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4905 "Invalid TRUNCATE!");
4906 assert(VT.isVector() == Operand.getValueType().isVector() &&
4907 "TRUNCATE result type type should be vector iff the operand "
4908 "type is vector!");
4909 if (Operand.getValueType() == VT) return Operand; // noop truncate
4910 assert((!VT.isVector() ||
4911 VT.getVectorElementCount() ==
4912 Operand.getValueType().getVectorElementCount()) &&
4913 "Vector element count mismatch!");
4914 assert(Operand.getValueType().bitsGT(VT) &&
4915 "Invalid truncate node, src < dst!");
4916 if (OpOpcode == ISD::TRUNCATE)
4917 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
4918 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4919 OpOpcode == ISD::ANY_EXTEND) {
4920 // If the source is smaller than the dest, we still need an extend.
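// e.g. (trunc (sext x:i16 to i64) to i32) becomes (sext x to i32), while
// truncating the same value back to i16 simply returns x.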
4921 if (Operand.getOperand(0).getValueType().getScalarType()
4922 .bitsLT(VT.getScalarType()))
4923 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4924 if (Operand.getOperand(0).getValueType().bitsGT(VT))
4925 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
4926 return Operand.getOperand(0);
4927 }
4928 if (OpOpcode == ISD::UNDEF)
4929 return getUNDEF(VT);
4930 break;
4931 case ISD::ANY_EXTEND_VECTOR_INREG:
4932 case ISD::ZERO_EXTEND_VECTOR_INREG:
4933 case ISD::SIGN_EXTEND_VECTOR_INREG:
4934 assert(VT.isVector() && "This DAG node is restricted to vector types.");
4935 assert(Operand.getValueType().bitsLE(VT) &&
4936 "The input must be the same size or smaller than the result.");
4937 assert(VT.getVectorMinNumElements() <
4938 Operand.getValueType().getVectorMinNumElements() &&
4939 "The destination vector type must have fewer lanes than the input.");
4940 break;
4941 case ISD::ABS:
4942 assert(VT.isInteger() && VT == Operand.getValueType() &&
4943 "Invalid ABS!");
4944 if (OpOpcode == ISD::UNDEF)
4945 return getUNDEF(VT);
4946 break;
4947 case ISD::BSWAP:
4948 assert(VT.isInteger() && VT == Operand.getValueType() &&
4949 "Invalid BSWAP!");
4950 assert((VT.getScalarSizeInBits() % 16 == 0) &&
4951 "BSWAP types must be a multiple of 16 bits!");
4952 if (OpOpcode == ISD::UNDEF)
4953 return getUNDEF(VT);
4954 break;
4955 case ISD::BITREVERSE:
4956 assert(VT.isInteger() && VT == Operand.getValueType() &&
4957 "Invalid BITREVERSE!");
4958 if (OpOpcode == ISD::UNDEF)
4959 return getUNDEF(VT);
4960 break;
4961 case ISD::BITCAST:
4962 // Basic sanity checking.
4963 assert(VT.getSizeInBits() == Operand.getValueSizeInBits() &&
4964 "Cannot BITCAST between types of different sizes!");
4965 if (VT == Operand.getValueType()) return Operand; // noop conversion.
4966 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
4967 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
4968 if (OpOpcode == ISD::UNDEF)
4969 return getUNDEF(VT);
4970 break;
4971 case ISD::SCALAR_TO_VECTOR:
4972 assert(VT.isVector() && !Operand.getValueType().isVector() &&
4973 (VT.getVectorElementType() == Operand.getValueType() ||
4974 (VT.getVectorElementType().isInteger() &&
4975 Operand.getValueType().isInteger() &&
4976 VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
4977 "Illegal SCALAR_TO_VECTOR node!");
4978 if (OpOpcode == ISD::UNDEF)
4979 return getUNDEF(VT);
4980 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
4981 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
4982 isa<ConstantSDNode>(Operand.getOperand(1)) &&
4983 Operand.getConstantOperandVal(1) == 0 &&
4984 Operand.getOperand(0).getValueType() == VT)
4985 return Operand.getOperand(0);
4986 break;
4987 case ISD::FNEG:
4988 // Negation of an unknown bag of bits is still completely undefined.
4989 if (OpOpcode == ISD::UNDEF)
4990 return getUNDEF(VT);
4991
4992 if (OpOpcode == ISD::FNEG) // --X -> X
4993 return Operand.getOperand(0);
4994 break;
4995 case ISD::FABS:
4996 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X)
4997 return getNode(ISD::FABS, DL, VT, Operand.getOperand(0));
4998 break;
4999 case ISD::VSCALE:
5000 assert(VT == Operand.getValueType() && "Unexpected VT!");
5001 break;
5002 case ISD::CTPOP:
5003 if (Operand.getValueType().getScalarType() == MVT::i1)
5004 return Operand;
5005 break;
5006 case ISD::CTLZ:
5007 case ISD::CTTZ:
5008 if (Operand.getValueType().getScalarType() == MVT::i1)
5009 return getNOT(DL, Operand, Operand.getValueType());
5010 break;
5011 case ISD::VECREDUCE_SMIN:
5012 case ISD::VECREDUCE_UMAX:
5013 if (Operand.getValueType().getScalarType() == MVT::i1)
5014 return getNode(ISD::VECREDUCE_OR, DL, VT, Operand);
5015 break;
5016 case ISD::VECREDUCE_SMAX:
5017 case ISD::VECREDUCE_UMIN:
5018 if (Operand.getValueType().getScalarType() == MVT::i1)
5019 return getNode(ISD::VECREDUCE_AND, DL, VT, Operand);
5020 break;
5021 }
5022
5023 SDNode *N;
5024 SDVTList VTs = getVTList(VT);
5025 SDValue Ops[] = {Operand};
5026 if (VT != MVT::Glue) { // Don't CSE flag producing nodes
5027 FoldingSetNodeID ID;
5028 AddNodeIDNode(ID, Opcode, VTs, Ops);
5029 void *IP = nullptr;
5030 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
5031 E->intersectFlagsWith(Flags);
5032 return SDValue(E, 0);
5033 }
5034
5035 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5036 N->setFlags(Flags);
5037 createOperands(N, Ops);
5038 CSEMap.InsertNode(N, IP);
5039 } else {
5040 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5041 createOperands(N, Ops);
5042 }
5043
5044 InsertNode(N);
5045 SDValue V = SDValue(N, 0);
5046 NewSDValueDbgMsg(V, "Creating new node: ", this);
5047 return V;
5048 }
5049
5050 static llvm::Optional<APInt> FoldValue(unsigned Opcode, const APInt &C1,
5051 const APInt &C2) {
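// Minimal illustration (hypothetical constants): for ISD::SMIN with
// C1 = APInt(8, -1) and C2 = APInt(8, 2), C1.sle(C2) holds and C1 (-1) is
// returned; the division and remainder cases below return llvm::None for a
// zero divisor so the caller keeps the original node.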
5052 switch (Opcode) {
5053 case ISD::ADD: return C1 + C2;
5054 case ISD::SUB: return C1 - C2;
5055 case ISD::MUL: return C1 * C2;
5056 case ISD::AND: return C1 & C2;
5057 case ISD::OR: return C1 | C2;
5058 case ISD::XOR: return C1 ^ C2;
5059 case ISD::SHL: return C1 << C2;
5060 case ISD::SRL: return C1.lshr(C2);
5061 case ISD::SRA: return C1.ashr(C2);
5062 case ISD::ROTL: return C1.rotl(C2);
5063 case ISD::ROTR: return C1.rotr(C2);
5064 case ISD::SMIN: return C1.sle(C2) ? C1 : C2;
5065 case ISD::SMAX: return C1.sge(C2) ? C1 : C2;
5066 case ISD::UMIN: return C1.ule(C2) ? C1 : C2;
5067 case ISD::UMAX: return C1.uge(C2) ? C1 : C2;
5068 case ISD::SADDSAT: return C1.sadd_sat(C2);
5069 case ISD::UADDSAT: return C1.uadd_sat(C2);
5070 case ISD::SSUBSAT: return C1.ssub_sat(C2);
5071 case ISD::USUBSAT: return C1.usub_sat(C2);
5072 case ISD::UDIV:
5073 if (!C2.getBoolValue())
5074 break;
5075 return C1.udiv(C2);
5076 case ISD::UREM:
5077 if (!C2.getBoolValue())
5078 break;
5079 return C1.urem(C2);
5080 case ISD::SDIV:
5081 if (!C2.getBoolValue())
5082 break;
5083 return C1.sdiv(C2);
5084 case ISD::SREM:
5085 if (!C2.getBoolValue())
5086 break;
5087 return C1.srem(C2);
5088 }
5089 return llvm::None;
5090 }
5091
5092 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT,
5093 const GlobalAddressSDNode *GA,
5094 const SDNode *N2) {
5095 if (GA->getOpcode() != ISD::GlobalAddress)
5096 return SDValue();
5097 if (!TLI->isOffsetFoldingLegal(GA))
5098 return SDValue();
5099 auto *C2 = dyn_cast<ConstantSDNode>(N2);
5100 if (!C2)
5101 return SDValue();
5102 int64_t Offset = C2->getSExtValue();
5103 switch (Opcode) {
5104 case ISD::ADD: break;
5105 case ISD::SUB: Offset = -uint64_t(Offset); break;
5106 default: return SDValue();
5107 }
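// e.g. an ADD of (GlobalAddress @g + 8) and constant 4 yields
// (GlobalAddress @g + 12); a SUB negates the constant first.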
5108 return getGlobalAddress(GA->getGlobal(), SDLoc(C2), VT,
5109 GA->getOffset() + uint64_t(Offset));
5110 }
5111
5112 bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) {
5113 switch (Opcode) {
5114 case ISD::SDIV:
5115 case ISD::UDIV:
5116 case ISD::SREM:
5117 case ISD::UREM: {
5118 // If a divisor is zero/undef or any element of a divisor vector is
5119 // zero/undef, the whole op is undef.
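// e.g. (udiv x, 0) is undef, as is (sdiv x, (build_vector 1, 0)) because
// one divisor lane is zero.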
5120 assert(Ops.size() == 2 && "Div/rem should have 2 operands");
5121 SDValue Divisor = Ops[1];
5122 if (Divisor.isUndef() || isNullConstant(Divisor))
5123 return true;
5124
5125 return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) &&
5126 llvm::any_of(Divisor->op_values(),
5127 [](SDValue V) { return V.isUndef() ||
5128 isNullConstant(V); });
5129 // TODO: Handle signed overflow.
5130 }
5131 // TODO: Handle oversized shifts.
5132 default:
5133 return false;
5134 }
5135 }
5136
5137 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
5138 EVT VT, ArrayRef<SDValue> Ops) {
5139 // If the opcode is a target-specific ISD node, there's nothing we can
5140 // do here and the operand rules may not line up with the below, so
5141 // bail early.
5142 // We can't create a scalar CONCAT_VECTORS so skip it. It will break
5143 // for concats involving SPLAT_VECTOR. Concats of BUILD_VECTORS are handled by
5144 // foldCONCAT_VECTORS in getNode before this is called.
5145 if (Opcode >= ISD::BUILTIN_OP_END || Opcode == ISD::CONCAT_VECTORS)
5146 return SDValue();
5147
5148 // For now, the array Ops should only contain two values.
5149 // This enforcement will be removed once this function is merged with
5150 // FoldConstantVectorArithmetic.
5151 if (Ops.size() != 2)
5152 return SDValue();
5153
5154 if (isUndef(Opcode, Ops))
5155 return getUNDEF(VT);
5156
5157 SDNode *N1 = Ops[0].getNode();
5158 SDNode *N2 = Ops[1].getNode();
5159
5160 // Handle the case of two scalars.
5161 if (auto *C1 = dyn_cast<ConstantSDNode>(N1)) {
5162 if (auto *C2 = dyn_cast<ConstantSDNode>(N2)) {
5163 if (C1->isOpaque() || C2->isOpaque())
5164 return SDValue();
5165
5166 Optional<APInt> FoldAttempt =
5167 FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue());
5168 if (!FoldAttempt)
5169 return SDValue();
5170
5171 SDValue Folded = getConstant(FoldAttempt.getValue(), DL, VT);
5172 assert((!Folded || !VT.isVector()) &&
5173 "Can't fold vectors ops with scalar operands");
5174 return Folded;
5175 }
5176 }
5177
5178 // fold (add Sym, c) -> Sym+c
5179 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N1))
5180 return FoldSymbolOffset(Opcode, VT, GA, N2);
5181 if (TLI->isCommutativeBinOp(Opcode))
5182 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N2))
5183 return FoldSymbolOffset(Opcode, VT, GA, N1);
5184
5185 // For fixed width vectors, extract each constant element and fold them
5186 // individually. Either input may be an undef value.
5187 bool IsBVOrSV1 = N1->getOpcode() == ISD::BUILD_VECTOR ||
5188 N1->getOpcode() == ISD::SPLAT_VECTOR;
5189 if (!IsBVOrSV1 && !N1->isUndef())
5190 return SDValue();
5191 bool IsBVOrSV2 = N2->getOpcode() == ISD::BUILD_VECTOR ||
5192 N2->getOpcode() == ISD::SPLAT_VECTOR;
5193 if (!IsBVOrSV2 && !N2->isUndef())
5194 return SDValue();
5195 // If both operands are undef, that's handled the same way as scalars.
5196 if (!IsBVOrSV1 && !IsBVOrSV2)
5197 return SDValue();
5198
5199 EVT SVT = VT.getScalarType();
5200 EVT LegalSVT = SVT;
5201 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
5202 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
5203 if (LegalSVT.bitsLT(SVT))
5204 return SDValue();
5205 }
5206
5207 SmallVector<SDValue, 4> Outputs;
5208 unsigned NumOps = 0;
5209 if (IsBVOrSV1)
5210 NumOps = std::max(NumOps, N1->getNumOperands());
5211 if (IsBVOrSV2)
5212 NumOps = std::max(NumOps, N2->getNumOperands());
5213 assert(NumOps != 0 && "Expected non-zero operands");
5214 // Scalable vectors should only be SPLAT_VECTOR or UNDEF here. We only need
5215 // one iteration for that.
5216 assert((!VT.isScalableVector() || NumOps == 1) &&
5217 "Scalar vector should only have one scalar");
5218
5219 for (unsigned I = 0; I != NumOps; ++I) {
5220 // We can have a fixed length SPLAT_VECTOR and a BUILD_VECTOR so we need
5221 // to use operand 0 of the SPLAT_VECTOR for each fixed element.
5222 SDValue V1;
5223 if (N1->getOpcode() == ISD::BUILD_VECTOR)
5224 V1 = N1->getOperand(I);
5225 else if (N1->getOpcode() == ISD::SPLAT_VECTOR)
5226 V1 = N1->getOperand(0);
5227 else
5228 V1 = getUNDEF(SVT);
5229
5230 SDValue V2;
5231 if (N2->getOpcode() == ISD::BUILD_VECTOR)
5232 V2 = N2->getOperand(I);
5233 else if (N2->getOpcode() == ISD::SPLAT_VECTOR)
5234 V2 = N2->getOperand(0);
5235 else
5236 V2 = getUNDEF(SVT);
5237
5238 if (SVT.isInteger()) {
5239 if (V1.getValueType().bitsGT(SVT))
5240 V1 = getNode(ISD::TRUNCATE, DL, SVT, V1);
5241 if (V2.getValueType().bitsGT(SVT))
5242 V2 = getNode(ISD::TRUNCATE, DL, SVT, V2);
5243 }
5244
5245 if (V1.getValueType() != SVT || V2.getValueType() != SVT)
5246 return SDValue();
5247
5248 // Fold one vector element.
5249 SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2);
5250 if (LegalSVT != SVT)
5251 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
5252
5253 // Scalar folding only succeeded if the result is a constant or UNDEF.
5254 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
5255 ScalarResult.getOpcode() != ISD::ConstantFP)
5256 return SDValue();
5257 Outputs.push_back(ScalarResult);
5258 }
5259
5260 if (N1->getOpcode() == ISD::BUILD_VECTOR ||
5261 N2->getOpcode() == ISD::BUILD_VECTOR) {
5262 assert(VT.getVectorNumElements() == Outputs.size() &&
5263 "Vector size mismatch!");
5264
5265 // Build a big vector out of the scalar elements we generated.
5266 return getBuildVector(VT, SDLoc(), Outputs);
5267 }
5268
5269 assert((N1->getOpcode() == ISD::SPLAT_VECTOR ||
5270 N2->getOpcode() == ISD::SPLAT_VECTOR) &&
5271 "One operand should be a splat vector");
5272
5273 assert(Outputs.size() == 1 && "Vector size mismatch!");
5274 return getSplatVector(VT, SDLoc(), Outputs[0]);
5275 }
5276
5277 // TODO: Merge with FoldConstantArithmetic
5278 SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode,
5279 const SDLoc &DL, EVT VT,
5280 ArrayRef<SDValue> Ops,
5281 const SDNodeFlags Flags) {
5282 // If the opcode is a target-specific ISD node, there's nothing we can
5283 // do here and the operand rules may not line up with the below, so
5284 // bail early.
5285 if (Opcode >= ISD::BUILTIN_OP_END)
5286 return SDValue();
5287
5288 if (isUndef(Opcode, Ops))
5289 return getUNDEF(VT);
5290
5291 // We can only fold vectors - maybe merge with FoldConstantArithmetic someday?
5292 if (!VT.isVector())
5293 return SDValue();
5294
5295 // TODO: All the folds below are performed lane-by-lane and assume a fixed
5296 // vector width, however we should be able to do constant folds involving
5297 // splat vector nodes too.
5298 if (VT.isScalableVector())
5299 return SDValue();
5300
5301 // From this point onwards all vectors are assumed to be fixed width.
5302 unsigned NumElts = VT.getVectorNumElements();
5303
5304 auto IsScalarOrSameVectorSize = [&](const SDValue &Op) {
5305 return !Op.getValueType().isVector() ||
5306 Op.getValueType().getVectorNumElements() == NumElts;
5307 };
5308
5309 auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) {
5310 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op);
5311 return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) ||
5312 (BV && BV->isConstant());
5313 };
5314
5315 // All operands must be vector types with the same number of elements as
5316 // the result type and must be either UNDEF or a build vector of constant
5317 // or UNDEF scalars.
5318 if (!llvm::all_of(Ops, IsConstantBuildVectorOrUndef) ||
5319 !llvm::all_of(Ops, IsScalarOrSameVectorSize))
5320 return SDValue();
5321
5322 // If we are comparing vectors, then the result needs to be an i1 boolean
5323 // that is then sign-extended back to the legal result type.
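// e.g. a folded v4i32 SETCC lane yields an i1 that is sign-extended below,
// so "true" becomes all-ones (-1) in the legal result type.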
5324 EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType());
5325
5326 // Find legal integer scalar type for constant promotion and
5327 // ensure that its scalar size is at least as large as source.
5328 EVT LegalSVT = VT.getScalarType();
5329 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
5330 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
5331 if (LegalSVT.bitsLT(VT.getScalarType()))
5332 return SDValue();
5333 }
5334
5335 // Constant fold each scalar lane separately.
5336 SmallVector<SDValue, 4> ScalarResults;
5337 for (unsigned i = 0; i != NumElts; i++) {
5338 SmallVector<SDValue, 4> ScalarOps;
5339 for (SDValue Op : Ops) {
5340 EVT InSVT = Op.getValueType().getScalarType();
5341 BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op);
5342 if (!InBV) {
5343 // We've checked that this is UNDEF or a constant of some kind.
5344 if (Op.isUndef())
5345 ScalarOps.push_back(getUNDEF(InSVT));
5346 else
5347 ScalarOps.push_back(Op);
5348 continue;
5349 }
5350
5351 SDValue ScalarOp = InBV->getOperand(i);
5352 EVT ScalarVT = ScalarOp.getValueType();
5353
5354 // Build vector (integer) scalar operands may need implicit
5355 // truncation - do this before constant folding.
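// e.g. a v4i8 build vector may carry promoted i32 operands after
// legalization; truncate them back to i8 so the scalar fold sees the real
// element values.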
5356 if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT))
5357 ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp);
5358
5359 ScalarOps.push_back(ScalarOp);
5360 }
5361
5362 // Constant fold the scalar operands.
5363 SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags);
5364
5365 // Legalize the (integer) scalar constant if necessary.
5366 if (LegalSVT != SVT)
5367 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
5368
5369 // Scalar folding only succeeded if the result is a constant or UNDEF.
5370 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
5371 ScalarResult.getOpcode() != ISD::ConstantFP)
5372 return SDValue();
5373 ScalarResults.push_back(ScalarResult);
5374 }
5375
5376 SDValue V = getBuildVector(VT, DL, ScalarResults);
5377 NewSDValueDbgMsg(V, "New node fold constant vector: ", this);
5378 return V;
5379 }
5380
5381 SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL,
5382 EVT VT, SDValue N1, SDValue N2) {
5383 // TODO: We don't do any constant folding for strict FP opcodes here, but we
5384 // should. That will require dealing with a potentially non-default
5385 // rounding mode, checking the "opStatus" return value from the APFloat
5386 // math calculations, and possibly other variations.
5387 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode());
5388 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode());
5389 if (N1CFP && N2CFP) {
5390 APFloat C1 = N1CFP->getValueAPF(), C2 = N2CFP->getValueAPF();
5391 switch (Opcode) {
5392 case ISD::FADD:
5393 C1.add(C2, APFloat::rmNearestTiesToEven);
5394 return getConstantFP(C1, DL, VT);
5395 case ISD::FSUB:
5396 C1.subtract(C2, APFloat::rmNearestTiesToEven);
5397 return getConstantFP(C1, DL, VT);
5398 case ISD::FMUL:
5399 C1.multiply(C2, APFloat::rmNearestTiesToEven);
5400 return getConstantFP(C1, DL, VT);
5401 case ISD::FDIV:
5402 C1.divide(C2, APFloat::rmNearestTiesToEven);
5403 return getConstantFP(C1, DL, VT);
5404 case ISD::FREM:
5405 C1.mod(C2);
5406 return getConstantFP(C1, DL, VT);
5407 case ISD::FCOPYSIGN:
5408 C1.copySign(C2);
5409 return getConstantFP(C1, DL, VT);
5410 default: break;
5411 }
5412 }
5413 if (N1CFP && Opcode == ISD::FP_ROUND) {
5414 APFloat C1 = N1CFP->getValueAPF(); // make copy
5415 bool Unused;
5416 // This can return overflow, underflow, or inexact; we don't care.
5417 // FIXME need to be more flexible about rounding mode.
5418 (void) C1.convert(EVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven,
5419 &Unused);
5420 return getConstantFP(C1, DL, VT);
5421 }
5422
5423 switch (Opcode) {
5424 case ISD::FSUB:
5425 // -0.0 - undef --> undef (consistent with "fneg undef")
5426 if (N1CFP && N1CFP->getValueAPF().isNegZero() && N2.isUndef())
5427 return getUNDEF(VT);
5428 LLVM_FALLTHROUGH;
5429
5430 case ISD::FADD:
5431 case ISD::FMUL:
5432 case ISD::FDIV:
5433 case ISD::FREM:
5434 // If both operands are undef, the result is undef. If one operand is undef,
5435 // the result is NaN. This should match the behavior of the IR optimizer.
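// e.g. (fdiv undef, undef) folds to undef, while (fmul c, undef) folds to a
// quiet NaN of the result type.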
5436 if (N1.isUndef() && N2.isUndef())
5437 return getUNDEF(VT);
5438 if (N1.isUndef() || N2.isUndef())
5439 return getConstantFP(APFloat::getNaN(EVTToAPFloatSemantics(VT)), DL, VT);
5440 }
5441 return SDValue();
5442 }
5443
5444 SDValue SelectionDAG::getAssertAlign(const SDLoc &DL, SDValue Val, Align A) {
5445 assert(Val.getValueType().isInteger() && "Invalid AssertAlign!");
5446
5447 // There's no need to assert on a byte-aligned pointer. All pointers are at
5448 // least byte aligned.
5449 if (A == Align(1))
5450 return Val;
5451
5452 FoldingSetNodeID ID;
5453 AddNodeIDNode(ID, ISD::AssertAlign, getVTList(Val.getValueType()), {Val});
5454 ID.AddInteger(A.value());
5455
5456 void *IP = nullptr;
5457 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
5458 return SDValue(E, 0);
5459
5460 auto *N = newSDNode<AssertAlignSDNode>(DL.getIROrder(), DL.getDebugLoc(),
5461 Val.getValueType(), A);
5462 createOperands(N, {Val});
5463
5464 CSEMap.InsertNode(N, IP);
5465 InsertNode(N);
5466
5467 SDValue V(N, 0);
5468 NewSDValueDbgMsg(V, "Creating new node: ", this);
5469 return V;
5470 }
5471
5472 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5473 SDValue N1, SDValue N2) {
5474 SDNodeFlags Flags;
5475 if (Inserter)
5476 Flags = Inserter->getFlags();
5477 return getNode(Opcode, DL, VT, N1, N2, Flags);
5478 }
5479
5480 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5481 SDValue N1, SDValue N2, const SDNodeFlags Flags) {
5482 assert(N1.getOpcode() != ISD::DELETED_NODE &&
5483 N2.getOpcode() != ISD::DELETED_NODE &&
5484 "Operand is DELETED_NODE!");
5485 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
5486 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
5487 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
5488 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
5489
5490 // Canonicalize constant to RHS if commutative.
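// e.g. (add 42, x) is rebuilt as (add x, 42) so the folds below only have
// to look for a constant on operand 1.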
5491 if (TLI->isCommutativeBinOp(Opcode)) {
5492 if (N1C && !N2C) {
5493 std::swap(N1C, N2C);
5494 std::swap(N1, N2);
5495 } else if (N1CFP && !N2CFP) {
5496 std::swap(N1CFP, N2CFP);
5497 std::swap(N1, N2);
5498 }
5499 }
5500
5501 switch (Opcode) {
5502 default: break;
5503 case ISD::TokenFactor:
5504 assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
5505 N2.getValueType() == MVT::Other && "Invalid token factor!");
5506 // Fold trivial token factors.
5507 if (N1.getOpcode() == ISD::EntryToken) return N2;
5508 if (N2.getOpcode() == ISD::EntryToken) return N1;
5509 if (N1 == N2) return N1;
5510 break;
5511 case ISD::BUILD_VECTOR: {
5512 // Attempt to simplify BUILD_VECTOR.
5513 SDValue Ops[] = {N1, N2};
5514 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
5515 return V;
5516 break;
5517 }
5518 case ISD::CONCAT_VECTORS: {
5519 SDValue Ops[] = {N1, N2};
5520 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
5521 return V;
5522 break;
5523 }
5524 case ISD::AND:
5525 assert(VT.isInteger() && "This operator does not apply to FP types!");
5526 assert(N1.getValueType() == N2.getValueType() &&
5527 N1.getValueType() == VT && "Binary operator types must match!");
5528 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
5529 // worth handling here.
5530 if (N2C && N2C->isNullValue())
5531 return N2;
5532 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X
5533 return N1;
5534 break;
5535 case ISD::OR:
5536 case ISD::XOR:
5537 case ISD::ADD:
5538 case ISD::SUB:
5539 assert(VT.isInteger() && "This operator does not apply to FP types!");
5540 assert(N1.getValueType() == N2.getValueType() &&
5541 N1.getValueType() == VT && "Binary operator types must match!");
5542 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
5543 // it's worth handling here.
5544 if (N2C && N2C->isNullValue())
5545 return N1;
5546 if ((Opcode == ISD::ADD || Opcode == ISD::SUB) && VT.isVector() &&
5547 VT.getVectorElementType() == MVT::i1)
5548 return getNode(ISD::XOR, DL, VT, N1, N2);
5549 break;
5550 case ISD::MUL:
5551 assert(VT.isInteger() && "This operator does not apply to FP types!");
5552 assert(N1.getValueType() == N2.getValueType() &&
5553 N1.getValueType() == VT && "Binary operator types must match!");
5554 if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
5555 return getNode(ISD::AND, DL, VT, N1, N2);
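// With the no-signed-wrap flag, (mul (vscale C1), C2) folds to a single
// vscale of C1 * C2; e.g. (mul (vscale 4), 2) becomes (vscale 8).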
5556 if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) {
5557 const APInt &MulImm = N1->getConstantOperandAPInt(0);
5558 const APInt &N2CImm = N2C->getAPIntValue();
5559 return getVScale(DL, VT, MulImm * N2CImm);
5560 }
5561 break;
5562 case ISD::UDIV:
5563 case ISD::UREM:
5564 case ISD::MULHU:
5565 case ISD::MULHS:
5566 case ISD::SDIV:
5567 case ISD::SREM:
5568 case ISD::SADDSAT:
5569 case ISD::SSUBSAT:
5570 case ISD::UADDSAT:
5571 case ISD::USUBSAT:
5572 assert(VT.isInteger() && "This operator does not apply to FP types!");
5573 assert(N1.getValueType() == N2.getValueType() &&
5574 N1.getValueType() == VT && "Binary operator types must match!");
5575 if (VT.isVector() && VT.getVectorElementType() == MVT::i1) {
5576 // fold (add_sat x, y) -> (or x, y) for bool types.
5577 if (Opcode == ISD::SADDSAT || Opcode == ISD::UADDSAT)
5578 return getNode(ISD::OR, DL, VT, N1, N2);
5579 // fold (sub_sat x, y) -> (and x, ~y) for bool types.
5580 if (Opcode == ISD::SSUBSAT || Opcode == ISD::USUBSAT)
5581 return getNode(ISD::AND, DL, VT, N1, getNOT(DL, N2, VT));
5582 }
5583 break;
5584 case ISD::SMIN:
5585 case ISD::UMAX:
5586 assert(VT.isInteger() && "This operator does not apply to FP types!");
5587 assert(N1.getValueType() == N2.getValueType() &&
5588 N1.getValueType() == VT && "Binary operator types must match!");
5589 if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
5590 return getNode(ISD::OR, DL, VT, N1, N2);
5591 break;
5592 case ISD::SMAX:
5593 case ISD::UMIN:
5594 assert(VT.isInteger() && "This operator does not apply to FP types!");
5595 assert(N1.getValueType() == N2.getValueType() &&
5596 N1.getValueType() == VT && "Binary operator types must match!");
5597 if (VT.isVector() && VT.getVectorElementType() == MVT::i1)
5598 return getNode(ISD::AND, DL, VT, N1, N2);
5599 break;
5600 case ISD::FADD:
5601 case ISD::FSUB:
5602 case ISD::FMUL:
5603 case ISD::FDIV:
5604 case ISD::FREM:
5605 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
5606 assert(N1.getValueType() == N2.getValueType() &&
5607 N1.getValueType() == VT && "Binary operator types must match!");
5608 if (SDValue V = simplifyFPBinop(Opcode, N1, N2, Flags))
5609 return V;
5610 break;
5611 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match.
5612 assert(N1.getValueType() == VT &&
5613 N1.getValueType().isFloatingPoint() &&
5614 N2.getValueType().isFloatingPoint() &&
5615 "Invalid FCOPYSIGN!");
5616 break;
5617 case ISD::SHL:
5618 if (N2C && (N1.getOpcode() == ISD::VSCALE) && Flags.hasNoSignedWrap()) {
5619 const APInt &MulImm = N1->getConstantOperandAPInt(0);
5620 const APInt &ShiftImm = N2C->getAPIntValue();
5621 return getVScale(DL, VT, MulImm << ShiftImm);
5622 }
5623 LLVM_FALLTHROUGH;
5624 case ISD::SRA:
5625 case ISD::SRL:
5626 if (SDValue V = simplifyShift(N1, N2))
5627 return V;
5628 LLVM_FALLTHROUGH;
5629 case ISD::ROTL:
5630 case ISD::ROTR:
5631 assert(VT == N1.getValueType() &&
5632 "Shift operators return type must be the same as their first arg");
5633 assert(VT.isInteger() && N2.getValueType().isInteger() &&
5634 "Shifts only work on integers");
5635 assert((!VT.isVector() || VT == N2.getValueType()) &&
5636 "Vector shift amounts must be in the same as their first arg");
5637 // Verify that the shift amount VT is big enough to hold valid shift
5638 // amounts. This catches things like trying to shift an i1024 value by an
5639 // i8, which is easy to fall into in generic code that uses
5640 // TLI.getShiftAmount().
5641 assert(N2.getValueType().getScalarSizeInBits() >=
5642 Log2_32_Ceil(VT.getScalarSizeInBits()) &&
5643 "Invalid use of small shift amount with oversized value!");
5644
5645 // Always fold shifts of i1 values so the code generator doesn't need to
5646 // handle them. Since we know the size of the shift has to be less than the
5647 // size of the value, the shift/rotate count is guaranteed to be zero.
5648 if (VT == MVT::i1)
5649 return N1;
5650 if (N2C && N2C->isNullValue())
5651 return N1;
5652 break;
5653 case ISD::FP_ROUND:
5654 assert(VT.isFloatingPoint() &&
5655 N1.getValueType().isFloatingPoint() &&
5656 VT.bitsLE(N1.getValueType()) &&
5657 N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
5658 "Invalid FP_ROUND!");
5659 if (N1.getValueType() == VT) return N1; // noop conversion.
5660 break;
5661 case ISD::AssertSext:
5662 case ISD::AssertZext: {
5663 EVT EVT = cast<VTSDNode>(N2)->getVT();
5664 assert(VT == N1.getValueType() && "Not an inreg extend!");
5665 assert(VT.isInteger() && EVT.isInteger() &&
5666 "Cannot *_EXTEND_INREG FP types");
5667 assert(!EVT.isVector() &&
5668 "AssertSExt/AssertZExt type should be the vector element type "
5669 "rather than the vector type!");
5670 assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!");
5671 if (VT.getScalarType() == EVT) return N1; // noop assertion.
5672 break;
5673 }
5674 case ISD::SIGN_EXTEND_INREG: {
5675 EVT EVT = cast<VTSDNode>(N2)->getVT();
5676 assert(VT == N1.getValueType() && "Not an inreg extend!");
5677 assert(VT.isInteger() && EVT.isInteger() &&
5678 "Cannot *_EXTEND_INREG FP types");
5679 assert(EVT.isVector() == VT.isVector() &&
5680 "SIGN_EXTEND_INREG type should be vector iff the operand "
5681 "type is vector!");
5682 assert((!EVT.isVector() ||
5683 EVT.getVectorElementCount() == VT.getVectorElementCount()) &&
5684 "Vector element counts must match in SIGN_EXTEND_INREG");
5685 assert(EVT.bitsLE(VT) && "Not extending!");
5686 if (EVT == VT) return N1; // Not actually extending
5687
5688 auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) {
5689 unsigned FromBits = EVT.getScalarSizeInBits();
5690 Val <<= Val.getBitWidth() - FromBits;
5691 Val.ashrInPlace(Val.getBitWidth() - FromBits);
5692 return getConstant(Val, DL, ConstantVT);
5693 };
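// e.g. sign-extending the i16 value 0x00F0 in-register from i8: the shift
// left by 8 gives 0xF000 and the arithmetic shift back yields 0xFFF0.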
5694
5695 if (N1C) {
5696 const APInt &Val = N1C->getAPIntValue();
5697 return SignExtendInReg(Val, VT);
5698 }
5699 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) {
5700 SmallVector<SDValue, 8> Ops;
5701 llvm::EVT OpVT = N1.getOperand(0).getValueType();
5702 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
5703 SDValue Op = N1.getOperand(i);
5704 if (Op.isUndef()) {
5705 Ops.push_back(getUNDEF(OpVT));
5706 continue;
5707 }
5708 ConstantSDNode *C = cast<ConstantSDNode>(Op);
5709 APInt Val = C->getAPIntValue();
5710 Ops.push_back(SignExtendInReg(Val, OpVT));
5711 }
5712 return getBuildVector(VT, DL, Ops);
5713 }
5714 break;
5715 }
5716 case ISD::FP_TO_SINT_SAT:
5717 case ISD::FP_TO_UINT_SAT: {
5718 assert(VT.isInteger() && cast<VTSDNode>(N2)->getVT().isInteger() &&
5719 N1.getValueType().isFloatingPoint() && "Invalid FP_TO_*INT_SAT");
5720 assert(N1.getValueType().isVector() == VT.isVector() &&
5721 "FP_TO_*INT_SAT type should be vector iff the operand type is "
5722 "vector!");
5723 assert((!VT.isVector() || VT.getVectorNumElements() ==
5724 N1.getValueType().getVectorNumElements()) &&
5725 "Vector element counts must match in FP_TO_*INT_SAT");
5726 assert(!cast<VTSDNode>(N2)->getVT().isVector() &&
5727 "Type to saturate to must be a scalar.");
5728 assert(cast<VTSDNode>(N2)->getVT().bitsLE(VT.getScalarType()) &&
5729 "Not extending!");
5730 break;
5731 }
5732 case ISD::EXTRACT_VECTOR_ELT:
5733 assert(VT.getSizeInBits() >= N1.getValueType().getScalarSizeInBits() &&
5734 "The result of EXTRACT_VECTOR_ELT must be at least as wide as the \
5735 element type of the vector.");
5736
5737 // Extract from an undefined value or using an undefined index is undefined.
5738 if (N1.isUndef() || N2.isUndef())
5739 return getUNDEF(VT);
5740
5741 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF for fixed length
5742 // vectors. For scalable vectors we will provide appropriate support for
5743 // dealing with arbitrary indices.
5744 if (N2C && N1.getValueType().isFixedLengthVector() &&
5745 N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements()))
5746 return getUNDEF(VT);
5747
5748 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
5749 // expanding copies of large vectors from registers. This only works for
5750 // fixed length vectors, since we need to know the exact number of
5751 // elements.
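// e.g. extracting lane 5 from (concat_vectors v4f32 A, v4f32 B) becomes
// (extract_vector_elt B, 1), since 5 / 4 selects operand B and 5 % 4 == 1.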
5752 if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS && N1.getNumOperands() > 0 &&
5753 N1.getOperand(0).getValueType().isFixedLengthVector()) {
5754 unsigned Factor =
5755 N1.getOperand(0).getValueType().getVectorNumElements();
5756 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
5757 N1.getOperand(N2C->getZExtValue() / Factor),
5758 getVectorIdxConstant(N2C->getZExtValue() % Factor, DL));
5759 }
5760
5761 // EXTRACT_VECTOR_ELT of BUILD_VECTOR or SPLAT_VECTOR is often formed while
5762 // lowering is expanding large vector constants.
5763 if (N2C && (N1.getOpcode() == ISD::BUILD_VECTOR ||
5764 N1.getOpcode() == ISD::SPLAT_VECTOR)) {
5765 assert((N1.getOpcode() != ISD::BUILD_VECTOR ||
5766 N1.getValueType().isFixedLengthVector()) &&
5767 "BUILD_VECTOR used for scalable vectors");
5768 unsigned Index =
5769 N1.getOpcode() == ISD::BUILD_VECTOR ? N2C->getZExtValue() : 0;
5770 SDValue Elt = N1.getOperand(Index);
5771
5772 if (VT != Elt.getValueType())
5773 // If the vector element type is not legal, the BUILD_VECTOR operands
5774 // are promoted and implicitly truncated, and the result implicitly
5775 // extended. Make that explicit here.
5776 Elt = getAnyExtOrTrunc(Elt, DL, VT);
5777
5778 return Elt;
5779 }
5780
5781 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
5782 // operations are lowered to scalars.
5783 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
5784 // If the indices are the same, return the inserted element; if the
5785 // indices are known to differ, extract the element from the original
5786 // vector instead.
5787 SDValue N1Op2 = N1.getOperand(2);
5788 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);
5789
5790 if (N1Op2C && N2C) {
5791 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
5792 if (VT == N1.getOperand(1).getValueType())
5793 return N1.getOperand(1);
5794 return getSExtOrTrunc(N1.getOperand(1), DL, VT);
5795 }
5796 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
5797 }
5798 }
5799
5800 // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed
5801 // when vector types are scalarized and v1iX is legal.
5802 // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX, Idx).
5803 // Here we are completely ignoring the extract element index (N2),
5804 // which is fine for fixed width vectors, since any index other than 0
5805 // is undefined anyway. However, this cannot be ignored for scalable
5806 // vectors - in theory we could support this, but we don't want to do this
5807 // without a profitability check.
5808 if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
5809 N1.getValueType().isFixedLengthVector() &&
5810 N1.getValueType().getVectorNumElements() == 1) {
5811 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0),
5812 N1.getOperand(1));
5813 }
5814 break;
5815 case ISD::EXTRACT_ELEMENT:
5816 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
5817 assert(!N1.getValueType().isVector() && !VT.isVector() &&
5818 (N1.getValueType().isInteger() == VT.isInteger()) &&
5819 N1.getValueType() != VT &&
5820 "Wrong types for EXTRACT_ELEMENT!");
5821
5822 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
5823 // 64-bit integers into 32-bit parts. Instead of building the extract of
5824 // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
5825 if (N1.getOpcode() == ISD::BUILD_PAIR)
5826 return N1.getOperand(N2C->getZExtValue());
5827
5828 // EXTRACT_ELEMENT of a constant int is also very common.
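// e.g. (extract_element (i64 constant 0x1122334455667788), 1) yields the
// high i32 half 0x11223344 via extractBits(32, 32).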
5829 if (N1C) {
5830 unsigned ElementSize = VT.getSizeInBits();
5831 unsigned Shift = ElementSize * N2C->getZExtValue();
5832 const APInt &Val = N1C->getAPIntValue();
5833 return getConstant(Val.extractBits(ElementSize, Shift), DL, VT);
5834 }
5835 break;
5836 case ISD::EXTRACT_SUBVECTOR:
5837 EVT N1VT = N1.getValueType();
5838 assert(VT.isVector() && N1VT.isVector() &&
5839 "Extract subvector VTs must be vectors!");
5840 assert(VT.getVectorElementType() == N1VT.getVectorElementType() &&
5841 "Extract subvector VTs must have the same element type!");
5842 assert((VT.isFixedLengthVector() || N1VT.isScalableVector()) &&
5843 "Cannot extract a scalable vector from a fixed length vector!");
5844 assert((VT.isScalableVector() != N1VT.isScalableVector() ||
5845 VT.getVectorMinNumElements() <= N1VT.getVectorMinNumElements()) &&
5846 "Extract subvector must be from larger vector to smaller vector!");
5847 assert(N2C && "Extract subvector index must be a constant");
5848 assert((VT.isScalableVector() != N1VT.isScalableVector() ||
5849 (VT.getVectorMinNumElements() + N2C->getZExtValue()) <=
5850 N1VT.getVectorMinNumElements()) &&
5851 "Extract subvector overflow!");
5852 assert(N2C->getAPIntValue().getBitWidth() ==
5853 TLI->getVectorIdxTy(getDataLayout()).getFixedSizeInBits() &&
5854 "Constant index for EXTRACT_SUBVECTOR has an invalid size");
5855
5856 // Trivial extraction.
5857 if (VT == N1VT)
5858 return N1;
5859
5860 // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF.
5861 if (N1.isUndef())
5862 return getUNDEF(VT);
5863
5864 // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of
5865 // the concat have the same type as the extract.
5866 if (N1.getOpcode() == ISD::CONCAT_VECTORS && N1.getNumOperands() > 0 &&
5867 VT == N1.getOperand(0).getValueType()) {
5868 unsigned Factor = VT.getVectorMinNumElements();
5869 return N1.getOperand(N2C->getZExtValue() / Factor);
5870 }
5871
5872 // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created
5873 // during shuffle legalization.
5874 if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) &&
5875 VT == N1.getOperand(1).getValueType())
5876 return N1.getOperand(1);
5877 break;
5878 }
5879
5880 // Perform trivial constant folding.
5881 if (SDValue SV = FoldConstantArithmetic(Opcode, DL, VT, {N1, N2}))
5882 return SV;
5883
5884 if (SDValue V = foldConstantFPMath(Opcode, DL, VT, N1, N2))
5885 return V;
5886
5887 // Canonicalize an UNDEF to the RHS, even over a constant.
5888 if (N1.isUndef()) {
5889 if (TLI->isCommutativeBinOp(Opcode)) {
5890 std::swap(N1, N2);
5891 } else {
5892 switch (Opcode) {
5893 case ISD::SIGN_EXTEND_INREG:
5894 case ISD::SUB:
5895 return getUNDEF(VT); // fold op(undef, arg2) -> undef
5896 case ISD::UDIV:
5897 case ISD::SDIV:
5898 case ISD::UREM:
5899 case ISD::SREM:
5900 case ISD::SSUBSAT:
5901 case ISD::USUBSAT:
5902 return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0
5903 }
5904 }
5905 }
5906
5907 // Fold a bunch of operators when the RHS is undef.
5908 if (N2.isUndef()) {
5909 switch (Opcode) {
5910 case ISD::XOR:
5911 if (N1.isUndef())
5912 // Handle undef ^ undef -> 0 special case. This is a common
5913 // idiom (misuse).
5914 return getConstant(0, DL, VT);
5915 LLVM_FALLTHROUGH;
5916 case ISD::ADD:
5917 case ISD::SUB:
5918 case ISD::UDIV:
5919 case ISD::SDIV:
5920 case ISD::UREM:
5921 case ISD::SREM:
5922 return getUNDEF(VT); // fold op(arg1, undef) -> undef
5923 case ISD::MUL:
5924 case ISD::AND:
5925 case ISD::SSUBSAT:
5926 case ISD::USUBSAT:
5927 return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0
5928 case ISD::OR:
5929 case ISD::SADDSAT:
5930 case ISD::UADDSAT:
5931 return getAllOnesConstant(DL, VT);
5932 }
5933 }
5934
5935 // Memoize this node if possible.
5936 SDNode *N;
5937 SDVTList VTs = getVTList(VT);
5938 SDValue Ops[] = {N1, N2};
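// Nodes producing glue are not CSE'd: a glue result ties its producer to a
// single consumer, so it cannot be shared between users.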
5939 if (VT != MVT::Glue) {
5940 FoldingSetNodeID ID;
5941 AddNodeIDNode(ID, Opcode, VTs, Ops);
5942 void *IP = nullptr;
5943 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
5944 E->intersectFlagsWith(Flags);
5945 return SDValue(E, 0);
5946 }
5947
5948 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5949 N->setFlags(Flags);
5950 createOperands(N, Ops);
5951 CSEMap.InsertNode(N, IP);
5952 } else {
5953 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5954 createOperands(N, Ops);
5955 }
5956
5957 InsertNode(N);
5958 SDValue V = SDValue(N, 0);
5959 NewSDValueDbgMsg(V, "Creating new node: ", this);
5960 return V;
5961 }
5962
5963 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5964 SDValue N1, SDValue N2, SDValue N3) {
5965 SDNodeFlags Flags;
5966 if (Inserter)
5967 Flags = Inserter->getFlags();
5968 return getNode(Opcode, DL, VT, N1, N2, N3, Flags);
5969 }
5970
5971 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5972 SDValue N1, SDValue N2, SDValue N3,
5973 const SDNodeFlags Flags) {
5974 assert(N1.getOpcode() != ISD::DELETED_NODE &&
5975 N2.getOpcode() != ISD::DELETED_NODE &&
5976 N3.getOpcode() != ISD::DELETED_NODE &&
5977 "Operand is DELETED_NODE!");
5978 // Perform various simplifications.
5979 switch (Opcode) {
5980 case ISD::FMA: {
5981 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
5982 assert(N1.getValueType() == VT && N2.getValueType() == VT &&
5983 N3.getValueType() == VT && "FMA types must match!");
5984 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
5985 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
5986 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
5987 if (N1CFP && N2CFP && N3CFP) {
5988 APFloat V1 = N1CFP->getValueAPF();
5989 const APFloat &V2 = N2CFP->getValueAPF();
5990 const APFloat &V3 = N3CFP->getValueAPF();
5991 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
5992 return getConstantFP(V1, DL, VT);
5993 }
5994 break;
5995 }
5996 case ISD::BUILD_VECTOR: {
5997 // Attempt to simplify BUILD_VECTOR.
5998 SDValue Ops[] = {N1, N2, N3};
5999 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
6000 return V;
6001 break;
6002 }
6003 case ISD::CONCAT_VECTORS: {
6004 SDValue Ops[] = {N1, N2, N3};
6005 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
6006 return V;
6007 break;
6008 }
6009 case ISD::SETCC: {
6010 assert(VT.isInteger() && "SETCC result type must be an integer!");
6011 assert(N1.getValueType() == N2.getValueType() &&
6012 "SETCC operands must have the same type!");
6013 assert(VT.isVector() == N1.getValueType().isVector() &&
6014 "SETCC type should be vector iff the operand type is vector!");
6015 assert((!VT.isVector() || VT.getVectorElementCount() ==
6016 N1.getValueType().getVectorElementCount()) &&
6017 "SETCC vector element counts must match!");
6018 // Use FoldSetCC to simplify SETCC's.
6019 if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL))
6020 return V;
6021 // Vector constant folding.
6022 SDValue Ops[] = {N1, N2, N3};
6023 if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) {
6024 NewSDValueDbgMsg(V, "New node vector constant folding: ", this);
6025 return V;
6026 }
6027 break;
6028 }
6029 case ISD::SELECT:
6030 case ISD::VSELECT:
6031 if (SDValue V = simplifySelect(N1, N2, N3))
6032 return V;
6033 break;
6034 case ISD::VECTOR_SHUFFLE:
6035 llvm_unreachable("should use getVectorShuffle constructor!");
6036 case ISD::INSERT_VECTOR_ELT: {
6037 ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3);
6038 // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF, except
6039 // for scalable vectors where we will generate appropriate code to
6040 // deal with out-of-bounds cases correctly.
6041 if (N3C && N1.getValueType().isFixedLengthVector() &&
6042 N3C->getZExtValue() >= N1.getValueType().getVectorNumElements())
6043 return getUNDEF(VT);
6044
6045 // An undefined index can be assumed to be out-of-bounds, so the result is UNDEF too.
6046 if (N3.isUndef())
6047 return getUNDEF(VT);
6048
6049 // If the inserted element is an UNDEF, just use the input vector.
6050 if (N2.isUndef())
6051 return N1;
6052
6053 break;
6054 }
6055 case ISD::INSERT_SUBVECTOR: {
6056 // Inserting undef into undef is still undef.
6057 if (N1.isUndef() && N2.isUndef())
6058 return getUNDEF(VT);
6059
6060 EVT N2VT = N2.getValueType();
6061 assert(VT == N1.getValueType() &&
6062 "Dest and insert subvector source types must match!");
6063 assert(VT.isVector() && N2VT.isVector() &&
6064 "Insert subvector VTs must be vectors!");
6065 assert((VT.isScalableVector() || N2VT.isFixedLengthVector()) &&
6066 "Cannot insert a scalable vector into a fixed length vector!");
6067 assert((VT.isScalableVector() != N2VT.isScalableVector() ||
6068 VT.getVectorMinNumElements() >= N2VT.getVectorMinNumElements()) &&
6069 "Insert subvector must be from smaller vector to larger vector!");
6070 assert(isa<ConstantSDNode>(N3) &&
6071 "Insert subvector index must be constant");
6072 assert((VT.isScalableVector() != N2VT.isScalableVector() ||
6073 (N2VT.getVectorMinNumElements() +
6074 cast<ConstantSDNode>(N3)->getZExtValue()) <=
6075 VT.getVectorMinNumElements()) &&
6076 "Insert subvector overflow!");
6077 assert(cast<ConstantSDNode>(N3)->getAPIntValue().getBitWidth() ==
6078 TLI->getVectorIdxTy(getDataLayout()).getFixedSizeInBits() &&
6079 "Constant index for INSERT_SUBVECTOR has an invalid size");
6080
6081 // Trivial insertion.
6082 if (VT == N2VT)
6083 return N2;
6084
6085 // If this is an insert of an extracted vector into an undef vector, we
6086 // can just use the input to the extract.
6087 if (N1.isUndef() && N2.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
6088 N2.getOperand(1) == N3 && N2.getOperand(0).getValueType() == VT)
6089 return N2.getOperand(0);
6090 break;
6091 }
6092 case ISD::BITCAST:
6093 // Fold bit_convert nodes from a type to themselves.
6094 if (N1.getValueType() == VT)
6095 return N1;
6096 break;
6097 }
6098
6099 // Memoize node if it doesn't produce a flag.
6100 SDNode *N;
6101 SDVTList VTs = getVTList(VT);
6102 SDValue Ops[] = {N1, N2, N3};
6103 if (VT != MVT::Glue) {
6104 FoldingSetNodeID ID;
6105 AddNodeIDNode(ID, Opcode, VTs, Ops);
6106 void *IP = nullptr;
6107 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
6108 E->intersectFlagsWith(Flags);
6109 return SDValue(E, 0);
6110 }
6111
6112 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
6113 N->setFlags(Flags);
6114 createOperands(N, Ops);
6115 CSEMap.InsertNode(N, IP);
6116 } else {
6117 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
6118 createOperands(N, Ops);
6119 }
6120
6121 InsertNode(N);
6122 SDValue V = SDValue(N, 0);
6123 NewSDValueDbgMsg(V, "Creating new node: ", this);
6124 return V;
6125 }
6126
6127 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
6128 SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
6129 SDValue Ops[] = { N1, N2, N3, N4 };
6130 return getNode(Opcode, DL, VT, Ops);
6131 }
6132
6133 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
6134 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
6135 SDValue N5) {
6136 SDValue Ops[] = { N1, N2, N3, N4, N5 };
6137 return getNode(Opcode, DL, VT, Ops);
6138 }
6139
6140 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
6141 /// the incoming stack arguments to be loaded from the stack.
6142 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
6143 SmallVector<SDValue, 8> ArgChains;
6144
6145 // Include the original chain at the beginning of the list. When this is
6146 // used by target LowerCall hooks, this helps legalize find the
6147 // CALLSEQ_BEGIN node.
6148 ArgChains.push_back(Chain);
6149
6150 // Add a chain value for each stack argument.
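// Incoming arguments live in fixed stack objects, which are given negative
// frame indices, so only loads from such slots are collected here.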
6151 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
6152 UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
6153 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
6154 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
6155 if (FI->getIndex() < 0)
6156 ArgChains.push_back(SDValue(L, 1));
6157
6158 // Build a tokenfactor for all the chains.
6159 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
6160 }
6161
6162 /// getMemsetValue - Vectorized representation of the memset value
6163 /// operand.
6164 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
6165 const SDLoc &dl) {
6166 assert(!Value.isUndef());
6167
6168 unsigned NumBits = VT.getScalarSizeInBits();
6169 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
6170 assert(C->getAPIntValue().getBitWidth() == 8);
6171 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
6172 if (VT.isInteger()) {
6173 bool IsOpaque = VT.getSizeInBits() > 64 ||
6174 !DAG.getTargetLoweringInfo().isLegalStoreImmediate(C->getSExtValue());
6175 return DAG.getConstant(Val, dl, VT, false, IsOpaque);
6176 }
6177 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl,
6178 VT);
6179 }
6180
6181 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?");
6182 EVT IntVT = VT.getScalarType();
6183 if (!IntVT.isInteger())
6184 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits());
6185
6186 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value);
6187 if (NumBits > 8) {
6188 // Use a multiplication with 0x010101... to extend the input to the
6189 // required length.
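// For example, with NumBits == 32 the magic constant is 0x01010101, so a fill
// byte of 0xAB becomes 0xAB * 0x01010101 == 0xABABABAB.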
6190 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
6191 Value = DAG.getNode(ISD::MUL, dl, IntVT, Value,
6192 DAG.getConstant(Magic, dl, IntVT));
6193 }
6194
6195 if (VT != Value.getValueType() && !VT.isInteger())
6196 Value = DAG.getBitcast(VT.getScalarType(), Value);
6197 if (VT != Value.getValueType())
6198 Value = DAG.getSplatBuildVector(VT, dl, Value);
6199
6200 return Value;
6201 }
6202
6203 /// getMemsetStringVal - Similar to getMemsetValue, except this is only used
6204 /// when a memcpy is turned into a memset because the source is a constant
6205 /// string pointer.
6206 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG,
6207 const TargetLowering &TLI,
6208 const ConstantDataArraySlice &Slice) {
6209 // Handle vector with all elements zero.
6210 if (Slice.Array == nullptr) {
6211 if (VT.isInteger())
6212 return DAG.getConstant(0, dl, VT);
6213 if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
6214 return DAG.getConstantFP(0.0, dl, VT);
6215 if (VT.isVector()) {
6216 unsigned NumElts = VT.getVectorNumElements();
6217 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
6218 return DAG.getNode(ISD::BITCAST, dl, VT,
6219 DAG.getConstant(0, dl,
6220 EVT::getVectorVT(*DAG.getContext(),
6221 EltVT, NumElts)));
6222 }
6223 llvm_unreachable("Expected type!");
6224 }
6225
6226 assert(!VT.isVector() && "Can't handle vector type here!");
6227 unsigned NumVTBits = VT.getSizeInBits();
6228 unsigned NumVTBytes = NumVTBits / 8;
6229 unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length));
6230
6231 APInt Val(NumVTBits, 0);
6232 if (DAG.getDataLayout().isLittleEndian()) {
6233 for (unsigned i = 0; i != NumBytes; ++i)
6234 Val |= (uint64_t)(unsigned char)Slice[i] << i*8;
6235 } else {
6236 for (unsigned i = 0; i != NumBytes; ++i)
6237 Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8;
6238 }
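// For example, the 4-byte slice "abcd" packs to the i32 value 0x64636261 on a
// little-endian target and to 0x61626364 on a big-endian target.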
6239
6240 // If the "cost" of materializing the integer immediate is less than the cost
6241 // of a load, then it is cost effective to turn the load into the immediate.
6242 Type *Ty = VT.getTypeForEVT(*DAG.getContext());
6243 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
6244 return DAG.getConstant(Val, dl, VT);
6245 return SDValue(nullptr, 0);
6246 }
6247
6248 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, TypeSize Offset,
6249 const SDLoc &DL,
6250 const SDNodeFlags Flags) {
6251 EVT VT = Base.getValueType();
6252 SDValue Index;
6253
6254 if (Offset.isScalable())
6255 Index = getVScale(DL, Base.getValueType(),
6256 APInt(Base.getValueSizeInBits().getFixedSize(),
6257 Offset.getKnownMinSize()));
6258 else
6259 Index = getConstant(Offset.getFixedSize(), DL, VT);
6260
6261 return getMemBasePlusOffset(Base, Index, DL, Flags);
6262 }
6263
6264 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Ptr, SDValue Offset,
6265 const SDLoc &DL,
6266 const SDNodeFlags Flags) {
6267 assert(Offset.getValueType().isInteger());
6268 EVT BasePtrVT = Ptr.getValueType();
6269 return getNode(ISD::ADD, DL, BasePtrVT, Ptr, Offset, Flags);
6270 }
6271
6272 /// Returns true if memcpy source is constant data.
6273 static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) {
6274 uint64_t SrcDelta = 0;
6275 GlobalAddressSDNode *G = nullptr;
6276 if (Src.getOpcode() == ISD::GlobalAddress)
6277 G = cast<GlobalAddressSDNode>(Src);
6278 else if (Src.getOpcode() == ISD::ADD &&
6279 Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
6280 Src.getOperand(1).getOpcode() == ISD::Constant) {
6281 G = cast<GlobalAddressSDNode>(Src.getOperand(0));
6282 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
6283 }
6284 if (!G)
6285 return false;
6286
6287 return getConstantDataArrayInfo(G->getGlobal(), Slice, 8,
6288 SrcDelta + G->getOffset());
6289 }
6290
6291 static bool shouldLowerMemFuncForSize(const MachineFunction &MF,
6292 SelectionDAG &DAG) {
6293 // On Darwin, -Os means optimize for size without hurting performance, so
6294 // only really optimize for size when -Oz (MinSize) is used.
6295 if (MF.getTarget().getTargetTriple().isOSDarwin())
6296 return MF.getFunction().hasMinSize();
6297 return DAG.shouldOptForSize();
6298 }
6299
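/// Gang together the loads in OutLoadChains[From, To) behind a single
/// TokenFactor and re-issue the corresponding stores chained to that token, so
/// every store in the group depends on all of the group's loads.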
6300 static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
6301 SmallVector<SDValue, 32> &OutChains, unsigned From,
6302 unsigned To, SmallVector<SDValue, 16> &OutLoadChains,
6303 SmallVector<SDValue, 16> &OutStoreChains) {
6304 assert(OutLoadChains.size() && "Missing loads in memcpy inlining");
6305 assert(OutStoreChains.size() && "Missing stores in memcpy inlining");
6306 SmallVector<SDValue, 16> GluedLoadChains;
6307 for (unsigned i = From; i < To; ++i) {
6308 OutChains.push_back(OutLoadChains[i]);
6309 GluedLoadChains.push_back(OutLoadChains[i]);
6310 }
6311
6312 // Chain for all loads.
6313 SDValue LoadToken = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
6314 GluedLoadChains);
6315
6316 for (unsigned i = From; i < To; ++i) {
6317 StoreSDNode *ST = dyn_cast<StoreSDNode>(OutStoreChains[i]);
6318 SDValue NewStore = DAG.getTruncStore(LoadToken, dl, ST->getValue(),
6319 ST->getBasePtr(), ST->getMemoryVT(),
6320 ST->getMemOperand());
6321 OutChains.push_back(NewStore);
6322 }
6323 }
6324
6325 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
6326 SDValue Chain, SDValue Dst, SDValue Src,
6327 uint64_t Size, Align Alignment,
6328 bool isVol, bool AlwaysInline,
6329 MachinePointerInfo DstPtrInfo,
6330 MachinePointerInfo SrcPtrInfo) {
6331 // Turn a memcpy of undef to nop.
6332 // FIXME: We need to honor volatile even if Src is undef.
6333 if (Src.isUndef())
6334 return Chain;
6335
6336 // Expand memcpy to a series of load and store ops if the size operand falls
6337 // below a certain threshold.
6338 // TODO: In the AlwaysInline case, if the size is big, generate a loop rather
6339 // than a potentially humongous number of loads and stores.
6340 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6341 const DataLayout &DL = DAG.getDataLayout();
6342 LLVMContext &C = *DAG.getContext();
6343 std::vector<EVT> MemOps;
6344 bool DstAlignCanChange = false;
6345 MachineFunction &MF = DAG.getMachineFunction();
6346 MachineFrameInfo &MFI = MF.getFrameInfo();
6347 bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
6348 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
6349 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
6350 DstAlignCanChange = true;
6351 MaybeAlign SrcAlign = DAG.InferPtrAlign(Src);
6352 if (!SrcAlign || Alignment > *SrcAlign)
6353 SrcAlign = Alignment;
6354 assert(SrcAlign && "SrcAlign must be set");
6355 ConstantDataArraySlice Slice;
6356 // If marked as volatile, perform a copy even when marked as constant.
6357 bool CopyFromConstant = !isVol && isMemSrcFromConstant(Src, Slice);
6358 bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr;
6359 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
6360 const MemOp Op = isZeroConstant
6361 ? MemOp::Set(Size, DstAlignCanChange, Alignment,
6362 /*IsZeroMemset*/ true, isVol)
6363 : MemOp::Copy(Size, DstAlignCanChange, Alignment,
6364 *SrcAlign, isVol, CopyFromConstant);
6365 if (!TLI.findOptimalMemOpLowering(
6366 MemOps, Limit, Op, DstPtrInfo.getAddrSpace(),
6367 SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes()))
6368 return SDValue();
6369
6370 if (DstAlignCanChange) {
6371 Type *Ty = MemOps[0].getTypeForEVT(C);
6372 Align NewAlign = DL.getABITypeAlign(Ty);
6373
6374 // Don't promote to an alignment that would require dynamic stack
6375 // realignment.
6376 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
6377 if (!TRI->hasStackRealignment(MF))
6378 while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
6379 NewAlign = NewAlign / 2;
6380
6381 if (NewAlign > Alignment) {
6382 // Give the stack frame object a larger alignment if needed.
6383 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
6384 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
6385 Alignment = NewAlign;
6386 }
6387 }
6388
6389 MachineMemOperand::Flags MMOFlags =
6390 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
6391 SmallVector<SDValue, 16> OutLoadChains;
6392 SmallVector<SDValue, 16> OutStoreChains;
6393 SmallVector<SDValue, 32> OutChains;
6394 unsigned NumMemOps = MemOps.size();
6395 uint64_t SrcOff = 0, DstOff = 0;
6396 for (unsigned i = 0; i != NumMemOps; ++i) {
6397 EVT VT = MemOps[i];
6398 unsigned VTSize = VT.getSizeInBits() / 8;
6399 SDValue Value, Store;
6400
6401 if (VTSize > Size) {
6402 // Issuing an unaligned load / store pair that overlaps with the previous
6403 // pair. Adjust the offset accordingly.
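// For example, a 7-byte copy lowered as two i32 load/store pairs issues the
// second pair at offset 3, overlapping the first pair by one byte instead of
// accessing a byte past the end of the region.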
6404 assert(i == NumMemOps-1 && i != 0);
6405 SrcOff -= VTSize - Size;
6406 DstOff -= VTSize - Size;
6407 }
6408
6409 if (CopyFromConstant &&
6410 (isZeroConstant || (VT.isInteger() && !VT.isVector()))) {
6411 // It's unlikely that a store of a vector immediate can be done in a single
6412 // instruction. It would require a load from a constant pool first.
6413 // We only handle zero vectors here.
6414 // FIXME: Handle other cases where store of vector immediate is done in
6415 // a single instruction.
6416 ConstantDataArraySlice SubSlice;
6417 if (SrcOff < Slice.Length) {
6418 SubSlice = Slice;
6419 SubSlice.move(SrcOff);
6420 } else {
6421 // This is an out-of-bounds access and hence UB. Pretend we read zero.
6422 SubSlice.Array = nullptr;
6423 SubSlice.Offset = 0;
6424 SubSlice.Length = VTSize;
6425 }
6426 Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice);
6427 if (Value.getNode()) {
6428 Store = DAG.getStore(
6429 Chain, dl, Value,
6430 DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
6431 DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags);
6432 OutChains.push_back(Store);
6433 }
6434 }
6435
6436 if (!Store.getNode()) {
6437 // The type might not be legal for the target. This should only happen
6438 // if the type is smaller than a legal type, as on PPC, so the right
6439 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
6440 // to Load/Store if NVT==VT.
6441 // FIXME: does the case above also need this?
6442 EVT NVT = TLI.getTypeToTransformTo(C, VT);
6443 assert(NVT.bitsGE(VT));
6444
6445 bool isDereferenceable =
6446 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
6447 MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
6448 if (isDereferenceable)
6449 SrcMMOFlags |= MachineMemOperand::MODereferenceable;
6450
6451 Value = DAG.getExtLoad(
6452 ISD::EXTLOAD, dl, NVT, Chain,
6453 DAG.getMemBasePlusOffset(Src, TypeSize::Fixed(SrcOff), dl),
6454 SrcPtrInfo.getWithOffset(SrcOff), VT,
6455 commonAlignment(*SrcAlign, SrcOff), SrcMMOFlags);
6456 OutLoadChains.push_back(Value.getValue(1));
6457
6458 Store = DAG.getTruncStore(
6459 Chain, dl, Value,
6460 DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
6461 DstPtrInfo.getWithOffset(DstOff), VT, Alignment, MMOFlags);
6462 OutStoreChains.push_back(Store);
6463 }
6464 SrcOff += VTSize;
6465 DstOff += VTSize;
6466 Size -= VTSize;
6467 }
6468
6469 unsigned GluedLdStLimit = MaxLdStGlue == 0 ?
6470 TLI.getMaxGluedStoresPerMemcpy() : MaxLdStGlue;
6471 unsigned NumLdStInMemcpy = OutStoreChains.size();
6472
6473 if (NumLdStInMemcpy) {
6474 // The memcpy may have been converted to a memset if it copies constants.
6475 // In that case there are no loads, only stores, and in the absence of
6476 // loads there is nothing to gang up.
6477 if ((GluedLdStLimit <= 1) || !EnableMemCpyDAGOpt) {
6478 // If the target does not care, just leave the chains as they are.
6479 for (unsigned i = 0; i < NumLdStInMemcpy; ++i) {
6480 OutChains.push_back(OutLoadChains[i]);
6481 OutChains.push_back(OutStoreChains[i]);
6482 }
6483 } else {
6484 // Number of ld/st pairs is less than or equal to the limit set by the target.
6485 if (NumLdStInMemcpy <= GluedLdStLimit) {
6486 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
6487 NumLdStInMemcpy, OutLoadChains,
6488 OutStoreChains);
6489 } else {
6490 unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit;
6491 unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit;
6492 unsigned GlueIter = 0;
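// Gang up full groups starting from the end of the list; e.g. with 7 ld/st
// pairs and a limit of 3, the groups are [4,7) and [1,4), and the remaining
// pair [0,1) is emitted as the residual group below.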
6493
6494 for (unsigned cnt = 0; cnt < NumberLdChain; ++cnt) {
6495 unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit;
6496 unsigned IndexTo = NumLdStInMemcpy - GlueIter;
6497
6498 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, IndexFrom, IndexTo,
6499 OutLoadChains, OutStoreChains);
6500 GlueIter += GluedLdStLimit;
6501 }
6502
6503 // Residual ld/st.
6504 if (RemainingLdStInMemcpy) {
6505 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
6506 RemainingLdStInMemcpy, OutLoadChains,
6507 OutStoreChains);
6508 }
6509 }
6510 }
6511 }
6512 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
6513 }
6514
6515 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
6516 SDValue Chain, SDValue Dst, SDValue Src,
6517 uint64_t Size, Align Alignment,
6518 bool isVol, bool AlwaysInline,
6519 MachinePointerInfo DstPtrInfo,
6520 MachinePointerInfo SrcPtrInfo) {
6521 // Turn a memmove of undef to nop.
6522 // FIXME: We need to honor volatile even if Src is undef.
6523 if (Src.isUndef())
6524 return Chain;
6525
6526 // Expand memmove to a series of load and store ops if the size operand falls
6527 // below a certain threshold.
6528 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6529 const DataLayout &DL = DAG.getDataLayout();
6530 LLVMContext &C = *DAG.getContext();
6531 std::vector<EVT> MemOps;
6532 bool DstAlignCanChange = false;
6533 MachineFunction &MF = DAG.getMachineFunction();
6534 MachineFrameInfo &MFI = MF.getFrameInfo();
6535 bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
6536 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
6537 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
6538 DstAlignCanChange = true;
6539 MaybeAlign SrcAlign = DAG.InferPtrAlign(Src);
6540 if (!SrcAlign || Alignment > *SrcAlign)
6541 SrcAlign = Alignment;
6542 assert(SrcAlign && "SrcAlign must be set");
6543 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
6544 if (!TLI.findOptimalMemOpLowering(
6545 MemOps, Limit,
6546 MemOp::Copy(Size, DstAlignCanChange, Alignment, *SrcAlign,
6547 /*IsVolatile*/ true),
6548 DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
6549 MF.getFunction().getAttributes()))
6550 return SDValue();
6551
6552 if (DstAlignCanChange) {
6553 Type *Ty = MemOps[0].getTypeForEVT(C);
6554 Align NewAlign = DL.getABITypeAlign(Ty);
6555 if (NewAlign > Alignment) {
6556 // Give the stack frame object a larger alignment if needed.
6557 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
6558 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
6559 Alignment = NewAlign;
6560 }
6561 }
6562
6563 MachineMemOperand::Flags MMOFlags =
6564 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
6565 uint64_t SrcOff = 0, DstOff = 0;
6566 SmallVector<SDValue, 8> LoadValues;
6567 SmallVector<SDValue, 8> LoadChains;
6568 SmallVector<SDValue, 8> OutChains;
6569 unsigned NumMemOps = MemOps.size();
6570 for (unsigned i = 0; i < NumMemOps; i++) {
6571 EVT VT = MemOps[i];
6572 unsigned VTSize = VT.getSizeInBits() / 8;
6573 SDValue Value;
6574
6575 bool isDereferenceable =
6576 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
6577 MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
6578 if (isDereferenceable)
6579 SrcMMOFlags |= MachineMemOperand::MODereferenceable;
6580
6581 Value =
6582 DAG.getLoad(VT, dl, Chain,
6583 DAG.getMemBasePlusOffset(Src, TypeSize::Fixed(SrcOff), dl),
6584 SrcPtrInfo.getWithOffset(SrcOff), *SrcAlign, SrcMMOFlags);
6585 LoadValues.push_back(Value);
6586 LoadChains.push_back(Value.getValue(1));
6587 SrcOff += VTSize;
6588 }
6589 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
6590 OutChains.clear();
6591 for (unsigned i = 0; i < NumMemOps; i++) {
6592 EVT VT = MemOps[i];
6593 unsigned VTSize = VT.getSizeInBits() / 8;
6594 SDValue Store;
6595
6596 Store =
6597 DAG.getStore(Chain, dl, LoadValues[i],
6598 DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
6599 DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags);
6600 OutChains.push_back(Store);
6601 DstOff += VTSize;
6602 }
6603
6604 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
6605 }
6606
6607 /// Lower the call to 'memset' intrinsic function into a series of store
6608 /// operations.
6609 ///
6610 /// \param DAG Selection DAG where lowered code is placed.
6611 /// \param dl Link to corresponding IR location.
6612 /// \param Chain Control flow dependency.
6613 /// \param Dst Pointer to destination memory location.
6614 /// \param Src Value of byte to write into the memory.
6615 /// \param Size Number of bytes to write.
6616 /// \param Alignment Alignment of the destination in bytes.
6617 /// \param isVol True if destination is volatile.
6618 /// \param DstPtrInfo IR information on the memory pointer.
6619 /// \returns New head in the control flow, if lowering was successful, empty
6620 /// SDValue otherwise.
6621 ///
6622 /// The function tries to replace 'llvm.memset' intrinsic with several store
6623 /// operations and value calculation code. This is usually profitable for small
6624 /// memory size.
6625 static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
6626 SDValue Chain, SDValue Dst, SDValue Src,
6627 uint64_t Size, Align Alignment, bool isVol,
6628 MachinePointerInfo DstPtrInfo) {
6629 // Turn a memset of undef to nop.
6630 // FIXME: We need to honor volatile even if Src is undef.
6631 if (Src.isUndef())
6632 return Chain;
6633
6634 // Expand memset to a series of load/store ops if the size operand
6635 // falls below a certain threshold.
6636 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6637 std::vector<EVT> MemOps;
6638 bool DstAlignCanChange = false;
6639 MachineFunction &MF = DAG.getMachineFunction();
6640 MachineFrameInfo &MFI = MF.getFrameInfo();
6641 bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
6642 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
6643 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
6644 DstAlignCanChange = true;
6645 bool IsZeroVal =
6646 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
6647 if (!TLI.findOptimalMemOpLowering(
6648 MemOps, TLI.getMaxStoresPerMemset(OptSize),
6649 MemOp::Set(Size, DstAlignCanChange, Alignment, IsZeroVal, isVol),
6650 DstPtrInfo.getAddrSpace(), ~0u, MF.getFunction().getAttributes()))
6651 return SDValue();
6652
6653 if (DstAlignCanChange) {
6654 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
6655 Align NewAlign = DAG.getDataLayout().getABITypeAlign(Ty);
6656 if (NewAlign > Alignment) {
6657 // Give the stack frame object a larger alignment if needed.
6658 if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
6659 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
6660 Alignment = NewAlign;
6661 }
6662 }
6663
6664 SmallVector<SDValue, 8> OutChains;
6665 uint64_t DstOff = 0;
6666 unsigned NumMemOps = MemOps.size();
6667
6668 // Find the largest store and generate the bit pattern for it.
6669 EVT LargestVT = MemOps[0];
6670 for (unsigned i = 1; i < NumMemOps; i++)
6671 if (MemOps[i].bitsGT(LargestVT))
6672 LargestVT = MemOps[i];
6673 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
6674
6675 for (unsigned i = 0; i < NumMemOps; i++) {
6676 EVT VT = MemOps[i];
6677 unsigned VTSize = VT.getSizeInBits() / 8;
6678 if (VTSize > Size) {
6679 // Issuing an unaligned load / store pair that overlaps with the previous
6680 // pair. Adjust the offset accordingly.
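// For example, a 7-byte memset lowered as two i32 stores issues the second
// store at offset 3 so it overlaps the first store rather than writing past
// the end of the region.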
6681 assert(i == NumMemOps-1 && i != 0);
6682 DstOff -= VTSize - Size;
6683 }
6684
6685 // If this store is smaller than the largest store, see whether we can get
6686 // the smaller value for free with a truncate.
6687 SDValue Value = MemSetValue;
6688 if (VT.bitsLT(LargestVT)) {
6689 if (!LargestVT.isVector() && !VT.isVector() &&
6690 TLI.isTruncateFree(LargestVT, VT))
6691 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
6692 else
6693 Value = getMemsetValue(Src, VT, DAG, dl);
6694 }
6695 assert(Value.getValueType() == VT && "Value with wrong type.");
6696 SDValue Store = DAG.getStore(
6697 Chain, dl, Value,
6698 DAG.getMemBasePlusOffset(Dst, TypeSize::Fixed(DstOff), dl),
6699 DstPtrInfo.getWithOffset(DstOff), Alignment,
6700 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone);
6701 OutChains.push_back(Store);
6702 DstOff += VT.getSizeInBits() / 8;
6703 Size -= VTSize;
6704 }
6705
6706 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
6707 }
6708
6709 static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI,
6710 unsigned AS) {
6711 // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all
6712 // pointer operands can be losslessly bitcasted to pointers of address space 0
6713 if (AS != 0 && !TLI->getTargetMachine().isNoopAddrSpaceCast(AS, 0)) {
6714 report_fatal_error("cannot lower memory intrinsic in address space " +
6715 Twine(AS));
6716 }
6717 }
6718
6719 SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
6720 SDValue Src, SDValue Size, Align Alignment,
6721 bool isVol, bool AlwaysInline, bool isTailCall,
6722 MachinePointerInfo DstPtrInfo,
6723 MachinePointerInfo SrcPtrInfo) {
6724 // Check to see if we should lower the memcpy to loads and stores first.
6725 // For cases within the target-specified limits, this is the best choice.
6726 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
6727 if (ConstantSize) {
6728 // Memcpy with size zero? Just return the original chain.
6729 if (ConstantSize->isNullValue())
6730 return Chain;
6731
6732 SDValue Result = getMemcpyLoadsAndStores(
6733 *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment,
6734 isVol, false, DstPtrInfo, SrcPtrInfo);
6735 if (Result.getNode())
6736 return Result;
6737 }
6738
6739 // Then check to see if we should lower the memcpy with target-specific
6740 // code. If the target chooses to do this, this is the next best.
6741 if (TSI) {
6742 SDValue Result = TSI->EmitTargetCodeForMemcpy(
6743 *this, dl, Chain, Dst, Src, Size, Alignment, isVol, AlwaysInline,
6744 DstPtrInfo, SrcPtrInfo);
6745 if (Result.getNode())
6746 return Result;
6747 }
6748
6749 // If we really need inline code and the target declined to provide it,
6750 // use a (potentially long) sequence of loads and stores.
6751 if (AlwaysInline) {
6752 assert(ConstantSize && "AlwaysInline requires a constant size!");
6753 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
6754 ConstantSize->getZExtValue(), Alignment,
6755 isVol, true, DstPtrInfo, SrcPtrInfo);
6756 }
6757
6758 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
6759 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
6760
6761 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
6762 // memcpy is not guaranteed to be safe. libc memcpys aren't required to
6763 // respect volatile, so they may do things like read or write memory
6764 // beyond the given memory regions. But fixing this isn't easy, and most
6765 // people don't care.
6766
6767 // Emit a library call.
6768 TargetLowering::ArgListTy Args;
6769 TargetLowering::ArgListEntry Entry;
6770 Entry.Ty = Type::getInt8PtrTy(*getContext());
6771 Entry.Node = Dst; Args.push_back(Entry);
6772 Entry.Node = Src; Args.push_back(Entry);
6773
6774 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6775 Entry.Node = Size; Args.push_back(Entry);
6776 // FIXME: pass in SDLoc
6777 TargetLowering::CallLoweringInfo CLI(*this);
6778 CLI.setDebugLoc(dl)
6779 .setChain(Chain)
6780 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY),
6781 Dst.getValueType().getTypeForEVT(*getContext()),
6782 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
6783 TLI->getPointerTy(getDataLayout())),
6784 std::move(Args))
6785 .setDiscardResult()
6786 .setTailCall(isTailCall);
6787
6788 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
6789 return CallResult.second;
6790 }
6791
6792 SDValue SelectionDAG::getAtomicMemcpy(SDValue Chain, const SDLoc &dl,
6793 SDValue Dst, unsigned DstAlign,
6794 SDValue Src, unsigned SrcAlign,
6795 SDValue Size, Type *SizeTy,
6796 unsigned ElemSz, bool isTailCall,
6797 MachinePointerInfo DstPtrInfo,
6798 MachinePointerInfo SrcPtrInfo) {
6799 // Emit a library call.
6800 TargetLowering::ArgListTy Args;
6801 TargetLowering::ArgListEntry Entry;
6802 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6803 Entry.Node = Dst;
6804 Args.push_back(Entry);
6805
6806 Entry.Node = Src;
6807 Args.push_back(Entry);
6808
6809 Entry.Ty = SizeTy;
6810 Entry.Node = Size;
6811 Args.push_back(Entry);
6812
6813 RTLIB::Libcall LibraryCall =
6814 RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElemSz);
6815 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
6816 report_fatal_error("Unsupported element size");
6817
6818 TargetLowering::CallLoweringInfo CLI(*this);
6819 CLI.setDebugLoc(dl)
6820 .setChain(Chain)
6821 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
6822 Type::getVoidTy(*getContext()),
6823 getExternalSymbol(TLI->getLibcallName(LibraryCall),
6824 TLI->getPointerTy(getDataLayout())),
6825 std::move(Args))
6826 .setDiscardResult()
6827 .setTailCall(isTailCall);
6828
6829 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
6830 return CallResult.second;
6831 }
6832
6833 SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
6834 SDValue Src, SDValue Size, Align Alignment,
6835 bool isVol, bool isTailCall,
6836 MachinePointerInfo DstPtrInfo,
6837 MachinePointerInfo SrcPtrInfo) {
6838 // Check to see if we should lower the memmove to loads and stores first.
6839 // For cases within the target-specified limits, this is the best choice.
6840 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
6841 if (ConstantSize) {
6842 // Memmove with size zero? Just return the original chain.
6843 if (ConstantSize->isNullValue())
6844 return Chain;
6845
6846 SDValue Result = getMemmoveLoadsAndStores(
6847 *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(), Alignment,
6848 isVol, false, DstPtrInfo, SrcPtrInfo);
6849 if (Result.getNode())
6850 return Result;
6851 }
6852
6853 // Then check to see if we should lower the memmove with target-specific
6854 // code. If the target chooses to do this, this is the next best.
6855 if (TSI) {
6856 SDValue Result =
6857 TSI->EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size,
6858 Alignment, isVol, DstPtrInfo, SrcPtrInfo);
6859 if (Result.getNode())
6860 return Result;
6861 }
6862
6863 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
6864 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
6865
6866 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
6867 // not be safe. See memcpy above for more details.
6868
6869 // Emit a library call.
6870 TargetLowering::ArgListTy Args;
6871 TargetLowering::ArgListEntry Entry;
6872 Entry.Ty = Type::getInt8PtrTy(*getContext());
6873 Entry.Node = Dst; Args.push_back(Entry);
6874 Entry.Node = Src; Args.push_back(Entry);
6875
6876 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6877 Entry.Node = Size; Args.push_back(Entry);
6878 // FIXME: pass in SDLoc
6879 TargetLowering::CallLoweringInfo CLI(*this);
6880 CLI.setDebugLoc(dl)
6881 .setChain(Chain)
6882 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
6883 Dst.getValueType().getTypeForEVT(*getContext()),
6884 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
6885 TLI->getPointerTy(getDataLayout())),
6886 std::move(Args))
6887 .setDiscardResult()
6888 .setTailCall(isTailCall);
6889
6890 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
6891 return CallResult.second;
6892 }
6893
6894 SDValue SelectionDAG::getAtomicMemmove(SDValue Chain, const SDLoc &dl,
6895 SDValue Dst, unsigned DstAlign,
6896 SDValue Src, unsigned SrcAlign,
6897 SDValue Size, Type *SizeTy,
6898 unsigned ElemSz, bool isTailCall,
6899 MachinePointerInfo DstPtrInfo,
6900 MachinePointerInfo SrcPtrInfo) {
6901 // Emit a library call.
6902 TargetLowering::ArgListTy Args;
6903 TargetLowering::ArgListEntry Entry;
6904 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6905 Entry.Node = Dst;
6906 Args.push_back(Entry);
6907
6908 Entry.Node = Src;
6909 Args.push_back(Entry);
6910
6911 Entry.Ty = SizeTy;
6912 Entry.Node = Size;
6913 Args.push_back(Entry);
6914
6915 RTLIB::Libcall LibraryCall =
6916 RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(ElemSz);
6917 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
6918 report_fatal_error("Unsupported element size");
6919
6920 TargetLowering::CallLoweringInfo CLI(*this);
6921 CLI.setDebugLoc(dl)
6922 .setChain(Chain)
6923 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
6924 Type::getVoidTy(*getContext()),
6925 getExternalSymbol(TLI->getLibcallName(LibraryCall),
6926 TLI->getPointerTy(getDataLayout())),
6927 std::move(Args))
6928 .setDiscardResult()
6929 .setTailCall(isTailCall);
6930
6931 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
6932 return CallResult.second;
6933 }
6934
6935 SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
6936 SDValue Src, SDValue Size, Align Alignment,
6937 bool isVol, bool isTailCall,
6938 MachinePointerInfo DstPtrInfo) {
6939 // Check to see if we should lower the memset to stores first.
6940 // For cases within the target-specified limits, this is the best choice.
6941 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
6942 if (ConstantSize) {
6943 // Memset with size zero? Just return the original chain.
6944 if (ConstantSize->isNullValue())
6945 return Chain;
6946
6947 SDValue Result = getMemsetStores(*this, dl, Chain, Dst, Src,
6948 ConstantSize->getZExtValue(), Alignment,
6949 isVol, DstPtrInfo);
6950
6951 if (Result.getNode())
6952 return Result;
6953 }
6954
6955 // Then check to see if we should lower the memset with target-specific
6956 // code. If the target chooses to do this, this is the next best.
6957 if (TSI) {
6958 SDValue Result = TSI->EmitTargetCodeForMemset(
6959 *this, dl, Chain, Dst, Src, Size, Alignment, isVol, DstPtrInfo);
6960 if (Result.getNode())
6961 return Result;
6962 }
6963
6964 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
6965
6966 // Emit a library call.
6967 TargetLowering::ArgListTy Args;
6968 TargetLowering::ArgListEntry Entry;
6969 Entry.Node = Dst; Entry.Ty = Type::getInt8PtrTy(*getContext());
6970 Args.push_back(Entry);
6971 Entry.Node = Src;
6972 Entry.Ty = Src.getValueType().getTypeForEVT(*getContext());
6973 Args.push_back(Entry);
6974 Entry.Node = Size;
6975 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6976 Args.push_back(Entry);
6977
6978 // FIXME: pass in SDLoc
6979 TargetLowering::CallLoweringInfo CLI(*this);
6980 CLI.setDebugLoc(dl)
6981 .setChain(Chain)
6982 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
6983 Dst.getValueType().getTypeForEVT(*getContext()),
6984 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
6985 TLI->getPointerTy(getDataLayout())),
6986 std::move(Args))
6987 .setDiscardResult()
6988 .setTailCall(isTailCall);
6989
6990 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
6991 return CallResult.second;
6992 }
6993
6994 SDValue SelectionDAG::getAtomicMemset(SDValue Chain, const SDLoc &dl,
6995 SDValue Dst, unsigned DstAlign,
6996 SDValue Value, SDValue Size, Type *SizeTy,
6997 unsigned ElemSz, bool isTailCall,
6998 MachinePointerInfo DstPtrInfo) {
6999 // Emit a library call.
7000 TargetLowering::ArgListTy Args;
7001 TargetLowering::ArgListEntry Entry;
7002 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
7003 Entry.Node = Dst;
7004 Args.push_back(Entry);
7005
7006 Entry.Ty = Type::getInt8Ty(*getContext());
7007 Entry.Node = Value;
7008 Args.push_back(Entry);
7009
7010 Entry.Ty = SizeTy;
7011 Entry.Node = Size;
7012 Args.push_back(Entry);
7013
7014 RTLIB::Libcall LibraryCall =
7015 RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(ElemSz);
7016 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
7017 report_fatal_error("Unsupported element size");
7018
7019 TargetLowering::CallLoweringInfo CLI(*this);
7020 CLI.setDebugLoc(dl)
7021 .setChain(Chain)
7022 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
7023 Type::getVoidTy(*getContext()),
7024 getExternalSymbol(TLI->getLibcallName(LibraryCall),
7025 TLI->getPointerTy(getDataLayout())),
7026 std::move(Args))
7027 .setDiscardResult()
7028 .setTailCall(isTailCall);
7029
7030 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
7031 return CallResult.second;
7032 }
7033
7034 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
7035 SDVTList VTList, ArrayRef<SDValue> Ops,
7036 MachineMemOperand *MMO) {
7037 FoldingSetNodeID ID;
7038 ID.AddInteger(MemVT.getRawBits());
7039 AddNodeIDNode(ID, Opcode, VTList, Ops);
7040 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7041 void* IP = nullptr;
7042 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7043 cast<AtomicSDNode>(E)->refineAlignment(MMO);
7044 return SDValue(E, 0);
7045 }
7046
7047 auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
7048 VTList, MemVT, MMO);
7049 createOperands(N, Ops);
7050
7051 CSEMap.InsertNode(N, IP);
7052 InsertNode(N);
7053 return SDValue(N, 0);
7054 }
7055
7056 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl,
7057 EVT MemVT, SDVTList VTs, SDValue Chain,
7058 SDValue Ptr, SDValue Cmp, SDValue Swp,
7059 MachineMemOperand *MMO) {
7060 assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
7061 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
7062 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
7063
7064 SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
7065 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
7066 }
7067
7068 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
7069 SDValue Chain, SDValue Ptr, SDValue Val,
7070 MachineMemOperand *MMO) {
7071 assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
7072 Opcode == ISD::ATOMIC_LOAD_SUB ||
7073 Opcode == ISD::ATOMIC_LOAD_AND ||
7074 Opcode == ISD::ATOMIC_LOAD_CLR ||
7075 Opcode == ISD::ATOMIC_LOAD_OR ||
7076 Opcode == ISD::ATOMIC_LOAD_XOR ||
7077 Opcode == ISD::ATOMIC_LOAD_NAND ||
7078 Opcode == ISD::ATOMIC_LOAD_MIN ||
7079 Opcode == ISD::ATOMIC_LOAD_MAX ||
7080 Opcode == ISD::ATOMIC_LOAD_UMIN ||
7081 Opcode == ISD::ATOMIC_LOAD_UMAX ||
7082 Opcode == ISD::ATOMIC_LOAD_FADD ||
7083 Opcode == ISD::ATOMIC_LOAD_FSUB ||
7084 Opcode == ISD::ATOMIC_SWAP ||
7085 Opcode == ISD::ATOMIC_STORE) &&
7086 "Invalid Atomic Op");
7087
7088 EVT VT = Val.getValueType();
7089
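// An atomic store produces only a chain; every other atomic RMW operation also
// produces the value previously held at the memory location.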
7090 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
7091 getVTList(VT, MVT::Other);
7092 SDValue Ops[] = {Chain, Ptr, Val};
7093 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
7094 }
7095
7096 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
7097 EVT VT, SDValue Chain, SDValue Ptr,
7098 MachineMemOperand *MMO) {
7099 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");
7100
7101 SDVTList VTs = getVTList(VT, MVT::Other);
7102 SDValue Ops[] = {Chain, Ptr};
7103 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
7104 }
7105
7106 /// getMergeValues - Create a MERGE_VALUES node from the given operands.
7107 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) {
7108 if (Ops.size() == 1)
7109 return Ops[0];
7110
7111 SmallVector<EVT, 4> VTs;
7112 VTs.reserve(Ops.size());
7113 for (const SDValue &Op : Ops)
7114 VTs.push_back(Op.getValueType());
7115 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
7116 }
7117
7118 SDValue SelectionDAG::getMemIntrinsicNode(
7119 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
7120 EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment,
7121 MachineMemOperand::Flags Flags, uint64_t Size, const AAMDNodes &AAInfo) {
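// The store size of a scalable vector is not known at compile time, so fall
// back to an unknown size when the caller did not provide one.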
7122 if (!Size && MemVT.isScalableVector())
7123 Size = MemoryLocation::UnknownSize;
7124 else if (!Size)
7125 Size = MemVT.getStoreSize();
7126
7127 MachineFunction &MF = getMachineFunction();
7128 MachineMemOperand *MMO =
7129 MF.getMachineMemOperand(PtrInfo, Flags, Size, Alignment, AAInfo);
7130
7131 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
7132 }
7133
7134 SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl,
7135 SDVTList VTList,
7136 ArrayRef<SDValue> Ops, EVT MemVT,
7137 MachineMemOperand *MMO) {
7138 assert((Opcode == ISD::INTRINSIC_VOID ||
7139 Opcode == ISD::INTRINSIC_W_CHAIN ||
7140 Opcode == ISD::PREFETCH ||
7141 ((int)Opcode <= std::numeric_limits<int>::max() &&
7142 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
7143 "Opcode is not a memory-accessing opcode!");
7144
7145 // Memoize the node unless it returns a flag.
7146 MemIntrinsicSDNode *N;
7147 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
7148 FoldingSetNodeID ID;
7149 AddNodeIDNode(ID, Opcode, VTList, Ops);
7150 ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>(
7151 Opcode, dl.getIROrder(), VTList, MemVT, MMO));
7152 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7153 void *IP = nullptr;
7154 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7155 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
7156 return SDValue(E, 0);
7157 }
7158
7159 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
7160 VTList, MemVT, MMO);
7161 createOperands(N, Ops);
7162
7163 CSEMap.InsertNode(N, IP);
7164 } else {
7165 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
7166 VTList, MemVT, MMO);
7167 createOperands(N, Ops);
7168 }
7169 InsertNode(N);
7170 SDValue V(N, 0);
7171 NewSDValueDbgMsg(V, "Creating new node: ", this);
7172 return V;
7173 }
7174
7175 SDValue SelectionDAG::getLifetimeNode(bool IsStart, const SDLoc &dl,
7176 SDValue Chain, int FrameIndex,
7177 int64_t Size, int64_t Offset) {
7178 const unsigned Opcode = IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END;
7179 const auto VTs = getVTList(MVT::Other);
7180 SDValue Ops[2] = {
7181 Chain,
7182 getFrameIndex(FrameIndex,
7183 getTargetLoweringInfo().getFrameIndexTy(getDataLayout()),
7184 true)};
7185
7186 FoldingSetNodeID ID;
7187 AddNodeIDNode(ID, Opcode, VTs, Ops);
7188 ID.AddInteger(FrameIndex);
7189 ID.AddInteger(Size);
7190 ID.AddInteger(Offset);
7191 void *IP = nullptr;
7192 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
7193 return SDValue(E, 0);
7194
7195 LifetimeSDNode *N = newSDNode<LifetimeSDNode>(
7196 Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs, Size, Offset);
7197 createOperands(N, Ops);
7198 CSEMap.InsertNode(N, IP);
7199 InsertNode(N);
7200 SDValue V(N, 0);
7201 NewSDValueDbgMsg(V, "Creating new node: ", this);
7202 return V;
7203 }
7204
7205 SDValue SelectionDAG::getPseudoProbeNode(const SDLoc &Dl, SDValue Chain,
7206 uint64_t Guid, uint64_t Index,
7207 uint32_t Attr) {
7208 const unsigned Opcode = ISD::PSEUDO_PROBE;
7209 const auto VTs = getVTList(MVT::Other);
7210 SDValue Ops[] = {Chain};
7211 FoldingSetNodeID ID;
7212 AddNodeIDNode(ID, Opcode, VTs, Ops);
7213 ID.AddInteger(Guid);
7214 ID.AddInteger(Index);
7215 void *IP = nullptr;
7216 if (SDNode *E = FindNodeOrInsertPos(ID, Dl, IP))
7217 return SDValue(E, 0);
7218
7219 auto *N = newSDNode<PseudoProbeSDNode>(
7220 Opcode, Dl.getIROrder(), Dl.getDebugLoc(), VTs, Guid, Index, Attr);
7221 createOperands(N, Ops);
7222 CSEMap.InsertNode(N, IP);
7223 InsertNode(N);
7224 SDValue V(N, 0);
7225 NewSDValueDbgMsg(V, "Creating new node: ", this);
7226 return V;
7227 }
7228
7229 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
7230 /// MachinePointerInfo record from it. This is particularly useful because the
7231 /// code generator has many cases where it doesn't bother passing in a
7232 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
7233 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
7234 SelectionDAG &DAG, SDValue Ptr,
7235 int64_t Offset = 0) {
7236 // If this is FI+Offset, we can model it.
7237 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
7238 return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(),
7239 FI->getIndex(), Offset);
7240
7241 // If this is (FI+Offset1)+Offset2, we can model it.
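// For example, a Ptr of the form (FI + 8) combined with Offset == 16 yields a
// fixed-stack reference at offset 24.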
7242 if (Ptr.getOpcode() != ISD::ADD ||
7243 !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
7244 !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
7245 return Info;
7246
7247 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
7248 return MachinePointerInfo::getFixedStack(
7249 DAG.getMachineFunction(), FI,
7250 Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
7251 }
7252
7253 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
7254 /// MachinePointerInfo record from it. This is particularly useful because the
7255 /// code generator has many cases where it doesn't bother passing in a
7256 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
7257 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
7258 SelectionDAG &DAG, SDValue Ptr,
7259 SDValue OffsetOp) {
7260 // If the 'Offset' value isn't a constant, we can't handle this.
7261 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
7262 return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue());
7263 if (OffsetOp.isUndef())
7264 return InferPointerInfo(Info, DAG, Ptr);
7265 return Info;
7266 }
7267
7268 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
7269 EVT VT, const SDLoc &dl, SDValue Chain,
7270 SDValue Ptr, SDValue Offset,
7271 MachinePointerInfo PtrInfo, EVT MemVT,
7272 Align Alignment,
7273 MachineMemOperand::Flags MMOFlags,
7274 const AAMDNodes &AAInfo, const MDNode *Ranges) {
7275 assert(Chain.getValueType() == MVT::Other &&
7276 "Invalid chain type");
7277
7278 MMOFlags |= MachineMemOperand::MOLoad;
7279 assert((MMOFlags & MachineMemOperand::MOStore) == 0);
7280 // If we don't have a PtrInfo, infer the trivial frame index case to simplify
7281 // clients.
7282 if (PtrInfo.V.isNull())
7283 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);
7284
7285 uint64_t Size = MemoryLocation::getSizeOrUnknown(MemVT.getStoreSize());
7286 MachineFunction &MF = getMachineFunction();
7287 MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MMOFlags, Size,
7288 Alignment, AAInfo, Ranges);
7289 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
7290 }
7291
getLoad(ISD::MemIndexedMode AM,ISD::LoadExtType ExtType,EVT VT,const SDLoc & dl,SDValue Chain,SDValue Ptr,SDValue Offset,EVT MemVT,MachineMemOperand * MMO)7292 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
7293 EVT VT, const SDLoc &dl, SDValue Chain,
7294 SDValue Ptr, SDValue Offset, EVT MemVT,
7295 MachineMemOperand *MMO) {
7296 if (VT == MemVT) {
7297 ExtType = ISD::NON_EXTLOAD;
7298 } else if (ExtType == ISD::NON_EXTLOAD) {
7299 assert(VT == MemVT && "Non-extending load from different memory type!");
7300 } else {
7301 // Extending load.
7302 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
7303 "Should only be an extending load, not truncating!");
7304 assert(VT.isInteger() == MemVT.isInteger() &&
7305 "Cannot convert from FP to Int or Int -> FP!");
7306 assert(VT.isVector() == MemVT.isVector() &&
7307 "Cannot use an ext load to convert to or from a vector!");
7308 assert((!VT.isVector() ||
7309 VT.getVectorElementCount() == MemVT.getVectorElementCount()) &&
7310 "Cannot use an ext load to change the number of vector elements!");
7311 }
7312
7313 bool Indexed = AM != ISD::UNINDEXED;
7314 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
7315
7316 SDVTList VTs = Indexed ?
7317 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
7318 SDValue Ops[] = { Chain, Ptr, Offset };
7319 FoldingSetNodeID ID;
7320 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
7321 ID.AddInteger(MemVT.getRawBits());
7322 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
7323 dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO));
7324 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
7325 void *IP = nullptr;
7326 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
7327 cast<LoadSDNode>(E)->refineAlignment(MMO);
7328 return SDValue(E, 0);
7329 }
7330 auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
7331 ExtType, MemVT, MMO);
7332 createOperands(N, Ops);
7333
7334 CSEMap.InsertNode(N, IP);
7335 InsertNode(N);
7336 SDValue V(N, 0);
7337 NewSDValueDbgMsg(V, "Creating new node: ", this);
7338 return V;
7339 }

SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
                              SDValue Ptr, MachinePointerInfo PtrInfo,
                              MaybeAlign Alignment,
                              MachineMemOperand::Flags MMOFlags,
                              const AAMDNodes &AAInfo, const MDNode *Ranges) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
                 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
}

SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
                              SDValue Ptr, MachineMemOperand *MMO) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
                 VT, MMO);
}
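
// A hedged usage sketch for the simple overloads above (Chain, Ptr and the
// pointer info are assumed to be in scope; MVT::i32 is an arbitrary example
// type). The load node produces two results:
//   SDValue Load     = DAG.getLoad(MVT::i32, DL, Chain, Ptr, PtrInfo);
//   SDValue Data     = Load.getValue(0); // the loaded value
//   SDValue OutChain = Load.getValue(1); // the out-chain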

SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
                                 EVT VT, SDValue Chain, SDValue Ptr,
                                 MachinePointerInfo PtrInfo, EVT MemVT,
                                 MaybeAlign Alignment,
                                 MachineMemOperand::Flags MMOFlags,
                                 const AAMDNodes &AAInfo) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo,
                 MemVT, Alignment, MMOFlags, AAInfo);
}

SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
                                 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT,
                                 MachineMemOperand *MMO) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
                 MemVT, MMO);
}

SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl,
                                     SDValue Base, SDValue Offset,
                                     ISD::MemIndexedMode AM) {
  LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
  assert(LD->getOffset().isUndef() && "Load is already an indexed load!");
  // Don't propagate the invariant or dereferenceable flags.
  auto MMOFlags =
      LD->getMemOperand()->getFlags() &
      ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
  return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
                 LD->getChain(), Base, Offset, LD->getPointerInfo(),
                 LD->getMemoryVT(), LD->getAlign(), MMOFlags, LD->getAAInfo());
}
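
// A hedged sketch of turning an unindexed load into a pre-incremented one
// (LD, Base and Inc are assumed to be in scope; targets typically do this
// when folding an address update into the load):
//   SDValue PreInc =
//       DAG.getIndexedLoad(SDValue(LD, 0), DL, Base, Inc, ISD::PRE_INC);
// The indexed node gains an extra result: value 0 is the data, value 1 the
// updated base pointer, and value 2 the out-chain.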

SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                               SDValue Ptr, MachinePointerInfo PtrInfo,
                               Align Alignment,
                               MachineMemOperand::Flags MMOFlags,
                               const AAMDNodes &AAInfo) {
  assert(Chain.getValueType() == MVT::Other && "Invalid chain type");

  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  if (PtrInfo.V.isNull())
    PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);

  MachineFunction &MF = getMachineFunction();
  uint64_t Size =
      MemoryLocation::getSizeOrUnknown(Val.getValueType().getStoreSize());
  MachineMemOperand *MMO =
      MF.getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo);
  return getStore(Chain, dl, Val, Ptr, MMO);
}

SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                               SDValue Ptr, MachineMemOperand *MMO) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  EVT VT = Val.getValueType();
  SDVTList VTs = getVTList(MVT::Other);
  SDValue Undef = getUNDEF(Ptr.getValueType());
  SDValue Ops[] = { Chain, Val, Ptr, Undef };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
      dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<StoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
                                   ISD::UNINDEXED, false, VT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                                    SDValue Ptr, MachinePointerInfo PtrInfo,
                                    EVT SVT, Align Alignment,
                                    MachineMemOperand::Flags MMOFlags,
                                    const AAMDNodes &AAInfo) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");

  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  if (PtrInfo.V.isNull())
    PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MMOFlags, MemoryLocation::getSizeOrUnknown(SVT.getStoreSize()),
      Alignment, AAInfo);
  return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
}

SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                                    SDValue Ptr, EVT SVT,
                                    MachineMemOperand *MMO) {
  EVT VT = Val.getValueType();

  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  if (VT == SVT)
    return getStore(Chain, dl, Val, Ptr, MMO);

  assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
         "Should only be a truncating store, not extending!");
  assert(VT.isInteger() == SVT.isInteger() &&
         "Can't do FP-INT conversion!");
  assert(VT.isVector() == SVT.isVector() &&
         "Cannot use trunc store to convert to or from a vector!");
  assert((!VT.isVector() ||
          VT.getVectorElementCount() == SVT.getVectorElementCount()) &&
         "Cannot use trunc store to change the number of vector elements!");

  SDVTList VTs = getVTList(MVT::Other);
  SDValue Undef = getUNDEF(Ptr.getValueType());
  SDValue Ops[] = { Chain, Val, Ptr, Undef };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(SVT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
      dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<StoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
                                   ISD::UNINDEXED, true, SVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl,
                                      SDValue Base, SDValue Offset,
                                      ISD::MemIndexedMode AM) {
  StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
  assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
  SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
  SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(ST->getMemoryVT().getRawBits());
  ID.AddInteger(ST->getRawSubclassData());
  ID.AddInteger(ST->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
                                   ST->isTruncatingStore(), ST->getMemoryVT(),
                                   ST->getMemOperand());
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain,
                                    SDValue Base, SDValue Offset, SDValue Mask,
                                    SDValue PassThru, EVT MemVT,
                                    MachineMemOperand *MMO,
                                    ISD::MemIndexedMode AM,
                                    ISD::LoadExtType ExtTy, bool isExpanding) {
  bool Indexed = AM != ISD::UNINDEXED;
  assert((Indexed || Offset.isUndef()) &&
         "Unindexed masked load with an offset!");
  SDVTList VTs = Indexed ? getVTList(VT, Base.getValueType(), MVT::Other)
                         : getVTList(VT, MVT::Other);
  SDValue Ops[] = {Chain, Base, Offset, Mask, PassThru};
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops);
  ID.AddInteger(MemVT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
      dl.getIROrder(), VTs, AM, ExtTy, isExpanding, MemVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
                                        AM, ExtTy, isExpanding, MemVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl,
                                           SDValue Base, SDValue Offset,
                                           ISD::MemIndexedMode AM) {
  MaskedLoadSDNode *LD = cast<MaskedLoadSDNode>(OrigLoad);
  assert(LD->getOffset().isUndef() &&
         "Masked load is already an indexed load!");
  return getMaskedLoad(OrigLoad.getValueType(), dl, LD->getChain(), Base,
                       Offset, LD->getMask(), LD->getPassThru(),
                       LD->getMemoryVT(), LD->getMemOperand(), AM,
                       LD->getExtensionType(), LD->isExpandingLoad());
}

SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl,
                                     SDValue Val, SDValue Base, SDValue Offset,
                                     SDValue Mask, EVT MemVT,
                                     MachineMemOperand *MMO,
                                     ISD::MemIndexedMode AM, bool IsTruncating,
                                     bool IsCompressing) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  bool Indexed = AM != ISD::UNINDEXED;
  assert((Indexed || Offset.isUndef()) &&
         "Unindexed masked store with an offset!");
  SDVTList VTs = Indexed ? getVTList(Base.getValueType(), MVT::Other)
                         : getVTList(MVT::Other);
  SDValue Ops[] = {Chain, Val, Base, Offset, Mask};
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops);
  ID.AddInteger(MemVT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
      dl.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N =
      newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
                                   IsTruncating, IsCompressing, MemVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl,
                                            SDValue Base, SDValue Offset,
                                            ISD::MemIndexedMode AM) {
  MaskedStoreSDNode *ST = cast<MaskedStoreSDNode>(OrigStore);
  assert(ST->getOffset().isUndef() &&
         "Masked store is already an indexed store!");
  return getMaskedStore(ST->getChain(), dl, ST->getValue(), Base, Offset,
                        ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(),
                        AM, ST->isTruncatingStore(), ST->isCompressingStore());
}

SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl,
                                      ArrayRef<SDValue> Ops,
                                      MachineMemOperand *MMO,
                                      ISD::MemIndexType IndexType,
                                      ISD::LoadExtType ExtTy) {
  assert(Ops.size() == 6 && "Incompatible number of operands");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
      dl.getIROrder(), VTs, VT, MMO, IndexType, ExtTy));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }

  IndexType = TLI->getCanonicalIndexType(IndexType, VT, Ops[4]);
  auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                          VTs, VT, MMO, IndexType, ExtTy);
  createOperands(N, Ops);

  assert(N->getPassThru().getValueType() == N->getValueType(0) &&
         "Incompatible type of the PassThru value in MaskedGatherSDNode");
  assert(N->getMask().getValueType().getVectorElementCount() ==
             N->getValueType(0).getVectorElementCount() &&
         "Vector width mismatch between mask and data");
  assert(N->getIndex().getValueType().getVectorElementCount().isScalable() ==
             N->getValueType(0).getVectorElementCount().isScalable() &&
         "Scalable flags of index and data do not match");
  assert(ElementCount::isKnownGE(
             N->getIndex().getValueType().getVectorElementCount(),
             N->getValueType(0).getVectorElementCount()) &&
         "Vector width mismatch between index and data");
  assert(isa<ConstantSDNode>(N->getScale()) &&
         cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
         "Scale should be a constant power of 2");

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl,
                                       ArrayRef<SDValue> Ops,
                                       MachineMemOperand *MMO,
                                       ISD::MemIndexType IndexType,
                                       bool IsTrunc) {
  assert(Ops.size() == 6 && "Incompatible number of operands");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
      dl.getIROrder(), VTs, VT, MMO, IndexType, IsTrunc));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }

  IndexType = TLI->getCanonicalIndexType(IndexType, VT, Ops[4]);
  auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                           VTs, VT, MMO, IndexType, IsTrunc);
  createOperands(N, Ops);

  assert(N->getMask().getValueType().getVectorElementCount() ==
             N->getValue().getValueType().getVectorElementCount() &&
         "Vector width mismatch between mask and data");
  assert(
      N->getIndex().getValueType().getVectorElementCount().isScalable() ==
          N->getValue().getValueType().getVectorElementCount().isScalable() &&
      "Scalable flags of index and data do not match");
  assert(ElementCount::isKnownGE(
             N->getIndex().getValueType().getVectorElementCount(),
             N->getValue().getValueType().getVectorElementCount()) &&
         "Vector width mismatch between index and data");
  assert(isa<ConstantSDNode>(N->getScale()) &&
         cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
         "Scale should be a constant power of 2");

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::simplifySelect(SDValue Cond, SDValue T, SDValue F) {
  // select undef, T, F --> T (if T is a constant), otherwise F
  // select ?, undef, F --> F
  // select ?, T, undef --> T
  if (Cond.isUndef())
    return isConstantValueOfAnyType(T) ? T : F;
  if (T.isUndef())
    return F;
  if (F.isUndef())
    return T;

  // select true, T, F --> T
  // select false, T, F --> F
  if (auto *CondC = dyn_cast<ConstantSDNode>(Cond))
    return CondC->isNullValue() ? F : T;

  // TODO: This should simplify VSELECT with constant condition using something
  // like this (but check boolean contents to be complete?):
  // if (ISD::isBuildVectorAllOnes(Cond.getNode()))
  //   return T;
  // if (ISD::isBuildVectorAllZeros(Cond.getNode()))
  //   return F;

  // select ?, T, T --> T
  if (T == F)
    return T;

  return SDValue();
}
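
// A hedged usage sketch (Cond, T and F assumed in scope): callers must handle
// the empty SDValue returned when none of the folds above applies:
//   if (SDValue S = DAG.simplifySelect(Cond, T, F))
//     return S;
//   // otherwise build the real select node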

SDValue SelectionDAG::simplifyShift(SDValue X, SDValue Y) {
  // shift undef, Y --> 0 (can always assume that the undef value is 0)
  if (X.isUndef())
    return getConstant(0, SDLoc(X.getNode()), X.getValueType());
  // shift X, undef --> undef (because it may shift by the bitwidth)
  if (Y.isUndef())
    return getUNDEF(X.getValueType());

  // shift 0, Y --> 0
  // shift X, 0 --> X
  if (isNullOrNullSplat(X) || isNullOrNullSplat(Y))
    return X;

  // shift X, C >= bitwidth(X) --> undef
  // All vector elements must be too big (or undef) to avoid partial undefs.
  auto isShiftTooBig = [X](ConstantSDNode *Val) {
    return !Val || Val->getAPIntValue().uge(X.getScalarValueSizeInBits());
  };
  if (ISD::matchUnaryPredicate(Y, isShiftTooBig, true))
    return getUNDEF(X.getValueType());

  return SDValue();
}
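
// Worked example of the out-of-range rule above (hypothetical i8 values):
// shifting an i8 by 8 or more bits is undefined, so
//   (srl i8 %x, 9) --> undef
// whereas a vector shift amount of <9, 3> does not fold, because lane 1 is
// in range and the fold would only be valid for some lanes.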

SDValue SelectionDAG::simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y,
                                      SDNodeFlags Flags) {
  // If this operation has 'nnan' or 'ninf' and at least 1 disallowed operand
  // (an undef operand can be chosen to be NaN/Inf), then the result of this
  // operation is poison. That result can be relaxed to undef.
  ConstantFPSDNode *XC = isConstOrConstSplatFP(X, /* AllowUndefs */ true);
  ConstantFPSDNode *YC = isConstOrConstSplatFP(Y, /* AllowUndefs */ true);
  bool HasNan = (XC && XC->getValueAPF().isNaN()) ||
                (YC && YC->getValueAPF().isNaN());
  bool HasInf = (XC && XC->getValueAPF().isInfinity()) ||
                (YC && YC->getValueAPF().isInfinity());

  if (Flags.hasNoNaNs() && (HasNan || X.isUndef() || Y.isUndef()))
    return getUNDEF(X.getValueType());

  if (Flags.hasNoInfs() && (HasInf || X.isUndef() || Y.isUndef()))
    return getUNDEF(X.getValueType());

  if (!YC)
    return SDValue();

  // X + -0.0 --> X
  if (Opcode == ISD::FADD)
    if (YC->getValueAPF().isNegZero())
      return X;

  // X - +0.0 --> X
  if (Opcode == ISD::FSUB)
    if (YC->getValueAPF().isPosZero())
      return X;

  // X * 1.0 --> X
  // X / 1.0 --> X
  if (Opcode == ISD::FMUL || Opcode == ISD::FDIV)
    if (YC->getValueAPF().isExactlyValue(1.0))
      return X;

  // X * 0.0 --> 0.0
  if (Opcode == ISD::FMUL && Flags.hasNoNaNs() && Flags.hasNoSignedZeros())
    if (YC->getValueAPF().isZero())
      return getConstantFP(0.0, SDLoc(Y), Y.getValueType());

  return SDValue();
}
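
// A hedged usage sketch (X, Y and Flags assumed in scope): with ISD::FADD
// this folds (fadd X, -0.0) --> X, and returns an empty SDValue when no
// identity applies:
//   if (SDValue V = DAG.simplifyFPBinop(ISD::FADD, X, Y, Flags))
//     return V;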

SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain,
                               SDValue Ptr, SDValue SV, unsigned Align) {
  SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) };
  return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              ArrayRef<SDUse> Ops) {
  switch (Ops.size()) {
  case 0: return getNode(Opcode, DL, VT);
  case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
  default: break;
  }

  // Copy from an SDUse array into an SDValue array for use with
  // the regular getNode logic.
  SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end());
  return getNode(Opcode, DL, VT, NewOps);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              ArrayRef<SDValue> Ops) {
  SDNodeFlags Flags;
  if (Inserter)
    Flags = Inserter->getFlags();
  return getNode(Opcode, DL, VT, Ops, Flags);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
  unsigned NumOps = Ops.size();
  switch (NumOps) {
  case 0: return getNode(Opcode, DL, VT);
  case 1: return getNode(Opcode, DL, VT, Ops[0], Flags);
  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags);
  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2], Flags);
  default: break;
  }

#ifndef NDEBUG
  for (auto &Op : Ops)
    assert(Op.getOpcode() != ISD::DELETED_NODE &&
           "Operand is DELETED_NODE!");
#endif

  switch (Opcode) {
  default: break;
  case ISD::BUILD_VECTOR:
    // Attempt to simplify BUILD_VECTOR.
    if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
      return V;
    break;
  case ISD::CONCAT_VECTORS:
    if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
      return V;
    break;
  case ISD::SELECT_CC:
    assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
    assert(Ops[0].getValueType() == Ops[1].getValueType() &&
           "LHS and RHS of condition must have same type!");
    assert(Ops[2].getValueType() == Ops[3].getValueType() &&
           "True and False arms of SelectCC must have same type!");
    assert(Ops[2].getValueType() == VT &&
           "select_cc node must be of same type as true and false value!");
    break;
  case ISD::BR_CC:
    assert(NumOps == 5 && "BR_CC takes 5 operands!");
    assert(Ops[2].getValueType() == Ops[3].getValueType() &&
           "LHS/RHS of comparison should match types!");
    break;
  }

  // Memoize nodes.
  SDNode *N;
  SDVTList VTs = getVTList(VT);

  if (VT != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops);
    void *IP = nullptr;

    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
      return SDValue(E, 0);

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);

    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);
  }

  N->setFlags(Flags);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
                              ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
  return getNode(Opcode, DL, getVTList(ResultTys), Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              ArrayRef<SDValue> Ops) {
  SDNodeFlags Flags;
  if (Inserter)
    Flags = Inserter->getFlags();
  return getNode(Opcode, DL, VTList, Ops, Flags);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
  if (VTList.NumVTs == 1)
    return getNode(Opcode, DL, VTList.VTs[0], Ops);

#ifndef NDEBUG
  for (auto &Op : Ops)
    assert(Op.getOpcode() != ISD::DELETED_NODE &&
           "Operand is DELETED_NODE!");
#endif

  switch (Opcode) {
  case ISD::STRICT_FP_EXTEND:
    assert(VTList.NumVTs == 2 && Ops.size() == 2 &&
           "Invalid STRICT_FP_EXTEND!");
    assert(VTList.VTs[0].isFloatingPoint() &&
           Ops[1].getValueType().isFloatingPoint() && "Invalid FP cast!");
    assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() &&
           "STRICT_FP_EXTEND result type should be vector iff the operand "
           "type is vector!");
    assert((!VTList.VTs[0].isVector() ||
            VTList.VTs[0].getVectorNumElements() ==
                Ops[1].getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    assert(Ops[1].getValueType().bitsLT(VTList.VTs[0]) &&
           "Invalid fpext node, dst <= src!");
    break;
  case ISD::STRICT_FP_ROUND:
    assert(VTList.NumVTs == 2 && Ops.size() == 3 && "Invalid STRICT_FP_ROUND!");
    assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() &&
           "STRICT_FP_ROUND result type should be vector iff the operand "
           "type is vector!");
    assert((!VTList.VTs[0].isVector() ||
            VTList.VTs[0].getVectorNumElements() ==
                Ops[1].getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    assert(VTList.VTs[0].isFloatingPoint() &&
           Ops[1].getValueType().isFloatingPoint() &&
           VTList.VTs[0].bitsLT(Ops[1].getValueType()) &&
           isa<ConstantSDNode>(Ops[2]) &&
           (cast<ConstantSDNode>(Ops[2])->getZExtValue() == 0 ||
            cast<ConstantSDNode>(Ops[2])->getZExtValue() == 1) &&
           "Invalid STRICT_FP_ROUND!");
    break;
#if 0
  // FIXME: figure out how to safely handle things like
  // int foo(int x) { return 1 << (x & 255); }
  // int bar() { return foo(256); }
  case ISD::SRA_PARTS:
  case ISD::SRL_PARTS:
  case ISD::SHL_PARTS:
    if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
        cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
      return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
    else if (N3.getOpcode() == ISD::AND)
      if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
        // If the and is only masking out bits that cannot affect the shift,
        // eliminate the and.
        unsigned NumBits = VT.getScalarSizeInBits()*2;
        if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
          return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
      }
    break;
#endif
  }

  // Memoize the node unless it returns a flag.
  SDNode *N;
  if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
      return SDValue(E, 0);

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
    createOperands(N, Ops);
    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
    createOperands(N, Ops);
  }

  N->setFlags(Flags);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
                              SDVTList VTList) {
  return getNode(Opcode, DL, VTList, None);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1) {
  SDValue Ops[] = { N1 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2) {
  SDValue Ops[] = { N1, N2 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3) {
  SDValue Ops[] = { N1, N2, N3 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
  SDValue Ops[] = { N1, N2, N3, N4 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3, SDValue N4,
                              SDValue N5) {
  SDValue Ops[] = { N1, N2, N3, N4, N5 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDVTList SelectionDAG::getVTList(EVT VT) {
  return makeVTList(SDNode::getValueTypeList(VT), 1);
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
  FoldingSetNodeID ID;
  ID.AddInteger(2U);
  ID.AddInteger(VT1.getRawBits());
  ID.AddInteger(VT2.getRawBits());

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(2);
    Array[0] = VT1;
    Array[1] = VT2;
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
  FoldingSetNodeID ID;
  ID.AddInteger(3U);
  ID.AddInteger(VT1.getRawBits());
  ID.AddInteger(VT2.getRawBits());
  ID.AddInteger(VT3.getRawBits());

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(3);
    Array[0] = VT1;
    Array[1] = VT2;
    Array[2] = VT3;
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
  FoldingSetNodeID ID;
  ID.AddInteger(4U);
  ID.AddInteger(VT1.getRawBits());
  ID.AddInteger(VT2.getRawBits());
  ID.AddInteger(VT3.getRawBits());
  ID.AddInteger(VT4.getRawBits());

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(4);
    Array[0] = VT1;
    Array[1] = VT2;
    Array[2] = VT3;
    Array[3] = VT4;
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) {
  unsigned NumVTs = VTs.size();
  FoldingSetNodeID ID;
  ID.AddInteger(NumVTs);
  for (unsigned index = 0; index < NumVTs; index++) {
    ID.AddInteger(VTs[index].getRawBits());
  }

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(NumVTs);
    llvm::copy(VTs, Array);
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

/// UpdateNodeOperands - *Mutate* the specified node in-place to have the
/// specified operands. If the resultant node already exists in the DAG,
/// this does not modify the specified node; instead, it returns the node that
/// already exists. If the resultant node does not exist in the DAG, the
/// input node is returned. As a degenerate case, if you specify the same
/// input operands as the node already has, the input node is returned.
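/// A minimal sketch of the contract (N and NewOp assumed in scope): always
/// use the returned node, since CSE may hand back a pre-existing one:
/// \code
///   SDNode *Res = DAG.UpdateNodeOperands(N, NewOp);
///   if (Res != N) {
///     // An equivalent node already existed; N itself was left unchanged.
///   }
/// \endcode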
SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
  assert(N->getNumOperands() == 1 && "Update with wrong number of operands");

  // Check to see if there is no change.
  if (Op == N->getOperand(0)) return N;

  // See if the modified node already exists.
  void *InsertPos = nullptr;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
    return Existing;

  // Nope it doesn't.  Remove the node from its current place in the maps.
  if (InsertPos)
    if (!RemoveNodeFromCSEMaps(N))
      InsertPos = nullptr;

  // Now we update the operands.
  N->OperandList[0].set(Op);

  updateDivergence(N);
  // If this gets put into a CSE map, add it.
  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
  return N;
}

SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
  assert(N->getNumOperands() == 2 && "Update with wrong number of operands");

  // Check to see if there is no change.
  if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
    return N;   // No operands changed, just return the input node.

  // See if the modified node already exists.
  void *InsertPos = nullptr;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
    return Existing;

  // Nope it doesn't.  Remove the node from its current place in the maps.
  if (InsertPos)
    if (!RemoveNodeFromCSEMaps(N))
      InsertPos = nullptr;

  // Now we update the operands.
  if (N->OperandList[0] != Op1)
    N->OperandList[0].set(Op1);
  if (N->OperandList[1] != Op2)
    N->OperandList[1].set(Op2);

  updateDivergence(N);
  // If this gets put into a CSE map, add it.
  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
  return N;
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
  SDValue Ops[] = { Op1, Op2, Op3 };
  return UpdateNodeOperands(N, Ops);
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
                   SDValue Op3, SDValue Op4) {
  SDValue Ops[] = { Op1, Op2, Op3, Op4 };
  return UpdateNodeOperands(N, Ops);
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
                   SDValue Op3, SDValue Op4, SDValue Op5) {
  SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
  return UpdateNodeOperands(N, Ops);
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) {
  unsigned NumOps = Ops.size();
  assert(N->getNumOperands() == NumOps &&
         "Update with wrong number of operands");

  // If no operands changed just return the input node.
  if (std::equal(Ops.begin(), Ops.end(), N->op_begin()))
    return N;

  // See if the modified node already exists.
  void *InsertPos = nullptr;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
    return Existing;

  // Nope it doesn't.  Remove the node from its current place in the maps.
  if (InsertPos)
    if (!RemoveNodeFromCSEMaps(N))
      InsertPos = nullptr;

  // Now we update the operands.
  for (unsigned i = 0; i != NumOps; ++i)
    if (N->OperandList[i] != Ops[i])
      N->OperandList[i].set(Ops[i]);

  updateDivergence(N);
  // If this gets put into a CSE map, add it.
  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
  return N;
}

/// DropOperands - Release the operands and set this node to have
/// zero operands.
void SDNode::DropOperands() {
  // Unlike the code in MorphNodeTo that does this, we don't need to
  // watch for dead nodes here.
  for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
    SDUse &Use = *I++;
    Use.set(SDValue());
  }
}

void SelectionDAG::setNodeMemRefs(MachineSDNode *N,
                                  ArrayRef<MachineMemOperand *> NewMemRefs) {
  if (NewMemRefs.empty()) {
    N->clearMemRefs();
    return;
  }

  // Check if we can avoid allocating by storing a single reference directly.
  if (NewMemRefs.size() == 1) {
    N->MemRefs = NewMemRefs[0];
    N->NumMemRefs = 1;
    return;
  }

  MachineMemOperand **MemRefsBuffer =
      Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.size());
  llvm::copy(NewMemRefs, MemRefsBuffer);
  N->MemRefs = MemRefsBuffer;
  N->NumMemRefs = static_cast<int>(NewMemRefs.size());
}

/// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
/// machine opcode.
///
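/// A hedged sketch (the opcode and operand names are illustrative only):
/// instruction selectors typically use these to morph a target-independent
/// node into a machine node in place:
/// \code
///   SDNode *MN = CurDAG->SelectNodeTo(N, TargetOpcode::COPY, VT, Op0);
/// \endcode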
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT) {
  SDVTList VTs = getVTList(VT);
  return SelectNodeTo(N, MachineOpc, VTs, None);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1,
                                   SDValue Op2) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1,
                                   SDValue Op2, SDValue Op3) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT);
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2);
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2) {
  SDVTList VTs = getVTList(VT1, VT2);
  return SelectNodeTo(N, MachineOpc, VTs, None);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2, EVT VT3,
                                   ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2,
                                   SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   SDVTList VTs, ArrayRef<SDValue> Ops) {
  SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
  // Reset the NodeID to -1.
  New->setNodeId(-1);
  if (New != N) {
    ReplaceAllUsesWith(N, New);
    RemoveDeadNode(N);
  }
  return New;
}
/// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
/// the line number information on the merged node since it is not possible to
/// preserve the information that the operation is associated with multiple
/// lines. This will make the debugger work better at -O0, where there is a
/// higher probability of having other instructions associated with that line.
///
/// For IROrder, we keep the smaller of the two.
SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) {
  DebugLoc NLoc = N->getDebugLoc();
  if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
    N->setDebugLoc(DebugLoc());
  }
  unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
  N->setIROrder(Order);
  return N;
}
/// MorphNodeTo - This *mutates* the specified node to have the specified
/// return type, opcode, and operands.
///
/// Note that MorphNodeTo returns the resultant node. If there is already a
/// node of the specified opcode and operands, it returns that node instead of
/// the current one. Note that the SDLoc need not be the same.
///
/// Using MorphNodeTo is faster than creating a new node and swapping it in
/// with ReplaceAllUsesWith both because it often avoids allocating a new
/// node, and because it doesn't require CSE recalculation for any of
/// the node's users.
///
/// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
/// As a consequence, it isn't appropriate to use from within the DAG combiner
/// or the legalizer, which maintain worklists that would need to be updated
/// when deleting things.
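/// A minimal sketch of the caller-side contract (NewOpc, VT and Ops assumed
/// in scope): when an existing node is returned, the caller must transfer
/// the uses itself, exactly as mutateStrictFPToFP does below:
/// \code
///   SDNode *Res = DAG.MorphNodeTo(N, NewOpc, DAG.getVTList(VT), Ops);
///   if (Res != N) {
///     DAG.ReplaceAllUsesWith(N, Res);
///     DAG.RemoveDeadNode(N);
///   }
/// \endcode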
SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
                                  SDVTList VTs, ArrayRef<SDValue> Ops) {
  // If an identical node already exists, use it.
  void *IP = nullptr;
  if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opc, VTs, Ops);
    if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP))
      return UpdateSDLocOnMergeSDNode(ON, SDLoc(N));
  }

  if (!RemoveNodeFromCSEMaps(N))
    IP = nullptr;

  // Start the morphing.
  N->NodeType = Opc;
  N->ValueList = VTs.VTs;
  N->NumValues = VTs.NumVTs;

  // Clear the operands list, updating used nodes to remove this from their
  // use list.  Keep track of any operands that become dead as a result.
  SmallPtrSet<SDNode*, 16> DeadNodeSet;
  for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
    SDUse &Use = *I++;
    SDNode *Used = Use.getNode();
    Use.set(SDValue());
    if (Used->use_empty())
      DeadNodeSet.insert(Used);
  }

  // For MachineNode, initialize the memory references information.
  if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N))
    MN->clearMemRefs();

  // Swap for an appropriately sized array from the recycler.
  removeOperands(N);
  createOperands(N, Ops);

  // Delete any nodes that are still dead after adding the uses for the
  // new operands.
  if (!DeadNodeSet.empty()) {
    SmallVector<SDNode *, 16> DeadNodes;
    for (SDNode *N : DeadNodeSet)
      if (N->use_empty())
        DeadNodes.push_back(N);
    RemoveDeadNodes(DeadNodes);
  }

  if (IP)
    CSEMap.InsertNode(N, IP);   // Memoize the new node.
  return N;
}

SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) {
  unsigned OrigOpc = Node->getOpcode();
  unsigned NewOpc;
  switch (OrigOpc) {
  default:
    llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!");
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
  case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break;
#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
  case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break;
#include "llvm/IR/ConstrainedOps.def"
  }

  assert(Node->getNumValues() == 2 && "Unexpected number of results!");

  // We're taking this node out of the chain, so we need to re-link things.
  SDValue InputChain = Node->getOperand(0);
  SDValue OutputChain = SDValue(Node, 1);
  ReplaceAllUsesOfValueWith(OutputChain, InputChain);

  SmallVector<SDValue, 3> Ops;
  for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
    Ops.push_back(Node->getOperand(i));

  SDVTList VTs = getVTList(Node->getValueType(0));
  SDNode *Res = MorphNodeTo(Node, NewOpc, VTs, Ops);

  // MorphNodeTo can operate in two ways: if an existing node with the
  // specified operands exists, it can just return it.  Otherwise, it
  // updates the node in place to have the requested operands.
  if (Res == Node) {
    // If we updated the node in place, reset the node ID.  To the isel,
    // this should be just like a newly allocated machine node.
    Res->setNodeId(-1);
  } else {
    ReplaceAllUsesWith(Node, Res);
    RemoveDeadNode(Node);
  }

  return Res;
}
/// getMachineNode - These are used by target selectors to create a new node
/// with specified return type(s), MachineInstr opcode, and operands.
///
/// Note that getMachineNode returns the resultant node. If there is already a
/// node of the specified opcode and operands, it returns that node instead of
/// the current one.
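/// A hedged sketch (the generic opcode below is purely illustrative):
/// \code
///   MachineSDNode *MN =
///       DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::i32);
/// \endcode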
MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT) {
  SDVTList VTs = getVTList(VT);
  return getMachineNode(Opcode, dl, VTs, None);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, SDValue Op1) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, SDValue Op1, SDValue Op2,
                                            SDValue Op3) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, SDValue Op1,
                                            SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, SDValue Op1,
                                            SDValue Op2, SDValue Op3) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2,
                                            ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, EVT VT3,
                                            SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, EVT VT3,
                                            SDValue Op1, SDValue Op2,
                                            SDValue Op3) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, EVT VT3,
                                            ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            ArrayRef<EVT> ResultTys,
                                            ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(ResultTys);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL,
                                            SDVTList VTs,
                                            ArrayRef<SDValue> Ops) {
  bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
  MachineSDNode *N;
  void *IP = nullptr;

  if (DoCSE) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, ~Opcode, VTs, Ops);
    IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
      return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL));
    }
  }

  // Allocate a new MachineSDNode.
  N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
  createOperands(N, Ops);

  if (DoCSE)
    CSEMap.InsertNode(N, IP);

  InsertNode(N);
  NewSDValueDbgMsg(SDValue(N, 0), "Creating new machine node: ", this);
  return N;
}
/// getTargetExtractSubreg - A convenience function for creating
/// TargetOpcode::EXTRACT_SUBREG nodes.
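/// For example (a hedged sketch; the subregister index name MySubRegIdx is
/// hypothetical and target-specific, and Reg64 is assumed to be in scope):
/// \code
///   SDValue Lo =
///       DAG.getTargetExtractSubreg(MySubRegIdx, DL, MVT::i32, Reg64);
/// \endcode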
SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
                                             SDValue Operand) {
  SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
  SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                  VT, Operand, SRIdxVal);
  return SDValue(Subreg, 0);
}

/// getTargetInsertSubreg - A convenience function for creating
/// TargetOpcode::INSERT_SUBREG nodes.
SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
                                            SDValue Operand, SDValue Subreg) {
  SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
  SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
                                  VT, Operand, Subreg, SRIdxVal);
  return SDValue(Result, 0);
}
/// getNodeIfExists - Get the specified node if it's already available, or
/// else return nullptr.
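/// A usage sketch (Opcode, VT and Ops assumed in scope): handy for checking
/// whether an equivalent node has already been built before creating one:
/// \code
///   if (SDNode *E = DAG.getNodeIfExists(Opcode, DAG.getVTList(VT), Ops))
///     return SDValue(E, 0);
/// \endcode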
getNodeIfExists(unsigned Opcode,SDVTList VTList,ArrayRef<SDValue> Ops)8638 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
8639 ArrayRef<SDValue> Ops) {
8640 SDNodeFlags Flags;
8641 if (Inserter)
8642 Flags = Inserter->getFlags();
8643 return getNodeIfExists(Opcode, VTList, Ops, Flags);
8644 }
8645
getNodeIfExists(unsigned Opcode,SDVTList VTList,ArrayRef<SDValue> Ops,const SDNodeFlags Flags)8646 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
8647 ArrayRef<SDValue> Ops,
8648 const SDNodeFlags Flags) {
8649 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
8650 FoldingSetNodeID ID;
8651 AddNodeIDNode(ID, Opcode, VTList, Ops);
8652 void *IP = nullptr;
8653 if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) {
8654 E->intersectFlagsWith(Flags);
8655 return E;
8656 }
8657 }
8658 return nullptr;
8659 }
8660
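// Note: nodes whose last result type is MVT::Glue are never entered into the
// CSE map (see getMachineNode above), so getNodeIfExists (above) and
// doesNodeExist (below) conservatively treat glue-producing nodes as absent.
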
/// doesNodeExist - Check if a node exists without modifying its flags.
bool SelectionDAG::doesNodeExist(unsigned Opcode, SDVTList VTList,
                                 ArrayRef<SDValue> Ops) {
  if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops);
    void *IP = nullptr;
    if (FindNodeOrInsertPos(ID, SDLoc(), IP))
      return true;
  }
  return false;
}

/// getDbgValue - Creates an SDDbgValue node.
///
/// SDNode
SDDbgValue *SelectionDAG::getDbgValue(DIVariable *Var, DIExpression *Expr,
                                      SDNode *N, unsigned R, bool IsIndirect,
                                      const DebugLoc &DL, unsigned O) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc())
      SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromNode(N, R),
                 {}, IsIndirect, DL, O,
                 /*IsVariadic=*/false);
}

/// Constant
SDDbgValue *SelectionDAG::getConstantDbgValue(DIVariable *Var,
                                              DIExpression *Expr,
                                              const Value *C,
                                              const DebugLoc &DL, unsigned O) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc())
      SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromConst(C), {},
                 /*IsIndirect=*/false, DL, O,
                 /*IsVariadic=*/false);
}

/// FrameIndex
SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var,
                                                DIExpression *Expr, unsigned FI,
                                                bool IsIndirect,
                                                const DebugLoc &DL,
                                                unsigned O) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return getFrameIndexDbgValue(Var, Expr, FI, {}, IsIndirect, DL, O);
}

/// FrameIndex with dependencies
SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var,
                                                DIExpression *Expr, unsigned FI,
                                                ArrayRef<SDNode *> Dependencies,
                                                bool IsIndirect,
                                                const DebugLoc &DL,
                                                unsigned O) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc())
      SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromFrameIdx(FI),
                 Dependencies, IsIndirect, DL, O,
                 /*IsVariadic=*/false);
}

/// VReg
SDDbgValue *SelectionDAG::getVRegDbgValue(DIVariable *Var, DIExpression *Expr,
                                          unsigned VReg, bool IsIndirect,
                                          const DebugLoc &DL, unsigned O) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc())
      SDDbgValue(DbgInfo->getAlloc(), Var, Expr, SDDbgOperand::fromVReg(VReg),
                 {}, IsIndirect, DL, O,
                 /*IsVariadic=*/false);
}

SDDbgValue *SelectionDAG::getDbgValueList(DIVariable *Var, DIExpression *Expr,
                                          ArrayRef<SDDbgOperand> Locs,
                                          ArrayRef<SDNode *> Dependencies,
                                          bool IsIndirect, const DebugLoc &DL,
                                          unsigned O, bool IsVariadic) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc())
      SDDbgValue(DbgInfo->getAlloc(), Var, Expr, Locs, Dependencies, IsIndirect,
                 DL, O, IsVariadic);
}

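/// Transfer SDDbgValues from \p From to \p To, rewriting each location
/// operand that referred to From so that it refers to To instead. If a
/// nonzero \p SizeInBits is given, the cloned debug values describe only the
/// fragment [OffsetInBits, OffsetInBits + SizeInBits) of the variable. When
/// \p InvalidateDbg is set, the original debug values are invalidated and
/// suppressed from emission.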
void SelectionDAG::transferDbgValues(SDValue From, SDValue To,
                                     unsigned OffsetInBits, unsigned SizeInBits,
                                     bool InvalidateDbg) {
  SDNode *FromNode = From.getNode();
  SDNode *ToNode = To.getNode();
  assert(FromNode && ToNode && "Can't modify dbg values");

  // PR35338
  // TODO: assert(From != To && "Redundant dbg value transfer");
  // TODO: assert(FromNode != ToNode && "Intranode dbg value transfer");
  if (From == To || FromNode == ToNode)
    return;

  if (!FromNode->getHasDebugValue())
    return;

  SDDbgOperand FromLocOp =
      SDDbgOperand::fromNode(From.getNode(), From.getResNo());
  SDDbgOperand ToLocOp = SDDbgOperand::fromNode(To.getNode(), To.getResNo());

  SmallVector<SDDbgValue *, 2> ClonedDVs;
  for (SDDbgValue *Dbg : GetDbgValues(FromNode)) {
    if (Dbg->isInvalidated())
      continue;

    // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value");

    // Create a new location ops vector that is equal to the old vector, but
    // with each instance of FromLocOp replaced with ToLocOp.
    bool Changed = false;
    auto NewLocOps = Dbg->copyLocationOps();
    std::replace_if(
        NewLocOps.begin(), NewLocOps.end(),
        [&Changed, FromLocOp](const SDDbgOperand &Op) {
          bool Match = Op == FromLocOp;
          Changed |= Match;
          return Match;
        },
        ToLocOp);
    // Ignore this SDDbgValue if we didn't find a matching location.
    if (!Changed)
      continue;

    DIVariable *Var = Dbg->getVariable();
    auto *Expr = Dbg->getExpression();
    // If a fragment is requested, update the expression.
    if (SizeInBits) {
      // When splitting a larger (e.g., sign-extended) value whose
      // lower bits are described with an SDDbgValue, do not attempt
      // to transfer the SDDbgValue to the upper bits.
      if (auto FI = Expr->getFragmentInfo())
        if (OffsetInBits + SizeInBits > FI->SizeInBits)
          continue;
      auto Fragment = DIExpression::createFragmentExpression(Expr, OffsetInBits,
                                                             SizeInBits);
      if (!Fragment)
        continue;
      Expr = *Fragment;
    }

    auto AdditionalDependencies = Dbg->getAdditionalDependencies();
    // Clone the SDDbgValue and move it to To.
    SDDbgValue *Clone = getDbgValueList(
        Var, Expr, NewLocOps, AdditionalDependencies, Dbg->isIndirect(),
        Dbg->getDebugLoc(), std::max(ToNode->getIROrder(), Dbg->getOrder()),
        Dbg->isVariadic());
    ClonedDVs.push_back(Clone);

    if (InvalidateDbg) {
      // Invalidate value and indicate the SDDbgValue should not be emitted.
      Dbg->setIsInvalidated();
      Dbg->setIsEmitted();
    }
  }

  for (SDDbgValue *Dbg : ClonedDVs) {
    assert(is_contained(Dbg->getSDNodes(), ToNode) &&
           "Transferred DbgValues should depend on the new SDNode");
    AddDbgValue(Dbg, false);
  }
}

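/// Salvage the debug values attached to \p N so they remain meaningful after
/// N is deleted. Currently this handles ISD::ADD with a constant RHS by
/// redirecting the debug value to the ADD's LHS and folding the constant
/// offset into the DIExpression (marking it with DW_OP_stack_value).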
void SelectionDAG::salvageDebugInfo(SDNode &N) {
  if (!N.getHasDebugValue())
    return;

  SmallVector<SDDbgValue *, 2> ClonedDVs;
  for (auto DV : GetDbgValues(&N)) {
    if (DV->isInvalidated())
      continue;
    switch (N.getOpcode()) {
    default:
      break;
    case ISD::ADD:
      SDValue N0 = N.getOperand(0);
      SDValue N1 = N.getOperand(1);
      if (!isConstantIntBuildVectorOrConstantInt(N0) &&
          isConstantIntBuildVectorOrConstantInt(N1)) {
        uint64_t Offset = N.getConstantOperandVal(1);

        // Rewrite an ADD constant node into a DIExpression. Since we are
        // performing arithmetic to compute the variable's *value* in the
        // DIExpression, we need to mark the expression with a
        // DW_OP_stack_value.
        auto *DIExpr = DV->getExpression();
        auto NewLocOps = DV->copyLocationOps();
        bool Changed = false;
        for (size_t i = 0; i < NewLocOps.size(); ++i) {
          // We're not given a ResNo to compare against because the whole
          // node is going away. We know that any ISD::ADD only has one
          // result, so we can assume any node match is using the result.
          if (NewLocOps[i].getKind() != SDDbgOperand::SDNODE ||
              NewLocOps[i].getSDNode() != &N)
            continue;
          NewLocOps[i] = SDDbgOperand::fromNode(N0.getNode(), N0.getResNo());
          SmallVector<uint64_t, 3> ExprOps;
          DIExpression::appendOffset(ExprOps, Offset);
          DIExpr = DIExpression::appendOpsToArg(DIExpr, ExprOps, i, true);
          Changed = true;
        }
        (void)Changed;
        assert(Changed && "Salvage target doesn't use N");

        auto AdditionalDependencies = DV->getAdditionalDependencies();
        SDDbgValue *Clone = getDbgValueList(DV->getVariable(), DIExpr,
                                            NewLocOps, AdditionalDependencies,
                                            DV->isIndirect(), DV->getDebugLoc(),
                                            DV->getOrder(), DV->isVariadic());
        ClonedDVs.push_back(Clone);
        DV->setIsInvalidated();
        DV->setIsEmitted();
        LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting";
                   N0.getNode()->dumprFull(this);
                   dbgs() << " into " << *DIExpr << '\n');
      }
    }
  }

  for (SDDbgValue *Dbg : ClonedDVs) {
    assert(!Dbg->getSDNodes().empty() &&
           "Salvaged DbgValue should depend on a new SDNode");
    AddDbgValue(Dbg, false);
  }
}

/// Creates an SDDbgLabel node.
SDDbgLabel *SelectionDAG::getDbgLabel(DILabel *Label,
                                      const DebugLoc &DL, unsigned O) {
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc()) SDDbgLabel(Label, DL, O);
}

namespace {

/// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
/// pointed to by a use iterator is deleted, increment the use iterator
/// so that it doesn't dangle.
///
class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
  SDNode::use_iterator &UI;
  SDNode::use_iterator &UE;

  void NodeDeleted(SDNode *N, SDNode *E) override {
    // Increment the iterator as needed.
    while (UI != UE && N == *UI)
      ++UI;
  }

public:
  RAUWUpdateListener(SelectionDAG &d,
                     SDNode::use_iterator &ui,
                     SDNode::use_iterator &ue)
    : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
};

} // end anonymous namespace

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version assumes From has a single result value.
///
void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
  SDNode *From = FromN.getNode();
  assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
         "Cannot replace with this method!");
  assert(From != To.getNode() && "Cannot replace uses of a value with itself");

  // Preserve Debug Values
  transferDbgValues(FromN, To);

  // Iterate over all the existing uses of From. New uses will be added
  // to the beginning of the use list, which we avoid visiting.
  // This specifically avoids visiting uses of From that arise while the
  // replacement is happening, because any such uses would be the result
  // of CSE: If an existing node looks like From after one of its operands
  // is replaced by To, we don't want to replace all of its users with To
  // as well. See PR3018 for more info.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      ++UI;
      Use.set(To);
      if (To->isDivergent() != From->isDivergent())
        updateDivergence(User);
    } while (UI != UE && *UI == User);
    // Now that we have modified User, add it back to the CSE maps. If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (FromN == getRoot())
    setRoot(To);
}

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version assumes that for each value of From, there is a
/// corresponding value in To in the same position with the same type.
///
void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
#ifndef NDEBUG
  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
    assert((!From->hasAnyUseOfValue(i) ||
            From->getValueType(i) == To->getValueType(i)) &&
           "Cannot use this version of ReplaceAllUsesWith!");
#endif

  // Handle the trivial case.
  if (From == To)
    return;

  // Preserve Debug Info. Only do this if there's a use.
  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
    if (From->hasAnyUseOfValue(i)) {
      assert((i < To->getNumValues()) && "Invalid To location");
      transferDbgValues(SDValue(From, i), SDValue(To, i));
    }

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      ++UI;
      Use.setNode(To);
      if (To->isDivergent() != From->isDivergent())
        updateDivergence(User);
    } while (UI != UE && *UI == User);

    // Now that we have modified User, add it back to the CSE maps. If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot().getNode())
    setRoot(SDValue(To, getRoot().getResNo()));
}

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version can replace From with any result values. To must match the
/// number and types of values returned by From.
void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
  if (From->getNumValues() == 1) // Handle the simple case efficiently.
    return ReplaceAllUsesWith(SDValue(From, 0), To[0]);

  // Preserve Debug Info.
  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
    transferDbgValues(SDValue(From, i), To[i]);

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this happens the
    // uses are usually next to each other in the list. To help reduce the
    // number of CSE and divergence recomputations, process all the uses of this
    // user that we can find this way.
    bool To_IsDivergent = false;
    do {
      SDUse &Use = UI.getUse();
      const SDValue &ToOp = To[Use.getResNo()];
      ++UI;
      Use.set(ToOp);
      To_IsDivergent |= ToOp->isDivergent();
    } while (UI != UE && *UI == User);

    if (To_IsDivergent != From->isDivergent())
      updateDivergence(User);

    // Now that we have modified User, add it back to the CSE maps. If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot().getNode())
    setRoot(SDValue(To[getRoot().getResNo()]));
}

/// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone. The Deleted
/// vector is handled the same way as for ReplaceAllUsesWith.
void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To) {
  // Handle the really simple, really trivial case efficiently.
  if (From == To) return;

  // Handle the simple, trivial case efficiently.
  if (From.getNode()->getNumValues() == 1) {
    ReplaceAllUsesWith(From, To);
    return;
  }

  // Preserve Debug Info.
  transferDbgValues(From, To);

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From.getNode()->use_begin(),
                       UE = From.getNode()->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;
    bool UserRemovedFromCSEMaps = false;

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();

      // Skip uses of different values from the same node.
      if (Use.getResNo() != From.getResNo()) {
        ++UI;
        continue;
      }

      // If this node hasn't been modified yet, it's still in the CSE maps,
      // so remove its old self from the CSE maps.
      if (!UserRemovedFromCSEMaps) {
        RemoveNodeFromCSEMaps(User);
        UserRemovedFromCSEMaps = true;
      }

      ++UI;
      Use.set(To);
      if (To->isDivergent() != From->isDivergent())
        updateDivergence(User);
    } while (UI != UE && *UI == User);
    // We are iterating over all uses of the From node, so if a use
    // doesn't use the specific value, no changes are made.
    if (!UserRemovedFromCSEMaps)
      continue;

    // Now that we have modified User, add it back to the CSE maps. If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot())
    setRoot(To);
}

namespace {

/// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
/// to record information about a use.
struct UseMemo {
  SDNode *User;
  unsigned Index;
  SDUse *Use;
};

/// operator< - Sort Memos by User.
bool operator<(const UseMemo &L, const UseMemo &R) {
  return (intptr_t)L.User < (intptr_t)R.User;
}
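
// The pointer comparison above is only used to bring all UseMemos of a given
// user next to each other after the sort; the relative order of distinct
// users carries no semantic meaning for the replacement loop below.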

} // end anonymous namespace

bool SelectionDAG::calculateDivergence(SDNode *N) {
  if (TLI->isSDNodeAlwaysUniform(N)) {
    assert(!TLI->isSDNodeSourceOfDivergence(N, FLI, DA) &&
           "Conflicting divergence information!");
    return false;
  }
  if (TLI->isSDNodeSourceOfDivergence(N, FLI, DA))
    return true;
  for (auto &Op : N->ops()) {
    if (Op.Val.getValueType() != MVT::Other && Op.getNode()->isDivergent())
      return true;
  }
  return false;
}

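/// Recompute the divergence bit of \p N and, if it changed, propagate the
/// change to N's transitive users via a worklist until a fixed point is
/// reached.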
void SelectionDAG::updateDivergence(SDNode *N) {
  SmallVector<SDNode *, 16> Worklist(1, N);
  do {
    N = Worklist.pop_back_val();
    bool IsDivergent = calculateDivergence(N);
    if (N->SDNodeBits.IsDivergent != IsDivergent) {
      N->SDNodeBits.IsDivergent = IsDivergent;
      llvm::append_range(Worklist, N->uses());
    }
  } while (!Worklist.empty());
}

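/// Fill \p Order with every node in the DAG so that each node appears after
/// all of its operands (Kahn's algorithm over outstanding-operand counts).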
void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) {
  DenseMap<SDNode *, unsigned> Degree;
  Order.reserve(AllNodes.size());
  for (auto &N : allnodes()) {
    unsigned NOps = N.getNumOperands();
    Degree[&N] = NOps;
    if (0 == NOps)
      Order.push_back(&N);
  }
  for (size_t I = 0; I != Order.size(); ++I) {
    SDNode *N = Order[I];
    for (auto U : N->uses()) {
      unsigned &UnsortedOps = Degree[U];
      if (0 == --UnsortedOps)
        Order.push_back(U);
    }
  }
}

#ifndef NDEBUG
void SelectionDAG::VerifyDAGDiverence() {
  std::vector<SDNode *> TopoOrder;
  CreateTopologicalOrder(TopoOrder);
  for (auto *N : TopoOrder) {
    assert(calculateDivergence(N) == N->isDivergent() &&
           "Divergence bit inconsistency detected");
  }
}
#endif

/// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone. The same value
/// may appear in both the From and To list. The Deleted vector is
/// handled the same way as for ReplaceAllUsesWith.
void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
                                              const SDValue *To,
                                              unsigned Num) {
  // Handle the simple, trivial case efficiently.
  if (Num == 1)
    return ReplaceAllUsesOfValueWith(*From, *To);

  transferDbgValues(*From, *To);

  // Collect all the existing uses and record them. This helps with
  // processing new uses that are introduced during the replacement
  // process.
  SmallVector<UseMemo, 4> Uses;
  for (unsigned i = 0; i != Num; ++i) {
    unsigned FromResNo = From[i].getResNo();
    SDNode *FromNode = From[i].getNode();
    for (SDNode::use_iterator UI = FromNode->use_begin(),
           E = FromNode->use_end(); UI != E; ++UI) {
      SDUse &Use = UI.getUse();
      if (Use.getResNo() == FromResNo) {
        UseMemo Memo = { *UI, i, &Use };
        Uses.push_back(Memo);
      }
    }
  }

  // Sort the uses, so that all the uses from a given User are together.
  llvm::sort(Uses);

  for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
       UseIndex != UseIndexEnd; ) {
    // We know that this user uses some value of From. If it is the right
    // value, update it.
    SDNode *User = Uses[UseIndex].User;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // The Uses array is sorted, so all the uses for a given User
    // are next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      unsigned i = Uses[UseIndex].Index;
      SDUse &Use = *Uses[UseIndex].Use;
      ++UseIndex;

      Use.set(To[i]);
    } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);

    // Now that we have modified User, add it back to the CSE maps. If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }
}

/// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
/// based on their topological order. It returns the maximum id and a vector
/// of the SDNodes* in assigned order by reference.
unsigned SelectionDAG::AssignTopologicalOrder() {
  unsigned DAGSize = 0;

  // SortedPos tracks the progress of the algorithm. Nodes before it are
  // sorted, nodes after it are unsorted. When the algorithm completes
  // it is at the end of the list.
  allnodes_iterator SortedPos = allnodes_begin();

  // Visit all the nodes. Move nodes with no operands to the front of
  // the list immediately. Annotate nodes that do have operands with their
  // operand count. Before we do this, the Node Id fields of the nodes
  // may contain arbitrary values. After, the Node Id fields for nodes
  // before SortedPos will contain the topological sort index, and the
  // Node Id fields for nodes at SortedPos and after will contain the
  // count of outstanding operands.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ) {
    SDNode *N = &*I++;
    checkForCycles(N, this);
    unsigned Degree = N->getNumOperands();
    if (Degree == 0) {
      // A node with no operands; add it to the result array immediately.
      N->setNodeId(DAGSize++);
      allnodes_iterator Q(N);
      if (Q != SortedPos)
        SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
      assert(SortedPos != AllNodes.end() && "Overran node list");
      ++SortedPos;
    } else {
      // Temporarily use the Node Id as scratch space for the degree count.
      N->setNodeId(Degree);
    }
  }

  // Visit all the nodes. As we iterate, move nodes into sorted order,
  // such that by the time the end is reached all nodes will be sorted.
  for (SDNode &Node : allnodes()) {
    SDNode *N = &Node;
    checkForCycles(N, this);
    // N is in sorted position, so all its uses have one less operand
    // that needs to be sorted.
    for (SDNode *P : N->uses()) {
      unsigned Degree = P->getNodeId();
      assert(Degree != 0 && "Invalid node degree");
      --Degree;
      if (Degree == 0) {
        // All of P's operands are sorted, so P may be sorted now.
        P->setNodeId(DAGSize++);
        if (P->getIterator() != SortedPos)
          SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
        assert(SortedPos != AllNodes.end() && "Overran node list");
        ++SortedPos;
      } else {
        // Update P's outstanding operand count.
        P->setNodeId(Degree);
      }
    }
    if (Node.getIterator() == SortedPos) {
#ifndef NDEBUG
      allnodes_iterator I(N);
      SDNode *S = &*++I;
      dbgs() << "Overran sorted position:\n";
      S->dumprFull(this); dbgs() << "\n";
      dbgs() << "Checking if this is due to cycles\n";
      checkForCycles(this, true);
#endif
      llvm_unreachable(nullptr);
    }
  }

  assert(SortedPos == AllNodes.end() &&
         "Topological sort incomplete!");
  assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
         "First node in topological sort is not the entry token!");
  assert(AllNodes.front().getNodeId() == 0 &&
         "First node in topological sort has non-zero id!");
  assert(AllNodes.front().getNumOperands() == 0 &&
         "First node in topological sort has operands!");
  assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
         "Last node in topological sort has unexpected id!");
  assert(AllNodes.back().use_empty() &&
         "Last node in topological sort has users!");
  assert(DAGSize == allnodes_size() && "Node count mismatch!");
  return DAGSize;
}

/// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
/// value is produced by SD.
void SelectionDAG::AddDbgValue(SDDbgValue *DB, bool isParameter) {
  for (SDNode *SD : DB->getSDNodes()) {
    if (!SD)
      continue;
    assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
    SD->setHasDebugValue(true);
  }
  DbgInfo->add(DB, isParameter);
}

void SelectionDAG::AddDbgLabel(SDDbgLabel *DB) { DbgInfo->add(DB); }

SDValue SelectionDAG::makeEquivalentMemoryOrdering(SDValue OldChain,
                                                   SDValue NewMemOpChain) {
  assert(isa<MemSDNode>(NewMemOpChain) && "Expected a memop node");
  assert(NewMemOpChain.getValueType() == MVT::Other && "Expected a token VT");
  // The new memory operation must have the same position as the old load in
  // terms of memory dependency. Create a TokenFactor for the old load and new
  // memory operation and update uses of the old load's output chain to use that
  // TokenFactor.
  if (OldChain == NewMemOpChain || OldChain.use_empty())
    return NewMemOpChain;

  SDValue TokenFactor = getNode(ISD::TokenFactor, SDLoc(OldChain), MVT::Other,
                                OldChain, NewMemOpChain);
  ReplaceAllUsesOfValueWith(OldChain, TokenFactor);
  UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewMemOpChain);
  return TokenFactor;
}

SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad,
                                                   SDValue NewMemOp) {
  assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");
  SDValue OldChain = SDValue(OldLoad, 1);
  SDValue NewMemOpChain = NewMemOp.getValue(1);
  return makeEquivalentMemoryOrdering(OldChain, NewMemOpChain);
}

SDValue SelectionDAG::getSymbolFunctionGlobalAddress(SDValue Op,
                                                     Function **OutFunction) {
  assert(isa<ExternalSymbolSDNode>(Op) && "Node should be an ExternalSymbol");

  auto *Symbol = cast<ExternalSymbolSDNode>(Op)->getSymbol();
  auto *Module = MF->getFunction().getParent();
  auto *Function = Module->getFunction(Symbol);

  if (OutFunction != nullptr)
    *OutFunction = Function;

  if (Function != nullptr) {
    auto PtrTy = TLI->getPointerTy(getDataLayout(), Function->getAddressSpace());
    return getGlobalAddress(Function, SDLoc(Op), PtrTy);
  }

  std::string ErrorStr;
  raw_string_ostream ErrorFormatter(ErrorStr);

  ErrorFormatter << "Undefined external symbol ";
  ErrorFormatter << '"' << Symbol << '"';
  ErrorFormatter.flush();

  report_fatal_error(ErrorStr);
}

//===----------------------------------------------------------------------===//
//                              SDNode Class
//===----------------------------------------------------------------------===//

bool llvm::isNullConstant(SDValue V) {
  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
  return Const != nullptr && Const->isNullValue();
}

bool llvm::isNullFPConstant(SDValue V) {
  ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V);
  return Const != nullptr && Const->isZero() && !Const->isNegative();
}

bool llvm::isAllOnesConstant(SDValue V) {
  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
  return Const != nullptr && Const->isAllOnesValue();
}

bool llvm::isOneConstant(SDValue V) {
  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
  return Const != nullptr && Const->isOne();
}

SDValue llvm::peekThroughBitcasts(SDValue V) {
  while (V.getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);
  return V;
}

SDValue llvm::peekThroughOneUseBitcasts(SDValue V) {
  while (V.getOpcode() == ISD::BITCAST && V.getOperand(0).hasOneUse())
    V = V.getOperand(0);
  return V;
}

SDValue llvm::peekThroughExtractSubvectors(SDValue V) {
  while (V.getOpcode() == ISD::EXTRACT_SUBVECTOR)
    V = V.getOperand(0);
  return V;
}

bool llvm::isBitwiseNot(SDValue V, bool AllowUndefs) {
  if (V.getOpcode() != ISD::XOR)
    return false;
  V = peekThroughBitcasts(V.getOperand(1));
  unsigned NumBits = V.getScalarValueSizeInBits();
  ConstantSDNode *C =
      isConstOrConstSplat(V, AllowUndefs, /*AllowTruncation*/ true);
  return C && (C->getAPIntValue().countTrailingOnes() >= NumBits);
}
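
// isBitwiseNot above matches (x ^ C) where C is all-ones in x's scalar width,
// i.e. a bitwise NOT of x; requiring enough trailing ones (rather than exact
// equality) also accepts splat constants wider than the element type when
// truncation of the splat input is allowed.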

ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, bool AllowUndefs,
                                          bool AllowTruncation) {
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
    return CN;

  // SplatVectors can truncate their operands. Ignore that case here unless
  // AllowTruncation is set.
  if (N->getOpcode() == ISD::SPLAT_VECTOR) {
    EVT VecEltVT = N->getValueType(0).getVectorElementType();
    if (auto *CN = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      EVT CVT = CN->getValueType(0);
      assert(CVT.bitsGE(VecEltVT) && "Illegal splat_vector element extension");
      if (AllowTruncation || CVT == VecEltVT)
        return CN;
    }
  }

  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
    BitVector UndefElements;
    ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements);

    // BuildVectors can truncate their operands. Ignore that case here unless
    // AllowTruncation is set.
    if (CN && (UndefElements.none() || AllowUndefs)) {
      EVT CVT = CN->getValueType(0);
      EVT NSVT = N.getValueType().getScalarType();
      assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension");
      if (AllowTruncation || (CVT == NSVT))
        return CN;
    }
  }

  return nullptr;
}

ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, const APInt &DemandedElts,
                                          bool AllowUndefs,
                                          bool AllowTruncation) {
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
    return CN;

  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
    BitVector UndefElements;
    ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements);

    // BuildVectors can truncate their operands. Ignore that case here unless
    // AllowTruncation is set.
    if (CN && (UndefElements.none() || AllowUndefs)) {
      EVT CVT = CN->getValueType(0);
      EVT NSVT = N.getValueType().getScalarType();
      assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension");
      if (AllowTruncation || (CVT == NSVT))
        return CN;
    }
  }

  return nullptr;
}

ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, bool AllowUndefs) {
  if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
    return CN;

  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
    BitVector UndefElements;
    ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements);
    if (CN && (UndefElements.none() || AllowUndefs))
      return CN;
  }

  if (N.getOpcode() == ISD::SPLAT_VECTOR)
    if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N.getOperand(0)))
      return CN;

  return nullptr;
}

ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N,
                                              const APInt &DemandedElts,
                                              bool AllowUndefs) {
  if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
    return CN;

  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
    BitVector UndefElements;
    ConstantFPSDNode *CN =
        BV->getConstantFPSplatNode(DemandedElts, &UndefElements);
    if (CN && (UndefElements.none() || AllowUndefs))
      return CN;
  }

  return nullptr;
}

bool llvm::isNullOrNullSplat(SDValue N, bool AllowUndefs) {
  // TODO: may want to use peekThroughBitcast() here.
  ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs);
  return C && C->isNullValue();
}

bool llvm::isOneOrOneSplat(SDValue N, bool AllowUndefs) {
  // TODO: may want to use peekThroughBitcast() here.
  unsigned BitWidth = N.getScalarValueSizeInBits();
  ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs);
  return C && C->isOne() && C->getValueSizeInBits(0) == BitWidth;
}

bool llvm::isAllOnesOrAllOnesSplat(SDValue N, bool AllowUndefs) {
  N = peekThroughBitcasts(N);
  unsigned BitWidth = N.getScalarValueSizeInBits();
  ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs);
  return C && C->isAllOnesValue() && C->getValueSizeInBits(0) == BitWidth;
}

HandleSDNode::~HandleSDNode() {
  DropOperands();
}

GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
                                         const DebugLoc &DL,
                                         const GlobalValue *GA, EVT VT,
                                         int64_t o, unsigned TF)
    : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
  TheGlobal = GA;
}

AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl,
                                         EVT VT, unsigned SrcAS,
                                         unsigned DestAS)
    : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)),
      SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}

MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
                     SDVTList VTs, EVT memvt, MachineMemOperand *mmo)
    : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
  MemSDNodeBits.IsVolatile = MMO->isVolatile();
  MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal();
  MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable();
  MemSDNodeBits.IsInvariant = MMO->isInvariant();

  // We check here that the size of the memory operand fits within the size of
  // the MMO. This is because the MMO might indicate only a possible address
  // range instead of specifying the affected memory addresses precisely.
  // TODO: Make MachineMemOperands aware of scalable vectors.
  assert(memvt.getStoreSize().getKnownMinSize() <= MMO->getSize() &&
         "Size mismatch!");
}

/// Profile - Gather unique data for the node.
///
void SDNode::Profile(FoldingSetNodeID &ID) const {
  AddNodeIDNode(ID, this);
}

namespace {

struct EVTArray {
  std::vector<EVT> VTs;

  EVTArray() {
    VTs.reserve(MVT::LAST_VALUETYPE);
    for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
      VTs.push_back(MVT((MVT::SimpleValueType)i));
  }
};

} // end anonymous namespace

static ManagedStatic<std::set<EVT, EVT::compareRawBits>> EVTs;
static ManagedStatic<EVTArray> SimpleVTArray;
static ManagedStatic<sys::SmartMutex<true>> VTMutex;

/// getValueTypeList - Return a pointer to the specified value type.
///
const EVT *SDNode::getValueTypeList(EVT VT) {
  if (VT.isExtended()) {
    sys::SmartScopedLock<true> Lock(*VTMutex);
    return &(*EVTs->insert(VT).first);
  }
  assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE && "Value type out of range!");
  return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
}

/// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
/// indicated value. This method ignores uses of other values defined by this
/// operation.
bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  // TODO: Only iterate over uses of a given value of the node
  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
    if (UI.getUse().getResNo() == Value) {
      if (NUses == 0)
        return false;
      --NUses;
    }
  }

  // Found exactly the right number of uses?
  return NUses == 0;
}

/// hasAnyUseOfValue - Return true if there is any use of the indicated
/// value. This method ignores uses of other values defined by this operation.
bool SDNode::hasAnyUseOfValue(unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
    if (UI.getUse().getResNo() == Value)
      return true;

  return false;
}

/// isOnlyUserOf - Return true if this node is the only use of N.
bool SDNode::isOnlyUserOf(const SDNode *N) const {
  bool Seen = false;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDNode *User = *I;
    if (User == this)
      Seen = true;
    else
      return false;
  }

  return Seen;
}

/// Return true if the only users of N are contained in Nodes.
bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) {
  bool Seen = false;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDNode *User = *I;
    if (llvm::is_contained(Nodes, User))
      Seen = true;
    else
      return false;
  }

  return Seen;
}

/// isOperandOf - Return true if this node is an operand of N.
bool SDValue::isOperandOf(const SDNode *N) const {
  return is_contained(N->op_values(), *this);
}

bool SDNode::isOperandOf(const SDNode *N) const {
  return any_of(N->op_values(),
                [this](SDValue Op) { return this == Op.getNode(); });
}

/// reachesChainWithoutSideEffects - Return true if this operand (which must
/// be a chain) reaches the specified operand without crossing any
/// side-effecting instructions on any chain path. In practice, this looks
/// through token factors and non-volatile loads. In order to remain efficient,
/// this only looks a couple of nodes in, it does not do an exhaustive search.
///
/// Note that we only need to examine chains when we're searching for
/// side-effects; SelectionDAG requires that all side-effects are represented
/// by chains, even if another operand would force a specific ordering. This
/// constraint is necessary to allow transformations like splitting loads.
bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
                                             unsigned Depth) const {
  if (*this == Dest) return true;

  // Don't search too deeply, we just want to be able to see through
  // TokenFactor's etc.
  if (Depth == 0) return false;

  // If this is a token factor, all inputs to the TF happen in parallel.
  if (getOpcode() == ISD::TokenFactor) {
    // First, try a shallow search.
    if (is_contained((*this)->ops(), Dest)) {
      // We found the chain we want as an operand of this TokenFactor.
      // Essentially, we reach the chain without side-effects if we could
      // serialize the TokenFactor into a simple chain of operations with
      // Dest as the last operation. This is automatically true if the
      // chain has one use: there are no other ordering constraints.
      // If the chain has more than one use, we give up: some other
      // use of Dest might force a side-effect between Dest and the current
      // node.
      if (Dest.hasOneUse())
        return true;
    }
    // Next, try a deep search: check whether every operand of the TokenFactor
    // reaches Dest.
    return llvm::all_of((*this)->ops(), [=](SDValue Op) {
      return Op.reachesChainWithoutSideEffects(Dest, Depth - 1);
    });
  }

  // Loads don't have side effects, look through them.
  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
    if (Ld->isUnordered())
      return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
  }
  return false;
}

bool SDNode::hasPredecessor(const SDNode *N) const {
  SmallPtrSet<const SDNode *, 32> Visited;
  SmallVector<const SDNode *, 16> Worklist;
  Worklist.push_back(this);
  return hasPredecessorHelper(N, Visited, Worklist);
}

void SDNode::intersectFlagsWith(const SDNodeFlags Flags) {
  this->Flags.intersectWith(Flags);
}

SDValue
SelectionDAG::matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp,
                                  ArrayRef<ISD::NodeType> CandidateBinOps,
                                  bool AllowPartials) {
  // The pattern must end in an extract from index 0.
  if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      !isNullConstant(Extract->getOperand(1)))
    return SDValue();

  // Match against one of the candidate binary ops.
  SDValue Op = Extract->getOperand(0);
  if (llvm::none_of(CandidateBinOps, [Op](ISD::NodeType BinOp) {
        return Op.getOpcode() == unsigned(BinOp);
      }))
    return SDValue();

  // Floating-point reductions may require relaxed constraints on the final step
  // of the reduction because they may reorder intermediate operations.
  unsigned CandidateBinOp = Op.getOpcode();
  if (Op.getValueType().isFloatingPoint()) {
    SDNodeFlags Flags = Op->getFlags();
    switch (CandidateBinOp) {
    case ISD::FADD:
      if (!Flags.hasNoSignedZeros() || !Flags.hasAllowReassociation())
        return SDValue();
      break;
    default:
      llvm_unreachable("Unhandled FP opcode for binop reduction");
    }
  }

  // Matching failed - attempt to see if we did enough stages that a partial
  // reduction from a subvector is possible.
  auto PartialReduction = [&](SDValue Op, unsigned NumSubElts) {
    if (!AllowPartials || !Op)
      return SDValue();
    EVT OpVT = Op.getValueType();
    EVT OpSVT = OpVT.getScalarType();
    EVT SubVT = EVT::getVectorVT(*getContext(), OpSVT, NumSubElts);
    if (!TLI->isExtractSubvectorCheap(SubVT, OpVT, 0))
      return SDValue();
    BinOp = (ISD::NodeType)CandidateBinOp;
    return getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Op), SubVT, Op,
                   getVectorIdxConstant(0, SDLoc(Op)));
  };

  // At each stage, we're looking for something that looks like:
  // %s = shufflevector <8 x i32> %op, <8 x i32> undef,
  //                    <8 x i32> <i32 2, i32 3, i32 undef, i32 undef,
  //                               i32 undef, i32 undef, i32 undef, i32 undef>
  // %a = binop <8 x i32> %op, %s
  // Where the mask changes according to the stage. E.g. for a 3-stage pyramid,
  // we expect something like:
  // <4,5,6,7,u,u,u,u>
  // <2,3,u,u,u,u,u,u>
  // <1,u,u,u,u,u,u,u>
  // While a partial reduction match would be:
  // <2,3,u,u,u,u,u,u>
  // <1,u,u,u,u,u,u,u>
  unsigned Stages = Log2_32(Op.getValueType().getVectorNumElements());
  SDValue PrevOp;
  for (unsigned i = 0; i < Stages; ++i) {
    unsigned MaskEnd = (1 << i);

    if (Op.getOpcode() != CandidateBinOp)
      return PartialReduction(PrevOp, MaskEnd);

    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(Op0);
    if (Shuffle) {
      Op = Op1;
    } else {
      Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1);
      Op = Op0;
    }

    // The first operand of the shuffle should be the same as the other operand
    // of the binop.
    if (!Shuffle || Shuffle->getOperand(0) != Op)
      return PartialReduction(PrevOp, MaskEnd);

    // Verify the shuffle has the expected (at this stage of the pyramid) mask.
    for (int Index = 0; Index < (int)MaskEnd; ++Index)
      if (Shuffle->getMaskElt(Index) != (int)(MaskEnd + Index))
        return PartialReduction(PrevOp, MaskEnd);

    PrevOp = Op;
  }

  // Handle subvector reductions, which tend to appear after the shuffle
  // reduction stages.
  while (Op.getOpcode() == CandidateBinOp) {
    unsigned NumElts = Op.getValueType().getVectorNumElements();
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    if (Op0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
        Op1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
        Op0.getOperand(0) != Op1.getOperand(0))
      break;
    SDValue Src = Op0.getOperand(0);
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    if (NumSrcElts != (2 * NumElts))
      break;
    if (!(Op0.getConstantOperandAPInt(1) == 0 &&
          Op1.getConstantOperandAPInt(1) == NumElts) &&
        !(Op1.getConstantOperandAPInt(1) == 0 &&
          Op0.getConstantOperandAPInt(1) == NumElts))
      break;
    Op = Src;
  }

  BinOp = (ISD::NodeType)CandidateBinOp;
  return Op;
}

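/// Scalarize vector operation \p N: for each of the first min(NE, ResNE)
/// lanes, extract that lane from every vector operand, re-issue the operation
/// on the scalars, pad the remaining lanes with UNDEF, and rebuild the result
/// as a BUILD_VECTOR of \p ResNE elements (ResNE == 0 means the full count).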
SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
  assert(N->getNumValues() == 1 &&
         "Can't unroll a vector with multiple results!");

  EVT VT = N->getValueType(0);
  unsigned NE = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  SDLoc dl(N);

  SmallVector<SDValue, 8> Scalars;
  SmallVector<SDValue, 4> Operands(N->getNumOperands());

  // If ResNE is 0, fully unroll the vector op.
  if (ResNE == 0)
    ResNE = NE;
  else if (NE > ResNE)
    NE = ResNE;

  unsigned i;
  for (i = 0; i != NE; ++i) {
    for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
      SDValue Operand = N->getOperand(j);
      EVT OperandVT = Operand.getValueType();
      if (OperandVT.isVector()) {
        // A vector operand; extract a single element.
        EVT OperandEltVT = OperandVT.getVectorElementType();
        Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT,
                              Operand, getVectorIdxConstant(i, dl));
      } else {
        // A scalar operand; just use it as is.
        Operands[j] = Operand;
      }
    }

    switch (N->getOpcode()) {
    default: {
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands,
                                N->getFlags()));
      break;
    }
    case ISD::VSELECT:
      Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
      break;
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
    case ISD::ROTL:
    case ISD::ROTR:
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
                                getShiftAmountOperand(Operands[0].getValueType(),
                                                      Operands[1])));
      break;
    case ISD::SIGN_EXTEND_INREG: {
      EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
                                Operands[0],
                                getValueType(ExtVT)));
    }
    }
  }

  for (; i < ResNE; ++i)
    Scalars.push_back(getUNDEF(EltVT));

  EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE);
  return getBuildVector(VecVT, dl, Scalars);
}

std::pair<SDValue, SDValue> SelectionDAG::UnrollVectorOverflowOp(
    SDNode *N, unsigned ResNE) {
  unsigned Opcode = N->getOpcode();
  assert((Opcode == ISD::UADDO || Opcode == ISD::SADDO ||
          Opcode == ISD::USUBO || Opcode == ISD::SSUBO ||
          Opcode == ISD::UMULO || Opcode == ISD::SMULO) &&
         "Expected an overflow opcode");

  EVT ResVT = N->getValueType(0);
  EVT OvVT = N->getValueType(1);
  EVT ResEltVT = ResVT.getVectorElementType();
  EVT OvEltVT = OvVT.getVectorElementType();
  SDLoc dl(N);

  // If ResNE is 0, fully unroll the vector op.
  unsigned NE = ResVT.getVectorNumElements();
  if (ResNE == 0)
    ResNE = NE;
  else if (NE > ResNE)
    NE = ResNE;

  SmallVector<SDValue, 8> LHSScalars;
  SmallVector<SDValue, 8> RHSScalars;
  ExtractVectorElements(N->getOperand(0), LHSScalars, 0, NE);
  ExtractVectorElements(N->getOperand(1), RHSScalars, 0, NE);

  EVT SVT = TLI->getSetCCResultType(getDataLayout(), *getContext(), ResEltVT);
  SDVTList VTs = getVTList(ResEltVT, SVT);
  SmallVector<SDValue, 8> ResScalars;
  SmallVector<SDValue, 8> OvScalars;
  for (unsigned i = 0; i < NE; ++i) {
    SDValue Res = getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]);
    SDValue Ov =
        getSelect(dl, OvEltVT, Res.getValue(1),
                  getBoolConstant(true, dl, OvEltVT, ResVT),
                  getConstant(0, dl, OvEltVT));

    ResScalars.push_back(Res);
    OvScalars.push_back(Ov);
  }

  ResScalars.append(ResNE - NE, getUNDEF(ResEltVT));
  OvScalars.append(ResNE - NE, getUNDEF(OvEltVT));

  EVT NewResVT = EVT::getVectorVT(*getContext(), ResEltVT, ResNE);
  EVT NewOvVT = EVT::getVectorVT(*getContext(), OvEltVT, ResNE);
  return std::make_pair(getBuildVector(NewResVT, dl, ResScalars),
                        getBuildVector(NewOvVT, dl, OvScalars));
}

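/// Return true if \p LD is a simple, non-indexed load of exactly \p Bytes
/// bytes on the same chain as \p Base, located \p Dist * \p Bytes past it.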
bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD,
                                                  LoadSDNode *Base,
                                                  unsigned Bytes,
                                                  int Dist) const {
  if (LD->isVolatile() || Base->isVolatile())
    return false;
  // TODO: probably too restrictive for atomics, revisit
  if (!LD->isSimple())
    return false;
  if (LD->isIndexed() || Base->isIndexed())
    return false;
  if (LD->getChain() != Base->getChain())
    return false;
  EVT VT = LD->getValueType(0);
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  auto BaseLocDecomp = BaseIndexOffset::match(Base, *this);
  auto LocDecomp = BaseIndexOffset::match(LD, *this);

  int64_t Offset = 0;
  if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset))
    return (Dist * Bytes == Offset);
  return false;
}

/// InferPtrAlign - Infer alignment of a load / store address. Return None
/// if it cannot be inferred.
MaybeAlign SelectionDAG::InferPtrAlign(SDValue Ptr) const {
  // If this is a GlobalAddress + cst, return the alignment.
  const GlobalValue *GV = nullptr;
  int64_t GVOffset = 0;
  if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
    unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
    KnownBits Known(PtrWidth);
    llvm::computeKnownBits(GV, Known, getDataLayout());
    unsigned AlignBits = Known.countMinTrailingZeros();
    if (AlignBits)
      return commonAlignment(Align(1ull << std::min(31U, AlignBits)), GVOffset);
  }

  // If this is a direct reference to a stack slot, use information about the
  // stack slot's alignment.
  int FrameIdx = INT_MIN;
  int64_t FrameOffset = 0;
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
    FrameIdx = FI->getIndex();
  } else if (isBaseWithConstantOffset(Ptr) &&
             isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
    // Handle FI+Cst
    FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
    FrameOffset = Ptr.getConstantOperandVal(1);
  }

  if (FrameIdx != INT_MIN) {
    const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
    return commonAlignment(MFI.getObjectAlign(FrameIdx), FrameOffset);
  }

  return None;
}

/// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
/// which is split (or expanded) into two not necessarily identical pieces.
std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
  // Currently all types are split in half.
  EVT LoVT, HiVT;
  if (!VT.isVector())
    LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
  else
    LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext());

  return std::make_pair(LoVT, HiVT);
}

/// GetDependentSplitDestVTs - Compute the VTs needed for the low/hi parts of
/// a type, dependent on an enveloping VT that has been split into two
/// identical pieces. Sets the HiIsEmpty flag when the hi type has zero
/// storage size.
std::pair<EVT, EVT>
SelectionDAG::GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT,
                                       bool *HiIsEmpty) const {
  EVT EltTp = VT.getVectorElementType();
  // Examples:
  //   custom VL=8  with enveloping VL=8/8 yields 8/0 (hi empty)
  //   custom VL=9  with enveloping VL=8/8 yields 8/1
  //   custom VL=10 with enveloping VL=8/8 yields 8/2
  //   etc.
  ElementCount VTNumElts = VT.getVectorElementCount();
  ElementCount EnvNumElts = EnvVT.getVectorElementCount();
  assert(VTNumElts.isScalable() == EnvNumElts.isScalable() &&
         "Mixing fixed width and scalable vectors when enveloping a type");
  EVT LoVT, HiVT;
  if (VTNumElts.getKnownMinValue() > EnvNumElts.getKnownMinValue()) {
    LoVT = EnvVT;
    HiVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts - EnvNumElts);
    *HiIsEmpty = false;
  } else {
    // Flag that the hi type has zero storage size, but return the split
    // envelope type (this would be easier if vector types with zero elements
    // were allowed).
    LoVT = EVT::getVectorVT(*getContext(), EltTp, VTNumElts);
    HiVT = EnvVT;
    *HiIsEmpty = true;
  }
  return std::make_pair(LoVT, HiVT);
}

/// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
/// low/high part.
std::pair<SDValue, SDValue>
SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
                          const EVT &HiVT) {
  assert(LoVT.isScalableVector() == HiVT.isScalableVector() &&
         LoVT.isScalableVector() == N.getValueType().isScalableVector() &&
         "Splitting vector with an invalid mixture of fixed and scalable "
         "vector types");
  assert(LoVT.getVectorMinNumElements() + HiVT.getVectorMinNumElements() <=
             N.getValueType().getVectorMinNumElements() &&
         "More vector elements requested than available!");
  SDValue Lo, Hi;
  Lo =
      getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N, getVectorIdxConstant(0, DL));
  // For scalable vectors it is safe to use LoVT.getVectorMinNumElements()
  // (rather than having to use ElementCount), because EXTRACT_SUBVECTOR scales
  // IDX with the runtime scaling factor of the result vector type. For
  // fixed-width result vectors, that runtime scaling factor is 1.
  Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
               getVectorIdxConstant(LoVT.getVectorMinNumElements(), DL));
  return std::make_pair(Lo, Hi);
}

/// Widen the vector up to the next power of two using INSERT_SUBVECTOR.
SDValue SelectionDAG::WidenVector(const SDValue &N, const SDLoc &DL) {
  EVT VT = N.getValueType();
  EVT WideVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
                                NextPowerOf2(VT.getVectorNumElements()));
  return getNode(ISD::INSERT_SUBVECTOR, DL, WideVT, getUNDEF(WideVT), N,
                 getVectorIdxConstant(0, DL));
}

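/// ExtractVectorElements - Append 'Count' scalar elements of 'Op', starting
/// at index 'Start', to 'Args' as values of type 'EltVT'. A 'Count' of zero
/// means all elements of the vector; a default-constructed 'EltVT' means the
/// vector's own element type.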
void SelectionDAG::ExtractVectorElements(SDValue Op,
                                         SmallVectorImpl<SDValue> &Args,
                                         unsigned Start, unsigned Count,
                                         EVT EltVT) {
  EVT VT = Op.getValueType();
  if (Count == 0)
    Count = VT.getVectorNumElements();
  if (EltVT == EVT())
    EltVT = VT.getVectorElementType();
  SDLoc SL(Op);
  for (unsigned i = Start, e = Start + Count; i != e; ++i) {
    Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Op,
                           getVectorIdxConstant(i, SL)));
  }
}

// getAddressSpace - Return the address space this GlobalAddress belongs to.
unsigned GlobalAddressSDNode::getAddressSpace() const {
  return getGlobal()->getType()->getAddressSpace();
}

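/// getType - Return the IR type of the constant pool entry, whether it is a
/// target-specific machine constant pool value or an ordinary LLVM constant.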
Type *ConstantPoolSDNode::getType() const {
  if (isMachineConstantPoolEntry())
    return Val.MachineCPVal->getType();
  return Val.ConstVal->getType();
}

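/// isConstantSplat - If all operands are constants or undef, return true and
/// compute the smallest bit pattern (of at least 'MinSplatBits' bits) whose
/// repetition produces the vector: 'SplatValue' holds the pattern,
/// 'SplatUndef' the bits that come from undef elements, 'SplatBitSize' the
/// pattern width, and 'HasAnyUndefs' whether any element was undef. For
/// example, the v2i32 vector <0x01010101, 0x01010101> yields
/// SplatBitSize == 8 with SplatValue == 0x01 (assuming MinSplatBits <= 8).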
bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
                                        unsigned &SplatBitSize,
                                        bool &HasAnyUndefs,
                                        unsigned MinSplatBits,
                                        bool IsBigEndian) const {
  EVT VT = getValueType(0);
  assert(VT.isVector() && "Expected a vector type");
  unsigned VecWidth = VT.getSizeInBits();
  if (MinSplatBits > VecWidth)
    return false;

  // FIXME: The widths are based on this node's type, but build vectors can
  // truncate their operands.
  SplatValue = APInt(VecWidth, 0);
  SplatUndef = APInt(VecWidth, 0);

  // Get the bits. Bits with undefined values (when the corresponding element
  // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
  // in SplatValue. If any of the values are not constant, give up and return
  // false.
  unsigned NumOps = getNumOperands();
  assert(NumOps > 0 && "isConstantSplat has 0-size build vector");
  unsigned EltWidth = VT.getScalarSizeInBits();

  for (unsigned j = 0; j < NumOps; ++j) {
    unsigned i = IsBigEndian ? NumOps - 1 - j : j;
    SDValue OpVal = getOperand(i);
    unsigned BitPos = j * EltWidth;

    if (OpVal.isUndef())
      SplatUndef.setBits(BitPos, BitPos + EltWidth);
    else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal))
      SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
    else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
      SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
    else
      return false;
  }

  // The build_vector is all constants or undefs. Find the smallest element
  // size that splats the vector.
  HasAnyUndefs = (SplatUndef != 0);

  // FIXME: This does not work for vectors with elements less than 8 bits.
  while (VecWidth > 8) {
    unsigned HalfSize = VecWidth / 2;
    APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue = SplatValue.trunc(HalfSize);
    APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
    APInt LowUndef = SplatUndef.trunc(HalfSize);

    // If the two halves do not match (ignoring undef bits), stop here.
    if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
        MinSplatBits > HalfSize)
      break;

    SplatValue = HighValue | LowValue;
    SplatUndef = HighUndef & LowUndef;

    VecWidth = HalfSize;
  }

  SplatBitSize = VecWidth;
  return true;
}

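/// getSplatValue - Return the value splatted across all demanded elements,
/// or an empty SDValue if the demanded elements do not all agree. Undef
/// elements do not prevent a splat and are recorded in 'UndefElements'; if
/// every demanded element is undef, the first demanded operand is returned.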
SDValue BuildVectorSDNode::getSplatValue(const APInt &DemandedElts,
                                         BitVector *UndefElements) const {
  unsigned NumOps = getNumOperands();
  if (UndefElements) {
    UndefElements->clear();
    UndefElements->resize(NumOps);
  }
  assert(NumOps == DemandedElts.getBitWidth() && "Unexpected vector size");
  if (!DemandedElts)
    return SDValue();
  SDValue Splatted;
  for (unsigned i = 0; i != NumOps; ++i) {
    if (!DemandedElts[i])
      continue;
    SDValue Op = getOperand(i);
    if (Op.isUndef()) {
      if (UndefElements)
        (*UndefElements)[i] = true;
    } else if (!Splatted) {
      Splatted = Op;
    } else if (Splatted != Op) {
      return SDValue();
    }
  }

  if (!Splatted) {
    unsigned FirstDemandedIdx = DemandedElts.countTrailingZeros();
    assert(getOperand(FirstDemandedIdx).isUndef() &&
           "Can only have a splat without a constant for all undefs.");
    return getOperand(FirstDemandedIdx);
  }

  return Splatted;
}

SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
  APInt DemandedElts = APInt::getAllOnesValue(getNumOperands());
  return getSplatValue(DemandedElts, UndefElements);
}

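/// getRepeatedSequence - Find the shortest repeating sequence of operands
/// whose repetition covers the demanded elements of the vector, e.g.
/// <a, b, a, b> yields the sequence {a, b}. The sequence length is always a
/// power of two strictly shorter than the operand count. Undef demanded
/// elements are recorded in 'UndefElements' and match any sequence value.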
bool BuildVectorSDNode::getRepeatedSequence(const APInt &DemandedElts,
                                            SmallVectorImpl<SDValue> &Sequence,
                                            BitVector *UndefElements) const {
  unsigned NumOps = getNumOperands();
  Sequence.clear();
  if (UndefElements) {
    UndefElements->clear();
    UndefElements->resize(NumOps);
  }
  assert(NumOps == DemandedElts.getBitWidth() && "Unexpected vector size");
  if (!DemandedElts || NumOps < 2 || !isPowerOf2_32(NumOps))
    return false;

  // Set the undefs even if we don't find a sequence (like getSplatValue).
  if (UndefElements)
    for (unsigned I = 0; I != NumOps; ++I)
      if (DemandedElts[I] && getOperand(I).isUndef())
        (*UndefElements)[I] = true;

  // Iteratively widen the sequence length looking for repetitions.
  for (unsigned SeqLen = 1; SeqLen < NumOps; SeqLen *= 2) {
    Sequence.append(SeqLen, SDValue());
    for (unsigned I = 0; I != NumOps; ++I) {
      if (!DemandedElts[I])
        continue;
      SDValue &SeqOp = Sequence[I % SeqLen];
      SDValue Op = getOperand(I);
      if (Op.isUndef()) {
        if (!SeqOp)
          SeqOp = Op;
        continue;
      }
      if (SeqOp && !SeqOp.isUndef() && SeqOp != Op) {
        Sequence.clear();
        break;
      }
      SeqOp = Op;
    }
    if (!Sequence.empty())
      return true;
  }

  assert(Sequence.empty() && "Failed to empty non-repeating sequence pattern");
  return false;
}

bool BuildVectorSDNode::getRepeatedSequence(SmallVectorImpl<SDValue> &Sequence,
                                            BitVector *UndefElements) const {
  APInt DemandedElts = APInt::getAllOnesValue(getNumOperands());
  return getRepeatedSequence(DemandedElts, Sequence, UndefElements);
}

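/// Convenience wrappers around getSplatValue that return the splatted value
/// as a ConstantSDNode or ConstantFPSDNode, or null if the vector is not a
/// splat of the requested constant kind.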
ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(const APInt &DemandedElts,
                                        BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(
      getSplatValue(DemandedElts, UndefElements));
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(const APInt &DemandedElts,
                                          BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(
      getSplatValue(DemandedElts, UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
}

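/// getConstantFPSplatPow2ToLog2Int - If this vector is a splat of a
/// floating-point constant that converts exactly (in 'BitWidth' bits) to an
/// integer power of two, return the base-2 logarithm of that integer;
/// otherwise return -1.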
int32_t
BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
                                                   uint32_t BitWidth) const {
  if (ConstantFPSDNode *CN =
          dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
    bool IsExact;
    APSInt IntVal(BitWidth);
    const APFloat &APF = CN->getValueAPF();
    if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) !=
            APFloat::opOK ||
        !IsExact)
      return -1;

    return IntVal.exactLogBase2();
  }
  return -1;
}

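/// isConstant - Return true if every operand of this build_vector is either
/// undef, a constant integer, or a constant FP value.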
bool BuildVectorSDNode::isConstant() const {
  for (const SDValue &Op : op_values()) {
    unsigned Opc = Op.getOpcode();
    if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
      return false;
  }
  return true;
}

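/// isSplatMask - Return true if the shuffle mask broadcasts a single input
/// element; undef mask entries are ignored.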
bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
  // Find the first non-undef value in the shuffle mask.
  unsigned i, e;
  for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
    /* search */;

  // If all elements are undefined, this shuffle can be considered a splat
  // (although it should eventually get simplified away completely).
  if (i == e)
    return true;

  // Make sure all remaining elements are either undef or the same as the first
  // non-undef value.
  for (int Idx = Mask[i]; i != e; ++i)
    if (Mask[i] >= 0 && Mask[i] != Idx)
      return false;
  return true;
}

// Returns the SDNode if it is a constant integer BuildVector, a constant
// integer, a SPLAT_VECTOR of a constant integer, or a GlobalAddress whose
// offset folding is legal (which can be treated as a constant integer).
SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) const {
  if (isa<ConstantSDNode>(N))
    return N.getNode();
  if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
    return N.getNode();
  // Treat a GlobalAddress supporting constant offset folding as a
  // constant integer.
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N))
    if (GA->getOpcode() == ISD::GlobalAddress &&
        TLI->isOffsetFoldingLegal(GA))
      return GA;
  if ((N.getOpcode() == ISD::SPLAT_VECTOR) &&
      isa<ConstantSDNode>(N.getOperand(0)))
    return N.getNode();
  return nullptr;
}

// Returns the SDNode if it is a constant float BuildVector
// or constant float.
SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) const {
  if (isa<ConstantFPSDNode>(N))
    return N.getNode();

  if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
    return N.getNode();

  return nullptr;
}

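/// createOperands - Allocate the operand list for Node from Vals, wire up
/// each use to the node, and compute the node's divergence from its operands
/// and from target-specific sources of divergence.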
void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
  assert(!Node->OperandList && "Node already has operands");
  assert(SDNode::getMaxNumOperands() >= Vals.size() &&
         "too many operands to fit into SDNode");
  SDUse *Ops = OperandRecycler.allocate(
      ArrayRecycler<SDUse>::Capacity::get(Vals.size()), OperandAllocator);

  bool IsDivergent = false;
  for (unsigned I = 0; I != Vals.size(); ++I) {
    Ops[I].setUser(Node);
    Ops[I].setInitial(Vals[I]);
    // Skip Chain. It does not carry divergence.
    if (Ops[I].Val.getValueType() != MVT::Other)
      IsDivergent |= Ops[I].getNode()->isDivergent();
  }
  Node->NumOperands = Vals.size();
  Node->OperandList = Ops;
  if (!TLI->isSDNodeAlwaysUniform(Node)) {
    IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, DA);
    Node->SDNodeBits.IsDivergent = IsDivergent;
  }
  checkForCycles(Node);
}

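/// getTokenFactor - Build a TokenFactor over the given chain values. If more
/// values are supplied than fit into a single node's operand list, fold the
/// excess into intermediate TokenFactor nodes first.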
SDValue SelectionDAG::getTokenFactor(const SDLoc &DL,
                                     SmallVectorImpl<SDValue> &Vals) {
  size_t Limit = SDNode::getMaxNumOperands();
  while (Vals.size() > Limit) {
    unsigned SliceIdx = Vals.size() - Limit;
    auto ExtractedTFs = ArrayRef<SDValue>(Vals).slice(SliceIdx, Limit);
    SDValue NewTF = getNode(ISD::TokenFactor, DL, MVT::Other, ExtractedTFs);
    Vals.erase(Vals.begin() + SliceIdx, Vals.end());
    Vals.emplace_back(NewTF);
  }
  return getNode(ISD::TokenFactor, DL, MVT::Other, Vals);
}

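/// getNeutralElement - Return the identity element for the given opcode, or
/// an empty SDValue if the opcode has none (e.g. ADD -> 0, MUL -> 1,
/// AND -> all-ones). For FMINNUM/FMAXNUM the result depends on the no-NaNs
/// and no-infs fast-math flags in 'Flags'.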
SDValue SelectionDAG::getNeutralElement(unsigned Opcode, const SDLoc &DL,
                                        EVT VT, SDNodeFlags Flags) {
  switch (Opcode) {
  default:
    return SDValue();
  case ISD::ADD:
  case ISD::OR:
  case ISD::XOR:
  case ISD::UMAX:
    return getConstant(0, DL, VT);
  case ISD::MUL:
    return getConstant(1, DL, VT);
  case ISD::AND:
  case ISD::UMIN:
    return getAllOnesConstant(DL, VT);
  case ISD::SMAX:
    return getConstant(APInt::getSignedMinValue(VT.getSizeInBits()), DL, VT);
  case ISD::SMIN:
    return getConstant(APInt::getSignedMaxValue(VT.getSizeInBits()), DL, VT);
  case ISD::FADD:
    return getConstantFP(-0.0, DL, VT);
  case ISD::FMUL:
    return getConstantFP(1.0, DL, VT);
  case ISD::FMINNUM:
  case ISD::FMAXNUM: {
    // Neutral element for fminnum is NaN, Inf or FLT_MAX, depending on FMF.
    const fltSemantics &Semantics = EVTToAPFloatSemantics(VT);
    APFloat NeutralAF = !Flags.hasNoNaNs() ? APFloat::getQNaN(Semantics) :
                        !Flags.hasNoInfs() ? APFloat::getInf(Semantics) :
                        APFloat::getLargest(Semantics);
    if (Opcode == ISD::FMAXNUM)
      NeutralAF.changeSign();

    return getConstantFP(NeutralAF, DL, VT);
  }
  }
}

#ifndef NDEBUG
static void checkForCyclesHelper(const SDNode *N,
                                 SmallPtrSetImpl<const SDNode *> &Visited,
                                 SmallPtrSetImpl<const SDNode *> &Checked,
                                 const llvm::SelectionDAG *DAG) {
  // If this node has already been checked, don't check it again.
  if (Checked.count(N))
    return;

  // If a node has already been visited on this depth-first walk, reject it as
  // a cycle.
  if (!Visited.insert(N).second) {
    errs() << "Detected cycle in SelectionDAG\n";
    dbgs() << "Offending node:\n";
    N->dumprFull(DAG);
    dbgs() << "\n";
    abort();
  }

  for (const SDValue &Op : N->op_values())
    checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);

  Checked.insert(N);
  Visited.erase(N);
}
#endif

void llvm::checkForCycles(const llvm::SDNode *N,
                          const llvm::SelectionDAG *DAG,
                          bool force) {
#ifndef NDEBUG
  bool check = force;
#ifdef EXPENSIVE_CHECKS
  check = true;
#endif // EXPENSIVE_CHECKS
  if (check) {
    assert(N && "Checking nonexistent SDNode");
    SmallPtrSet<const SDNode *, 32> visited;
    SmallPtrSet<const SDNode *, 32> checked;
    checkForCyclesHelper(N, visited, checked, DAG);
  }
#endif // !NDEBUG
}

void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
  checkForCycles(DAG->getRoot().getNode(), DAG, force);
}