//===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <set>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeInserted(SDNode *) {}

void SelectionDAG::DAGNodeDeletedListener::anchor() {}

#define DEBUG_TYPE "selectiondag"

static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt",
       cl::Hidden, cl::init(true),
       cl::desc("Gang up loads and stores generated by inlining of memcpy"));

static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max",
       cl::desc("Number limit for gluing ld/st of memcpy."),
       cl::Hidden, cl::init(0));

static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) {
  LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G););
}

//===----------------------------------------------------------------------===//
// ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}

bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}

//===----------------------------------------------------------------------===//
// ISD Namespace
//===----------------------------------------------------------------------===//

bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
  auto *BV = dyn_cast<BuildVectorSDNode>(N);
  if (!BV)
    return false;

  APInt SplatUndef;
  unsigned SplatBitSize;
  bool HasUndefs;
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
                             EltSize) &&
         EltSize == SplatBitSize;
}

// FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
// specializations of the more general isConstantSplatVector()?

bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).isUndef())
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target and
  // a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
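  // For example, a v8i8 all-ones vector whose elements were promoted to i32
  // may hold constants such as 0x000000FF; only the low 8 bits of each
  // element need to be ones.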
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
      return false;
  return true;
}

bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  while (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  bool IsAllUndef = true;
  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    IsAllUndef = false;
    // Do not accept build_vectors that aren't all constants or which have non-0
    // elements. We have to be a bit careful here, as the type of the constant
    // may not be the same as the type of the vector elements due to type
    // legalization (the elements are promoted to a legal type for the target
    // and a vector of a type may be legal when the base element type is not).
    // We only want to check enough bits to cover the vector elements, because
    // we care if the resultant vector is all zeros, not whether the individual
    // constants are.
    unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
      if (CN->getAPIntValue().countTrailingZeros() < EltSize)
        return false;
    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
      if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
        return false;
    } else
      return false;
  }

  // Do not accept an all-undef vector.
  if (IsAllUndef)
    return false;
  return true;
}

bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  for (const SDValue &Op : N->op_values()) {
    if (Op.isUndef())
      continue;
    if (!isa<ConstantFPSDNode>(Op))
      return false;
  }
  return true;
}

bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;
  return all_of(N->op_values(), [](SDValue Op) { return Op.isUndef(); });
}

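/// matchUnaryPredicate - Attempt to match a unary predicate against either a
/// scalar constant or each element of a constant BUILD_VECTOR. For example, a
/// caller might write:
///   ISD::matchUnaryPredicate(Op, [](ConstantSDNode *C) {
///     return C && C->getAPIntValue().isPowerOf2();
///   });
/// to test whether Op is a power-of-2 constant or a build_vector of
/// power-of-2 constants.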
bool ISD::matchUnaryPredicate(SDValue Op,
                              std::function<bool(ConstantSDNode *)> Match,
                              bool AllowUndefs) {
  // FIXME: Add support for scalar UNDEF cases?
  if (auto *Cst = dyn_cast<ConstantSDNode>(Op))
    return Match(Cst);

  // FIXME: Add support for vector UNDEF cases?
  if (ISD::BUILD_VECTOR != Op.getOpcode())
    return false;

  EVT SVT = Op.getValueType().getScalarType();
  for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
    if (AllowUndefs && Op.getOperand(i).isUndef()) {
      if (!Match(nullptr))
        return false;
      continue;
    }

    auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(i));
    if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst))
      return false;
  }
  return true;
}

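/// matchBinaryPredicate - Attempt to match a binary predicate against a pair
/// of scalar constants or against the corresponding elements of two constant
/// BUILD_VECTORs, e.g. to test that each RHS element is the bitwise
/// complement of the matching LHS element.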
bool ISD::matchBinaryPredicate(
    SDValue LHS, SDValue RHS,
    std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
    bool AllowUndefs, bool AllowTypeMismatch) {
  if (!AllowTypeMismatch && LHS.getValueType() != RHS.getValueType())
    return false;

  // TODO: Add support for scalar UNDEF cases?
  if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
    if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
      return Match(LHSCst, RHSCst);

  // TODO: Add support for vector UNDEF cases?
  if (ISD::BUILD_VECTOR != LHS.getOpcode() ||
      ISD::BUILD_VECTOR != RHS.getOpcode())
    return false;

  EVT SVT = LHS.getValueType().getScalarType();
  for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
    SDValue LHSOp = LHS.getOperand(i);
    SDValue RHSOp = RHS.getOperand(i);
    bool LHSUndef = AllowUndefs && LHSOp.isUndef();
    bool RHSUndef = AllowUndefs && RHSOp.isUndef();
    auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
    auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
    if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
      return false;
    if (!AllowTypeMismatch && (LHSOp.getValueType() != SVT ||
                               LHSOp.getValueType() != RHSOp.getValueType()))
      return false;
    if (!Match(LHSCst, RHSCst))
      return false;
  }
  return true;
}

ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
  switch (ExtType) {
  case ISD::EXTLOAD:
    return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
  case ISD::SEXTLOAD:
    return ISD::SIGN_EXTEND;
  case ISD::ZEXTLOAD:
    return ISD::ZERO_EXTEND;
  default:
    break;
  }

  llvm_unreachable("Invalid LoadExtType");
}

ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
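  // CondCode values are bit-encoded: bit 0 = E (equal), bit 1 = G (greater),
  // bit 2 = L (less), bit 3 = U (unordered), bit 4 = N (integer comparisons,
  // which don't care about orderedness). For example, swapping SETULT (U|L)
  // this way yields SETUGT (U|G).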
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}

static ISD::CondCode getSetCCInverseImpl(ISD::CondCode Op, bool isIntegerLike) {
  unsigned Operation = Op;
  if (isIntegerLike)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.
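  // For example, the integer inverse of SETLT (N|L) is SETGE (N|G|E), and
  // the FP inverse of SETOLT (L) is SETUGE (U|G|E).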

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}

ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, EVT Type) {
  return getSetCCInverseImpl(Op, Type.isInteger());
}

ISD::CondCode ISD::GlobalISel::getSetCCInverse(ISD::CondCode Op,
                                               bool isIntegerLike) {
  return getSetCCInverseImpl(Op, isIntegerLike);
}

/// For an integer comparison, return 1 if the comparison is a signed operation
/// and 2 if the result is an unsigned comparison. Return zero if the operation
/// does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

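/// getSetCCOrOperation - Return the result of a logical OR between two
/// different comparisons of the same values: ((X op1 Y) | (X op2 Y)). For
/// example, OR-ing SETOLT with SETOGT yields SETONE, and OR-ing integer
/// SETEQ with SETULT yields SETULE.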
ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       EVT Type) {
  bool IsInteger = Type.isInteger();
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If both the N and U bits are set, the combined code is out of range; map
  // it back to a valid unsigned/unordered comparison by clearing the N bit.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;  // Clear the N bit.

  // Canonicalize illegal integer setcc's.
  if (IsInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}

ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        EVT Type) {
  bool IsInteger = Type.isInteger();
  if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (IsInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}

//===----------------------------------------------------------------------===//
// SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDValue> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              ArrayRef<SDUse> Ops) {
  for (auto& Op : Ops) {
    ID.AddPointer(Op.getNode());
    ID.AddInteger(Op.getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
                          SDVTList VTList, ArrayRef<SDValue> OpList) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList);
}

/// If this is an SDNode with special info, add this info to the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
  case ISD::MCSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant: {
    const ConstantSDNode *C = cast<ConstantSDNode>(N);
    ID.AddPointer(C->getConstantIntValue());
    ID.AddBoolean(C->isOpaque());
    break;
  }
  case ISD::TargetConstantFP:
  case ISD::ConstantFP:
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::LIFETIME_START:
  case ISD::LIFETIME_END:
    if (cast<LifetimeSDNode>(N)->hasOffset()) {
      ID.AddInteger(cast<LifetimeSDNode>(N)->getSize());
      ID.AddInteger(cast<LifetimeSDNode>(N)->getOffset());
    }
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MLOAD: {
    const MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N);
    ID.AddInteger(MLD->getMemoryVT().getRawBits());
    ID.AddInteger(MLD->getRawSubclassData());
    ID.AddInteger(MLD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MSTORE: {
    const MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
    ID.AddInteger(MST->getMemoryVT().getRawBits());
    ID.AddInteger(MST->getRawSubclassData());
    ID.AddInteger(MST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MGATHER: {
    const MaskedGatherSDNode *MG = cast<MaskedGatherSDNode>(N);
    ID.AddInteger(MG->getMemoryVT().getRawBits());
    ID.AddInteger(MG->getRawSubclassData());
    ID.AddInteger(MG->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::MSCATTER: {
    const MaskedScatterSDNode *MS = cast<MaskedScatterSDNode>(N);
    ID.AddInteger(MS->getMemoryVT().getRawBits());
    ID.AddInteger(MS->getRawSubclassData());
    ID.AddInteger(MS->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_CLR:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}

/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->ops());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}

//===----------------------------------------------------------------------===//
// SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true;  // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true;  // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true;  // Never CSE anything that produces a flag.

  return false;
}

/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes) that adds a
  // reference to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (SDNode &Node : allnodes())
    if (Node.use_empty())
      DeadNodes.push_back(&Node);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();
    // Skip to the next node if we've already managed to delete this one. This
    // can happen if replacing a node causes another node already on the
    // worklist to be deleted.
    if (N->getOpcode() == ISD::DELETED_NODE)
      continue;

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, nullptr);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N){
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted. (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N->getIterator() != AllNodes.begin() &&
         "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SDDbgInfo::erase(const SDNode *Node) {
  DbgValMapType::iterator I = DbgValMap.find(Node);
  if (I == DbgValMap.end())
    return;
  for (auto &Val: I->second)
    Val->setIsInvalidated();
  DbgValMap.erase(I);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  // If we have operands, deallocate them.
  removeOperands(N);

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  // FIXME: There are places in SDag that have grown a dependency on the opcode
  // value in the released node.
  __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
  N->NodeType = ISD::DELETED_NODE;

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate
  // them and forget about that node.
  DbgInfo->erase(N);
}

#ifndef NDEBUG
/// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}
#endif // NDEBUG

/// Insert a newly allocated node into the DAG.
///
/// Handles insertion into the all nodes list and CSE map, as well as
/// verification and other common operations when a new node is allocated.
void SelectionDAG::InsertNode(SDNode *N) {
  AllNodes.push_back(N);
#ifndef NDEBUG
  N->PersistentId = NextPersistentId++;
  VerifySDNode(N);
#endif
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeInserted(N);
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
/// corresponds to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(std::pair<std::string, unsigned>(
        ESN->getSymbol(), ESN->getTargetFlags()));
    break;
  }
  case ISD::MCSymbol: {
    auto *MCSN = cast<MCSymbolSDNode>(N);
    Erased = MCSymbols.erase(MCSN->getMCSymbol());
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return nullptr;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
  AddNodeIDCustom(ID, N);
  SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
  if (Node)
    Node->intersectFlagsWith(N->getFlags());
  return Node;
}

unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return getDataLayout().getABITypeAlignment(Ty);
}

// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
    : TM(tm), OptLevel(OL),
      EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
      Root(getEntryNode()) {
  InsertNode(&EntryNode);
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &NewMF,
                        OptimizationRemarkEmitter &NewORE,
                        Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
                        LegacyDivergenceAnalysis *Divergence,
                        ProfileSummaryInfo *PSIin,
                        BlockFrequencyInfo *BFIin) {
  MF = &NewMF;
  SDAGISelPass = PassPtr;
  ORE = &NewORE;
  TLI = getSubtarget().getTargetLowering();
  TSI = getSubtarget().getSelectionDAGInfo();
  LibInfo = LibraryInfo;
  Context = &MF->getFunction().getContext();
  DA = Divergence;
  PSI = PSIin;
  BFI = BFIin;
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  delete DbgInfo;
}

bool SelectionDAG::shouldOptForSize() const {
  return MF->getFunction().hasOptSize() ||
         llvm::shouldOptimizeForSize(FLI->MBB->getBasicBlock(), PSI, BFI);
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(&AllNodes.front());
#ifndef NDEBUG
  NextPersistentId = 0;
#endif
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    default: break;
    case ISD::Constant:
    case ISD::ConstantFP:
      llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
                       "debug location. Use another overload.");
    }
  }
  return N;
}

SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
                                          const SDLoc &DL, void *&InsertPos) {
  SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  if (N) {
    switch (N->getOpcode()) {
    case ISD::Constant:
    case ISD::ConstantFP:
      // Erase the debug location from the node if the node is used at several
      // different places. Do not propagate one location to all uses, as that
      // degrades the single-stepping experience in the debugger.
      if (N->getDebugLoc() != DL.getDebugLoc())
        N->setDebugLoc(DebugLoc());
      break;
    default:
      // When the node's point of use is located earlier in the instruction
      // sequence than its prior point of use, update its debug info to the
      // earlier location.
      if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
        N->setDebugLoc(DL.getDebugLoc());
      break;
    }
  }
  return N;
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandRecycler.clear(OperandAllocator);
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  MCSymbols.clear();
  SDCallSiteDbgInfo.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(nullptr));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(nullptr));

  EntryNode.UseList = nullptr;
  InsertNode(&EntryNode);
  Root = getEntryNode();
  DbgInfo->clear();
}

SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType())
             ? getNode(ISD::FP_EXTEND, DL, VT, Op)
             : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL));
}

std::pair<SDValue, SDValue>
SelectionDAG::getStrictFPExtendOrRound(SDValue Op, SDValue Chain,
                                       const SDLoc &DL, EVT VT) {
  assert(!VT.bitsEq(Op.getValueType()) &&
         "Strict no-op FP extend/round not allowed.");
  SDValue Res =
      VT.bitsGT(Op.getValueType())
          ? getNode(ISD::STRICT_FP_EXTEND, DL, {VT, MVT::Other}, {Chain, Op})
          : getNode(ISD::STRICT_FP_ROUND, DL, {VT, MVT::Other},
                    {Chain, Op, getIntPtrConstant(0, DL)});

  return std::pair<SDValue, SDValue>(Res, SDValue(Res.getNode(), 1));
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
                                        EVT OpVT) {
  if (VT.bitsLE(Op.getValueType()))
    return getNode(ISD::TRUNCATE, SL, VT, Op);

  TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
  return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
}

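/// getZeroExtendInReg - Return the expression required to zero extend the Op
/// value assuming it was the smaller VT type: an AND with a mask of the low
/// VT-sized bits. For example, zero-extending an i8 value held in an i32
/// register is an AND with 0xFF.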
SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  assert(!VT.isVector() &&
         "getZeroExtendInReg should use the vector element type instead of "
         "the vector type!");
  if (Op.getValueType().getScalarType() == VT) return Op;
  unsigned BitWidth = Op.getScalarValueSizeInBits();
  APInt Imm = APInt::getLowBitsSet(BitWidth,
                                   VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, DL, Op.getValueType()));
}

SDValue SelectionDAG::getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
  // Only unsigned pointer semantics are supported right now. In the future this
  // might delegate to TLI to check pointer signedness.
  return getZExtOrTrunc(Op, DL, VT);
}

SDValue SelectionDAG::getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
  // Only unsigned pointer semantics are supported right now. In the future this
  // might delegate to TLI to check pointer signedness.
  return getZeroExtendInReg(Op, DL, VT);
}

/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
      getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
  SDValue TrueValue = getBoolConstant(true, DL, VT, VT);
  return getNode(ISD::XOR, DL, VT, Val, TrueValue);
}

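/// getBoolConstant - Return the canonical constant for "true" in VT, given
/// the target's boolean contents for OpVT (either 1 or all-ones), or 0 for
/// "false".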
SDValue SelectionDAG::getBoolConstant(bool V, const SDLoc &DL, EVT VT,
                                      EVT OpVT) {
  if (!V)
    return getConstant(0, DL, VT);

  switch (TLI->getBooleanContents(OpVT)) {
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::UndefinedBooleanContent:
    return getConstant(1, DL, VT);
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return getAllOnesConstant(DL, VT);
  }
  llvm_unreachable("Unexpected boolean content enum!");
}

SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
          (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
                                  bool isT, bool isO) {
  return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
                                  EVT VT, bool isT, bool isO) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM. In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
                           TargetLowering::TypePromoteInteger) {
    EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }
  // In other cases the element type is illegal and needs to be expanded, for
  // example v2i64 on MIPS32. In this case, find the nearest legal type, split
  // the value into n parts and use a vector type with n-times the elements.
  // Then bitcast to the type requested.
  // Legalizing constants too early makes the DAGCombiner's job harder so we
  // only legalize if the DAG tells us we must produce legal types.
  else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
           TLI->getTypeAction(*getContext(), EltVT) ==
               TargetLowering::TypeExpandInteger) {
    const APInt &NewVal = Elt->getValue();
    EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
    unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
    unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
    EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);

    // Check the temporary vector is the correct size. If this fails then
    // getTypeToTransformTo() probably returned a type whose size (in bits)
    // isn't a power-of-2 factor of the requested type size.
    assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());

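    // For example, splatting the i64 constant 0x0000000100000002 as v2i64 on
    // a 32-bit target uses ViaVecVT = v4i32 and produces the two i32 parts
    // {0x2, 0x1} (little-endian part order) for each element.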
    SmallVector<SDValue, 2> EltParts;
    for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
      EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
                                         .zextOrTrunc(ViaEltSizeInBits), DL,
                                     ViaEltVT, isT, isO));
    }

    // EltParts is currently in little endian order. If we actually want
    // big-endian order then reverse it now.
    if (getDataLayout().isBigEndian())
      std::reverse(EltParts.begin(), EltParts.end());

    // The elements must be reversed when the element order differs from the
    // endianness of the elements (because the BITCAST is itself a vector
    // shuffle in this situation). However, we do not need any code to perform
    // this reversal because getConstant() is producing a vector splat.
    // This situation occurs in MIPS MSA.
1290
1291 SmallVector<SDValue, 8> Ops;
1292 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
1293 Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());
1294
1295 SDValue V = getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
1296 return V;
1297 }
1298
1299 assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
1300 "APInt size does not match type size!");
1301 unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
1302 FoldingSetNodeID ID;
1303 AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1304 ID.AddPointer(Elt);
1305 ID.AddBoolean(isO);
1306 void *IP = nullptr;
1307 SDNode *N = nullptr;
1308 if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1309 if (!VT.isVector())
1310 return SDValue(N, 0);
1311
1312 if (!N) {
1313 N = newSDNode<ConstantSDNode>(isT, isO, Elt, EltVT);
1314 CSEMap.InsertNode(N, IP);
1315 InsertNode(N);
1316 NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this);
1317 }
1318
1319 SDValue Result(N, 0);
1320 if (VT.isScalableVector())
1321 Result = getSplatVector(VT, DL, Result);
1322 else if (VT.isVector())
1323 Result = getSplatBuildVector(VT, DL, Result);
1324
1325 return Result;
1326 }
1327
getIntPtrConstant(uint64_t Val,const SDLoc & DL,bool isTarget)1328 SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
1329 bool isTarget) {
1330 return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
1331 }
1332
getShiftAmountConstant(uint64_t Val,EVT VT,const SDLoc & DL,bool LegalTypes)1333 SDValue SelectionDAG::getShiftAmountConstant(uint64_t Val, EVT VT,
1334 const SDLoc &DL, bool LegalTypes) {
1335 EVT ShiftVT = TLI->getShiftAmountTy(VT, getDataLayout(), LegalTypes);
1336 return getConstant(Val, DL, ShiftVT);
1337 }
1338
getConstantFP(const APFloat & V,const SDLoc & DL,EVT VT,bool isTarget)1339 SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
1340 bool isTarget) {
1341 return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
1342 }
1343
getConstantFP(const ConstantFP & V,const SDLoc & DL,EVT VT,bool isTarget)1344 SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
1345 EVT VT, bool isTarget) {
1346 assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");
1347
1348 EVT EltVT = VT.getScalarType();
1349
1350 // Do the map lookup using the actual bit pattern for the floating point
1351 // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
1352 // we don't have issues with SNANs.
1353 unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
1354 FoldingSetNodeID ID;
1355 AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1356 ID.AddPointer(&V);
1357 void *IP = nullptr;
1358 SDNode *N = nullptr;
1359 if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1360 if (!VT.isVector())
1361 return SDValue(N, 0);
1362
1363 if (!N) {
1364 N = newSDNode<ConstantFPSDNode>(isTarget, &V, EltVT);
1365 CSEMap.InsertNode(N, IP);
1366 InsertNode(N);
1367 }
1368
1369 SDValue Result(N, 0);
1370 if (VT.isVector())
1371 Result = getSplatBuildVector(VT, DL, Result);
1372 NewSDValueDbgMsg(Result, "Creating fp constant: ", this);
1373 return Result;
1374 }
1375
getConstantFP(double Val,const SDLoc & DL,EVT VT,bool isTarget)1376 SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
1377 bool isTarget) {
1378 EVT EltVT = VT.getScalarType();
1379 if (EltVT == MVT::f32)
1380 return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
1381 else if (EltVT == MVT::f64)
1382 return getConstantFP(APFloat(Val), DL, VT, isTarget);
1383 else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
1384 EltVT == MVT::f16) {
1385 bool Ignored;
1386 APFloat APF = APFloat(Val);
1387 APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
1388 &Ignored);
1389 return getConstantFP(APF, DL, VT, isTarget);
1390 } else
1391 llvm_unreachable("Unsupported type in getConstantFP");
1392 }
1393
getGlobalAddress(const GlobalValue * GV,const SDLoc & DL,EVT VT,int64_t Offset,bool isTargetGA,unsigned TargetFlags)1394 SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
1395 EVT VT, int64_t Offset, bool isTargetGA,
1396 unsigned TargetFlags) {
1397 assert((TargetFlags == 0 || isTargetGA) &&
1398 "Cannot set target flags on target-independent globals");
1399
1400 // Truncate (with sign-extension) the offset value to the pointer size.
1401 unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
1402 if (BitWidth < 64)
1403 Offset = SignExtend64(Offset, BitWidth);
1404
1405 unsigned Opc;
1406 if (GV->isThreadLocal())
1407 Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
1408 else
1409 Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;
1410
1411 FoldingSetNodeID ID;
1412 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1413 ID.AddPointer(GV);
1414 ID.AddInteger(Offset);
1415 ID.AddInteger(TargetFlags);
1416 void *IP = nullptr;
1417 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
1418 return SDValue(E, 0);
1419
1420 auto *N = newSDNode<GlobalAddressSDNode>(
1421 Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
1422 CSEMap.InsertNode(N, IP);
1423 InsertNode(N);
1424 return SDValue(N, 0);
1425 }
1426
getFrameIndex(int FI,EVT VT,bool isTarget)1427 SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
1428 unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
1429 FoldingSetNodeID ID;
1430 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1431 ID.AddInteger(FI);
1432 void *IP = nullptr;
1433 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1434 return SDValue(E, 0);
1435
1436 auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
1437 CSEMap.InsertNode(N, IP);
1438 InsertNode(N);
1439 return SDValue(N, 0);
1440 }
1441
getJumpTable(int JTI,EVT VT,bool isTarget,unsigned TargetFlags)1442 SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
1443 unsigned TargetFlags) {
1444 assert((TargetFlags == 0 || isTarget) &&
1445 "Cannot set target flags on target-independent jump tables");
1446 unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
1447 FoldingSetNodeID ID;
1448 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1449 ID.AddInteger(JTI);
1450 ID.AddInteger(TargetFlags);
1451 void *IP = nullptr;
1452 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1453 return SDValue(E, 0);
1454
1455 auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
1456 CSEMap.InsertNode(N, IP);
1457 InsertNode(N);
1458 return SDValue(N, 0);
1459 }
1460
1461 SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
1462 unsigned Alignment, int Offset,
1463 bool isTarget,
1464 unsigned TargetFlags) {
1465 assert((TargetFlags == 0 || isTarget) &&
1466 "Cannot set target flags on target-independent globals");
1467 if (Alignment == 0)
1468 Alignment = shouldOptForSize()
1469 ? getDataLayout().getABITypeAlignment(C->getType())
1470 : getDataLayout().getPrefTypeAlignment(C->getType());
1471 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1472 FoldingSetNodeID ID;
1473 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1474 ID.AddInteger(Alignment);
1475 ID.AddInteger(Offset);
1476 ID.AddPointer(C);
1477 ID.AddInteger(TargetFlags);
1478 void *IP = nullptr;
1479 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1480 return SDValue(E, 0);
1481
1482 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
1483 TargetFlags);
1484 CSEMap.InsertNode(N, IP);
1485 InsertNode(N);
1486 return SDValue(N, 0);
1487 }
1488
1489 SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
1490 unsigned Alignment, int Offset,
1491 bool isTarget,
1492 unsigned TargetFlags) {
1493 assert((TargetFlags == 0 || isTarget) &&
1494 "Cannot set target flags on target-independent globals");
1495 if (Alignment == 0)
1496 Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
1497 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1498 FoldingSetNodeID ID;
1499 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1500 ID.AddInteger(Alignment);
1501 ID.AddInteger(Offset);
1502 C->addSelectionDAGCSEId(ID);
1503 ID.AddInteger(TargetFlags);
1504 void *IP = nullptr;
1505 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1506 return SDValue(E, 0);
1507
1508 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
1509 TargetFlags);
1510 CSEMap.InsertNode(N, IP);
1511 InsertNode(N);
1512 return SDValue(N, 0);
1513 }
1514
1515 SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
1516 unsigned TargetFlags) {
1517 FoldingSetNodeID ID;
1518 AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
1519 ID.AddInteger(Index);
1520 ID.AddInteger(Offset);
1521 ID.AddInteger(TargetFlags);
1522 void *IP = nullptr;
1523 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1524 return SDValue(E, 0);
1525
1526 auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
1527 CSEMap.InsertNode(N, IP);
1528 InsertNode(N);
1529 return SDValue(N, 0);
1530 }
1531
1532 SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
1533 FoldingSetNodeID ID;
1534 AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
1535 ID.AddPointer(MBB);
1536 void *IP = nullptr;
1537 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1538 return SDValue(E, 0);
1539
1540 auto *N = newSDNode<BasicBlockSDNode>(MBB);
1541 CSEMap.InsertNode(N, IP);
1542 InsertNode(N);
1543 return SDValue(N, 0);
1544 }
1545
1546 SDValue SelectionDAG::getValueType(EVT VT) {
1547 if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
1548 ValueTypeNodes.size())
1549 ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);
1550
1551 SDNode *&N = VT.isExtended() ?
1552 ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];
1553
1554 if (N) return SDValue(N, 0);
1555 N = newSDNode<VTSDNode>(VT);
1556 InsertNode(N);
1557 return SDValue(N, 0);
1558 }
1559
1560 SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
1561 SDNode *&N = ExternalSymbols[Sym];
1562 if (N) return SDValue(N, 0);
1563 N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
1564 InsertNode(N);
1565 return SDValue(N, 0);
1566 }
1567
1568 SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
1569 SDNode *&N = MCSymbols[Sym];
1570 if (N)
1571 return SDValue(N, 0);
1572 N = newSDNode<MCSymbolSDNode>(Sym, VT);
1573 InsertNode(N);
1574 return SDValue(N, 0);
1575 }
1576
1577 SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
1578 unsigned TargetFlags) {
1579 SDNode *&N =
1580 TargetExternalSymbols[std::pair<std::string, unsigned>(Sym, TargetFlags)];
1581 if (N) return SDValue(N, 0);
1582 N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
1583 InsertNode(N);
1584 return SDValue(N, 0);
1585 }
1586
1587 SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
1588 if ((unsigned)Cond >= CondCodeNodes.size())
1589 CondCodeNodes.resize(Cond+1);
1590
1591 if (!CondCodeNodes[Cond]) {
1592 auto *N = newSDNode<CondCodeSDNode>(Cond);
1593 CondCodeNodes[Cond] = N;
1594 InsertNode(N);
1595 }
1596
1597 return SDValue(CondCodeNodes[Cond], 0);
1598 }
1599
1600 /// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
1601 /// point at N1 to point at N2 and indices that point at N2 to point at N1.
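/// For example, with 4-element vectors,
///   shuffle(N1, N2, <0, 5, 2, 7>)  ==  shuffle(N2, N1, <4, 1, 6, 3>),
/// since mask indices of NElts or more select from the second operand.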
1602 static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
1603 std::swap(N1, N2);
1604 ShuffleVectorSDNode::commuteMask(M);
1605 }
1606
1607 SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
1608 SDValue N2, ArrayRef<int> Mask) {
1609 assert(VT.getVectorNumElements() == Mask.size() &&
1610 "Must have the same number of vector elements as mask elements!");
1611 assert(VT == N1.getValueType() && VT == N2.getValueType() &&
1612 "Invalid VECTOR_SHUFFLE");
1613
1614 // Canonicalize shuffle undef, undef -> undef
1615 if (N1.isUndef() && N2.isUndef())
1616 return getUNDEF(VT);
1617
1618 // Validate that all indices in Mask are within the range of the elements
1619 // input to the shuffle.
1620 int NElts = Mask.size();
1621 assert(llvm::all_of(Mask,
1622 [&](int M) { return M < (NElts * 2) && M >= -1; }) &&
1623 "Index out of range");
1624
1625 // Copy the mask so we can do any needed cleanup.
1626 SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end());
1627
1628 // Canonicalize shuffle v, v -> v, undef
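// For example, shuffle(A, A, <0, 5, 2, 7>) becomes
// shuffle(A, undef, <0, 1, 2, 3>).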
1629 if (N1 == N2) {
1630 N2 = getUNDEF(VT);
1631 for (int i = 0; i != NElts; ++i)
1632 if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
1633 }
1634
1635 // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
1636 if (N1.isUndef())
1637 commuteShuffle(N1, N2, MaskVec);
1638
1639 if (TLI->hasVectorBlend()) {
1640 // If shuffling a splat, try to blend the splat instead. We do this here so
1641 // that even when this arises during lowering we don't have to re-handle it.
1642 auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
1643 BitVector UndefElements;
1644 SDValue Splat = BV->getSplatValue(&UndefElements);
1645 if (!Splat)
1646 return;
1647
1648 for (int i = 0; i < NElts; ++i) {
1649 if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
1650 continue;
1651
1652 // If this input comes from undef, mark it as such.
1653 if (UndefElements[MaskVec[i] - Offset]) {
1654 MaskVec[i] = -1;
1655 continue;
1656 }
1657
1658 // If we can blend a non-undef lane, use that instead.
1659 if (!UndefElements[i])
1660 MaskVec[i] = i + Offset;
1661 }
1662 };
1663 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
1664 BlendSplat(N1BV, 0);
1665 if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
1666 BlendSplat(N2BV, NElts);
1667 }
1668
1669 // Canonicalize: if all indices point into lhs, -> shuffle lhs, undef
1670 // Canonicalize: if all indices point into rhs, -> shuffle rhs, undef
1671 bool AllLHS = true, AllRHS = true;
1672 bool N2Undef = N2.isUndef();
1673 for (int i = 0; i != NElts; ++i) {
1674 if (MaskVec[i] >= NElts) {
1675 if (N2Undef)
1676 MaskVec[i] = -1;
1677 else
1678 AllLHS = false;
1679 } else if (MaskVec[i] >= 0) {
1680 AllRHS = false;
1681 }
1682 }
1683 if (AllLHS && AllRHS)
1684 return getUNDEF(VT);
1685 if (AllLHS && !N2Undef)
1686 N2 = getUNDEF(VT);
1687 if (AllRHS) {
1688 N1 = getUNDEF(VT);
1689 commuteShuffle(N1, N2, MaskVec);
1690 }
1691 // Reset our undef status after accounting for the mask.
1692 N2Undef = N2.isUndef();
1693 // Re-check whether both sides ended up undef.
1694 if (N1.isUndef() && N2Undef)
1695 return getUNDEF(VT);
1696
1697 // If this is an identity shuffle, return the first operand.
1698 bool Identity = true, AllSame = true;
1699 for (int i = 0; i != NElts; ++i) {
1700 if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
1701 if (MaskVec[i] != MaskVec[0]) AllSame = false;
1702 }
1703 if (Identity && NElts)
1704 return N1;
1705
1706 // Shuffling a constant splat doesn't change the result.
1707 if (N2Undef) {
1708 SDValue V = N1;
1709
1710 // Look through any bitcasts. We check that these don't change the number
1711 // (and size) of elements and only change their types.
1712 while (V.getOpcode() == ISD::BITCAST)
1713 V = V->getOperand(0);
1714
1715 // A splat should always show up as a build vector node.
1716 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
1717 BitVector UndefElements;
1718 SDValue Splat = BV->getSplatValue(&UndefElements);
1719 // If this is a splat of an undef, shuffling it is also undef.
1720 if (Splat && Splat.isUndef())
1721 return getUNDEF(VT);
1722
1723 bool SameNumElts =
1724 V.getValueType().getVectorNumElements() == VT.getVectorNumElements();
1725
1726 // We only have a splat which can skip shuffles if there is a splatted
1727 // value and no undef lanes rearranged by the shuffle.
1728 if (Splat && UndefElements.none()) {
1729 // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
1730 // number of elements matches or the splatted value is a zero constant.
1731 if (SameNumElts)
1732 return N1;
1733 if (auto *C = dyn_cast<ConstantSDNode>(Splat))
1734 if (C->isNullValue())
1735 return N1;
1736 }
1737
1738 // If the shuffle itself creates a splat, build the vector directly.
1739 if (AllSame && SameNumElts) {
1740 EVT BuildVT = BV->getValueType(0);
1741 const SDValue &Splatted = BV->getOperand(MaskVec[0]);
1742 SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);
1743
1744 // We may have jumped through bitcasts, so the type of the
1745 // BUILD_VECTOR may not match the type of the shuffle.
1746 if (BuildVT != VT)
1747 NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
1748 return NewBV;
1749 }
1750 }
1751 }
1752
1753 FoldingSetNodeID ID;
1754 SDValue Ops[2] = { N1, N2 };
1755 AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
1756 for (int i = 0; i != NElts; ++i)
1757 ID.AddInteger(MaskVec[i]);
1758
1759 void* IP = nullptr;
1760 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1761 return SDValue(E, 0);
1762
1763 // Allocate the mask array for the node out of the BumpPtrAllocator, since
1764 // SDNode doesn't have access to it. This memory will be "leaked" when
1765 // the node is deallocated, but recovered when the NodeAllocator is released.
1766 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
1767 llvm::copy(MaskVec, MaskAlloc);
1768
1769 auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(),
1770 dl.getDebugLoc(), MaskAlloc);
1771 createOperands(N, Ops);
1772
1773 CSEMap.InsertNode(N, IP);
1774 InsertNode(N);
1775 SDValue V = SDValue(N, 0);
1776 NewSDValueDbgMsg(V, "Creating new node: ", this);
1777 return V;
1778 }
1779
1780 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) {
1781 EVT VT = SV.getValueType(0);
1782 SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end());
1783 ShuffleVectorSDNode::commuteMask(MaskVec);
1784
1785 SDValue Op0 = SV.getOperand(0);
1786 SDValue Op1 = SV.getOperand(1);
1787 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec);
1788 }
1789
1790 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
1791 FoldingSetNodeID ID;
1792 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None);
1793 ID.AddInteger(RegNo);
1794 void *IP = nullptr;
1795 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1796 return SDValue(E, 0);
1797
1798 auto *N = newSDNode<RegisterSDNode>(RegNo, VT);
1799 N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA);
1800 CSEMap.InsertNode(N, IP);
1801 InsertNode(N);
1802 return SDValue(N, 0);
1803 }
1804
1805 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
1806 FoldingSetNodeID ID;
1807 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None);
1808 ID.AddPointer(RegMask);
1809 void *IP = nullptr;
1810 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1811 return SDValue(E, 0);
1812
1813 auto *N = newSDNode<RegisterMaskSDNode>(RegMask);
1814 CSEMap.InsertNode(N, IP);
1815 InsertNode(N);
1816 return SDValue(N, 0);
1817 }
1818
1819 SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root,
1820 MCSymbol *Label) {
1821 return getLabelNode(ISD::EH_LABEL, dl, Root, Label);
1822 }
1823
1824 SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl,
1825 SDValue Root, MCSymbol *Label) {
1826 FoldingSetNodeID ID;
1827 SDValue Ops[] = { Root };
1828 AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops);
1829 ID.AddPointer(Label);
1830 void *IP = nullptr;
1831 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1832 return SDValue(E, 0);
1833
1834 auto *N =
1835 newSDNode<LabelSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(), Label);
1836 createOperands(N, Ops);
1837
1838 CSEMap.InsertNode(N, IP);
1839 InsertNode(N);
1840 return SDValue(N, 0);
1841 }
1842
1843 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
1844 int64_t Offset, bool isTarget,
1845 unsigned TargetFlags) {
1846 unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
1847
1848 FoldingSetNodeID ID;
1849 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1850 ID.AddPointer(BA);
1851 ID.AddInteger(Offset);
1852 ID.AddInteger(TargetFlags);
1853 void *IP = nullptr;
1854 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1855 return SDValue(E, 0);
1856
1857 auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags);
1858 CSEMap.InsertNode(N, IP);
1859 InsertNode(N);
1860 return SDValue(N, 0);
1861 }
1862
1863 SDValue SelectionDAG::getSrcValue(const Value *V) {
1864 assert((!V || V->getType()->isPointerTy()) &&
1865 "SrcValue is not a pointer?");
1866
1867 FoldingSetNodeID ID;
1868 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None);
1869 ID.AddPointer(V);
1870
1871 void *IP = nullptr;
1872 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1873 return SDValue(E, 0);
1874
1875 auto *N = newSDNode<SrcValueSDNode>(V);
1876 CSEMap.InsertNode(N, IP);
1877 InsertNode(N);
1878 return SDValue(N, 0);
1879 }
1880
1881 SDValue SelectionDAG::getMDNode(const MDNode *MD) {
1882 FoldingSetNodeID ID;
1883 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None);
1884 ID.AddPointer(MD);
1885
1886 void *IP = nullptr;
1887 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1888 return SDValue(E, 0);
1889
1890 auto *N = newSDNode<MDNodeSDNode>(MD);
1891 CSEMap.InsertNode(N, IP);
1892 InsertNode(N);
1893 return SDValue(N, 0);
1894 }
1895
1896 SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) {
1897 if (VT == V.getValueType())
1898 return V;
1899
1900 return getNode(ISD::BITCAST, SDLoc(V), VT, V);
1901 }
1902
1903 SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr,
1904 unsigned SrcAS, unsigned DestAS) {
1905 SDValue Ops[] = {Ptr};
1906 FoldingSetNodeID ID;
1907 AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops);
1908 ID.AddInteger(SrcAS);
1909 ID.AddInteger(DestAS);
1910
1911 void *IP = nullptr;
1912 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1913 return SDValue(E, 0);
1914
1915 auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(),
1916 VT, SrcAS, DestAS);
1917 createOperands(N, Ops);
1918
1919 CSEMap.InsertNode(N, IP);
1920 InsertNode(N);
1921 return SDValue(N, 0);
1922 }
1923
1924 /// getShiftAmountOperand - Return the specified value cast to
1925 /// the target's desired shift amount type.
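/// For example, if the target's preferred shift-amount type for LHSTy is i8,
/// a wider shift amount is truncated to i8 and a narrower one zero-extended.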
1926 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
1927 EVT OpTy = Op.getValueType();
1928 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout());
1929 if (OpTy == ShTy || OpTy.isVector()) return Op;
1930
1931 return getZExtOrTrunc(Op, SDLoc(Op), ShTy);
1932 }
1933
1934 SDValue SelectionDAG::expandVAArg(SDNode *Node) {
1935 SDLoc dl(Node);
1936 const TargetLowering &TLI = getTargetLoweringInfo();
1937 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
1938 EVT VT = Node->getValueType(0);
1939 SDValue Tmp1 = Node->getOperand(0);
1940 SDValue Tmp2 = Node->getOperand(1);
1941 const MaybeAlign MA(Node->getConstantOperandVal(3));
1942
1943 SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1,
1944 Tmp2, MachinePointerInfo(V));
1945 SDValue VAList = VAListLoad;
1946
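// Align the va_list pointer up when the argument needs more alignment than
// the minimum stack slot: VAList = (VAList + Align - 1) & ~(Align - 1).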
1947 if (MA && *MA > TLI.getMinStackArgumentAlignment()) {
1948 VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
1949 getConstant(MA->value() - 1, dl, VAList.getValueType()));
1950
1951 VAList =
1952 getNode(ISD::AND, dl, VAList.getValueType(), VAList,
1953 getConstant(-(int64_t)MA->value(), dl, VAList.getValueType()));
1954 }
1955
1956 // Increment the pointer, VAList, to the next vaarg
1957 Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
1958 getConstant(getDataLayout().getTypeAllocSize(
1959 VT.getTypeForEVT(*getContext())),
1960 dl, VAList.getValueType()));
1961 // Store the incremented VAList to the legalized pointer
1962 Tmp1 =
1963 getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V));
1964 // Load the actual argument out of the pointer VAList
1965 return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo());
1966 }
1967
1968 SDValue SelectionDAG::expandVACopy(SDNode *Node) {
1969 SDLoc dl(Node);
1970 const TargetLowering &TLI = getTargetLoweringInfo();
1971 // This defaults to loading a pointer from the input and storing it to the
1972 // output, returning the chain.
1973 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
1974 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
1975 SDValue Tmp1 =
1976 getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0),
1977 Node->getOperand(2), MachinePointerInfo(VS));
1978 return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
1979 MachinePointerInfo(VD));
1980 }
1981
1982 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
1983 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
1984 unsigned ByteSize = VT.getStoreSize();
1985 Type *Ty = VT.getTypeForEVT(*getContext());
1986 unsigned StackAlign =
1987 std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign);
1988
1989 int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false);
1990 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
1991 }
1992
1993 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
1994 unsigned Bytes = std::max(VT1.getStoreSize(), VT2.getStoreSize());
1995 Type *Ty1 = VT1.getTypeForEVT(*getContext());
1996 Type *Ty2 = VT2.getTypeForEVT(*getContext());
1997 const DataLayout &DL = getDataLayout();
1998 unsigned Align =
1999 std::max(DL.getPrefTypeAlignment(Ty1), DL.getPrefTypeAlignment(Ty2));
2000
2001 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
2002 int FrameIdx = MFI.CreateStackObject(Bytes, Align, false);
2003 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
2004 }
2005
2006 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
2007 ISD::CondCode Cond, const SDLoc &dl) {
2008 EVT OpVT = N1.getValueType();
2009
2010 // These setcc operations always fold.
2011 switch (Cond) {
2012 default: break;
2013 case ISD::SETFALSE:
2014 case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT);
2015 case ISD::SETTRUE:
2016 case ISD::SETTRUE2: return getBoolConstant(true, dl, VT, OpVT);
2017
2018 case ISD::SETOEQ:
2019 case ISD::SETOGT:
2020 case ISD::SETOGE:
2021 case ISD::SETOLT:
2022 case ISD::SETOLE:
2023 case ISD::SETONE:
2024 case ISD::SETO:
2025 case ISD::SETUO:
2026 case ISD::SETUEQ:
2027 case ISD::SETUNE:
2028 assert(!OpVT.isInteger() && "Illegal setcc for integer!");
2029 break;
2030 }
2031
2032 if (OpVT.isInteger()) {
2033 // For EQ and NE, we can always pick a value for the undef to make the
2034 // predicate pass or fail, so we can return undef.
2035 // Matches behavior in llvm::ConstantFoldCompareInstruction.
2036 // icmp eq/ne X, undef -> undef.
2037 if ((N1.isUndef() || N2.isUndef()) &&
2038 (Cond == ISD::SETEQ || Cond == ISD::SETNE))
2039 return getUNDEF(VT);
2040
2041 // If both operands are undef, we can return undef for int comparison.
2042 // icmp undef, undef -> undef.
2043 if (N1.isUndef() && N2.isUndef())
2044 return getUNDEF(VT);
2045
2046 // icmp X, X -> true/false
2047 // icmp X, undef -> true/false because undef could be X.
2048 if (N1 == N2)
2049 return getBoolConstant(ISD::isTrueWhenEqual(Cond), dl, VT, OpVT);
2050 }
2051
2052 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) {
2053 const APInt &C2 = N2C->getAPIntValue();
2054 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
2055 const APInt &C1 = N1C->getAPIntValue();
2056
2057 switch (Cond) {
2058 default: llvm_unreachable("Unknown integer setcc!");
2059 case ISD::SETEQ: return getBoolConstant(C1 == C2, dl, VT, OpVT);
2060 case ISD::SETNE: return getBoolConstant(C1 != C2, dl, VT, OpVT);
2061 case ISD::SETULT: return getBoolConstant(C1.ult(C2), dl, VT, OpVT);
2062 case ISD::SETUGT: return getBoolConstant(C1.ugt(C2), dl, VT, OpVT);
2063 case ISD::SETULE: return getBoolConstant(C1.ule(C2), dl, VT, OpVT);
2064 case ISD::SETUGE: return getBoolConstant(C1.uge(C2), dl, VT, OpVT);
2065 case ISD::SETLT: return getBoolConstant(C1.slt(C2), dl, VT, OpVT);
2066 case ISD::SETGT: return getBoolConstant(C1.sgt(C2), dl, VT, OpVT);
2067 case ISD::SETLE: return getBoolConstant(C1.sle(C2), dl, VT, OpVT);
2068 case ISD::SETGE: return getBoolConstant(C1.sge(C2), dl, VT, OpVT);
2069 }
2070 }
2071 }
2072
2073 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
2074 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
2075
2076 if (N1CFP && N2CFP) {
2077 APFloat::cmpResult R = N1CFP->getValueAPF().compare(N2CFP->getValueAPF());
2078 switch (Cond) {
2079 default: break;
2080 case ISD::SETEQ: if (R==APFloat::cmpUnordered)
2081 return getUNDEF(VT);
2082 LLVM_FALLTHROUGH;
2083 case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT,
2084 OpVT);
2085 case ISD::SETNE: if (R==APFloat::cmpUnordered)
2086 return getUNDEF(VT);
2087 LLVM_FALLTHROUGH;
2088 case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2089 R==APFloat::cmpLessThan, dl, VT,
2090 OpVT);
2091 case ISD::SETLT: if (R==APFloat::cmpUnordered)
2092 return getUNDEF(VT);
2093 LLVM_FALLTHROUGH;
2094 case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT,
2095 OpVT);
2096 case ISD::SETGT: if (R==APFloat::cmpUnordered)
2097 return getUNDEF(VT);
2098 LLVM_FALLTHROUGH;
2099 case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl,
2100 VT, OpVT);
2101 case ISD::SETLE: if (R==APFloat::cmpUnordered)
2102 return getUNDEF(VT);
2103 LLVM_FALLTHROUGH;
2104 case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan ||
2105 R==APFloat::cmpEqual, dl, VT,
2106 OpVT);
2107 case ISD::SETGE: if (R==APFloat::cmpUnordered)
2108 return getUNDEF(VT);
2109 LLVM_FALLTHROUGH;
2110 case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2111 R==APFloat::cmpEqual, dl, VT, OpVT);
2112 case ISD::SETO: return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT,
2113 OpVT);
2114 case ISD::SETUO: return getBoolConstant(R==APFloat::cmpUnordered, dl, VT,
2115 OpVT);
2116 case ISD::SETUEQ: return getBoolConstant(R==APFloat::cmpUnordered ||
2117 R==APFloat::cmpEqual, dl, VT,
2118 OpVT);
2119 case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT,
2120 OpVT);
2121 case ISD::SETULT: return getBoolConstant(R==APFloat::cmpUnordered ||
2122 R==APFloat::cmpLessThan, dl, VT,
2123 OpVT);
2124 case ISD::SETUGT: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2125 R==APFloat::cmpUnordered, dl, VT,
2126 OpVT);
2127 case ISD::SETULE: return getBoolConstant(R!=APFloat::cmpGreaterThan, dl,
2128 VT, OpVT);
2129 case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT,
2130 OpVT);
2131 }
2132 } else if (N1CFP && OpVT.isSimple() && !N2.isUndef()) {
2133 // Ensure that the constant occurs on the RHS.
2134 ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
2135 if (!TLI->isCondCodeLegal(SwappedCond, OpVT.getSimpleVT()))
2136 return SDValue();
2137 return getSetCC(dl, VT, N2, N1, SwappedCond);
2138 } else if ((N2CFP && N2CFP->getValueAPF().isNaN()) ||
2139 (OpVT.isFloatingPoint() && (N1.isUndef() || N2.isUndef()))) {
2140 // If an operand is known to be a nan (or undef that could be a nan), we can
2141 // fold it.
2142 // Choosing NaN for the undef will always make unordered comparisons
2143 // succeed and ordered comparisons fail.
2144 // Matches behavior in llvm::ConstantFoldCompareInstruction.
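// For example, for NaN < X: SETOLT folds to false, SETULT folds to true,
// and the "don't care" SETLT folds to undef.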
2145 switch (ISD::getUnorderedFlavor(Cond)) {
2146 default:
2147 llvm_unreachable("Unknown flavor!");
2148 case 0: // Known false.
2149 return getBoolConstant(false, dl, VT, OpVT);
2150 case 1: // Known true.
2151 return getBoolConstant(true, dl, VT, OpVT);
2152 case 2: // Undefined.
2153 return getUNDEF(VT);
2154 }
2155 }
2156
2157 // Could not fold it.
2158 return SDValue();
2159 }
2160
2161 /// See if the specified operand can be simplified with the knowledge that only
2162 /// the bits specified by DemandedBits are used.
2163 /// TODO: really we should be making this into the DAG equivalent of
2164 /// SimplifyMultipleUseDemandedBits and not generate any new nodes.
2165 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits) {
2166 EVT VT = V.getValueType();
2167 APInt DemandedElts = VT.isVector()
2168 ? APInt::getAllOnesValue(VT.getVectorNumElements())
2169 : APInt(1, 1);
2170 return GetDemandedBits(V, DemandedBits, DemandedElts);
2171 }
2172
2173 /// See if the specified operand can be simplified with the knowledge that only
2174 /// the bits specified by DemandedBits are used in the elements specified by
2175 /// DemandedElts.
2176 /// TODO: really we should be making this into the DAG equivalent of
2177 /// SimplifyMultipleUseDemandedBits and not generate any new nodes.
2178 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &DemandedBits,
2179 const APInt &DemandedElts) {
2180 switch (V.getOpcode()) {
2181 default:
2182 break;
2183 case ISD::Constant: {
2184 auto *CV = cast<ConstantSDNode>(V.getNode());
2185 assert(CV && "Const value should be ConstSDNode.");
2186 const APInt &CVal = CV->getAPIntValue();
2187 APInt NewVal = CVal & DemandedBits;
2188 if (NewVal != CVal)
2189 return getConstant(NewVal, SDLoc(V), V.getValueType());
2190 break;
2191 }
2192 case ISD::OR:
2193 case ISD::XOR:
2194 case ISD::SIGN_EXTEND_INREG:
2195 return TLI->SimplifyMultipleUseDemandedBits(V, DemandedBits, DemandedElts,
2196 *this, 0);
2197 case ISD::SRL:
2198 // Only look at single-use SRLs.
2199 if (!V.getNode()->hasOneUse())
2200 break;
2201 if (auto *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
2202 // See if we can recursively simplify the LHS.
2203 unsigned Amt = RHSC->getZExtValue();
2204
2205 // Watch out for shift count overflow though.
2206 if (Amt >= DemandedBits.getBitWidth())
2207 break;
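// For example, if only bit 0 of (X srl 3) is demanded, then only bit 3 of X
// is demanded.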
2208 APInt SrcDemandedBits = DemandedBits << Amt;
2209 if (SDValue SimplifyLHS =
2210 GetDemandedBits(V.getOperand(0), SrcDemandedBits))
2211 return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS,
2212 V.getOperand(1));
2213 }
2214 break;
2215 case ISD::AND: {
2216 // X & -1 -> X (ignoring bits which aren't demanded).
2217 // Also handle the case where masked out bits in X are known to be zero.
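// For example, with DemandedBits == 0x0F, (X & 0xFF) simplifies to X because
// the AND cannot change any demanded bit.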
2218 if (ConstantSDNode *RHSC = isConstOrConstSplat(V.getOperand(1))) {
2219 const APInt &AndVal = RHSC->getAPIntValue();
2220 if (DemandedBits.isSubsetOf(AndVal) ||
2221 DemandedBits.isSubsetOf(computeKnownBits(V.getOperand(0)).Zero |
2222 AndVal))
2223 return V.getOperand(0);
2224 }
2225 break;
2226 }
2227 case ISD::ANY_EXTEND: {
2228 SDValue Src = V.getOperand(0);
2229 unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
2230 // Being conservative here - only peek through if we only demand bits in the
2231 // non-extended source (even though the extended bits are technically
2232 // undef).
2233 if (DemandedBits.getActiveBits() > SrcBitWidth)
2234 break;
2235 APInt SrcDemandedBits = DemandedBits.trunc(SrcBitWidth);
2236 if (SDValue DemandedSrc = GetDemandedBits(Src, SrcDemandedBits))
2237 return getNode(ISD::ANY_EXTEND, SDLoc(V), V.getValueType(), DemandedSrc);
2238 break;
2239 }
2240 }
2241 return SDValue();
2242 }
2243
2244 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
2245 /// use this predicate to simplify operations downstream.
2246 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
2247 unsigned BitWidth = Op.getScalarValueSizeInBits();
2248 return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth);
2249 }
2250
2251 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
2252 /// this predicate to simplify operations downstream. Mask is known to be zero
2253 /// for bits that V cannot have.
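/// For example, MaskedValueIsZero(V, 0x1) asks whether the low bit of V is
/// known to be zero.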
2254 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
2255 unsigned Depth) const {
2256 EVT VT = V.getValueType();
2257 APInt DemandedElts = VT.isVector()
2258 ? APInt::getAllOnesValue(VT.getVectorNumElements())
2259 : APInt(1, 1);
2260 return MaskedValueIsZero(V, Mask, DemandedElts, Depth);
2261 }
2262
2263 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero in
2264 /// DemandedElts. We use this predicate to simplify operations downstream.
2265 /// Mask is known to be zero for bits that V cannot have.
2266 bool SelectionDAG::MaskedValueIsZero(SDValue V, const APInt &Mask,
2267 const APInt &DemandedElts,
2268 unsigned Depth) const {
2269 return Mask.isSubsetOf(computeKnownBits(V, DemandedElts, Depth).Zero);
2270 }
2271
2272 /// MaskedValueIsAllOnes - Return true if '(Op & Mask) == Mask'.
2273 bool SelectionDAG::MaskedValueIsAllOnes(SDValue V, const APInt &Mask,
2274 unsigned Depth) const {
2275 return Mask.isSubsetOf(computeKnownBits(V, Depth).One);
2276 }
2277
2278 /// isSplatValue - Return true if the vector V has the same value
2279 /// across all DemandedElts.
2280 bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts,
2281 APInt &UndefElts) {
2282 if (!DemandedElts)
2283 return false; // No demanded elts, better to assume we don't know anything.
2284
2285 EVT VT = V.getValueType();
2286 assert(VT.isVector() && "Vector type expected");
2287
2288 unsigned NumElts = VT.getVectorNumElements();
2289 assert(NumElts == DemandedElts.getBitWidth() && "Vector size mismatch");
2290 UndefElts = APInt::getNullValue(NumElts);
2291
2292 switch (V.getOpcode()) {
2293 case ISD::BUILD_VECTOR: {
2294 SDValue Scl;
2295 for (unsigned i = 0; i != NumElts; ++i) {
2296 SDValue Op = V.getOperand(i);
2297 if (Op.isUndef()) {
2298 UndefElts.setBit(i);
2299 continue;
2300 }
2301 if (!DemandedElts[i])
2302 continue;
2303 if (Scl && Scl != Op)
2304 return false;
2305 Scl = Op;
2306 }
2307 return true;
2308 }
2309 case ISD::VECTOR_SHUFFLE: {
2310 // Check if this is a shuffle node doing a splat.
2311 // TODO: Do we need to handle shuffle(splat, undef, mask)?
2312 int SplatIndex = -1;
2313 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask();
2314 for (int i = 0; i != (int)NumElts; ++i) {
2315 int M = Mask[i];
2316 if (M < 0) {
2317 UndefElts.setBit(i);
2318 continue;
2319 }
2320 if (!DemandedElts[i])
2321 continue;
2322 if (0 <= SplatIndex && SplatIndex != M)
2323 return false;
2324 SplatIndex = M;
2325 }
2326 return true;
2327 }
2328 case ISD::EXTRACT_SUBVECTOR: {
2329 SDValue Src = V.getOperand(0);
2330 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(V.getOperand(1));
2331 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2332 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
2333 // Offset the demanded elts by the subvector index.
2334 uint64_t Idx = SubIdx->getZExtValue();
2335 APInt UndefSrcElts;
2336 APInt DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
2337 if (isSplatValue(Src, DemandedSrc, UndefSrcElts)) {
2338 UndefElts = UndefSrcElts.extractBits(NumElts, Idx);
2339 return true;
2340 }
2341 }
2342 break;
2343 }
2344 case ISD::ADD:
2345 case ISD::SUB:
2346 case ISD::AND: {
2347 APInt UndefLHS, UndefRHS;
2348 SDValue LHS = V.getOperand(0);
2349 SDValue RHS = V.getOperand(1);
2350 if (isSplatValue(LHS, DemandedElts, UndefLHS) &&
2351 isSplatValue(RHS, DemandedElts, UndefRHS)) {
2352 UndefElts = UndefLHS | UndefRHS;
2353 return true;
2354 }
2355 break;
2356 }
2357 }
2358
2359 return false;
2360 }
2361
2362 /// Helper wrapper to main isSplatValue function.
2363 bool SelectionDAG::isSplatValue(SDValue V, bool AllowUndefs) {
2364 EVT VT = V.getValueType();
2365 assert(VT.isVector() && "Vector type expected");
2366 unsigned NumElts = VT.getVectorNumElements();
2367
2368 APInt UndefElts;
2369 APInt DemandedElts = APInt::getAllOnesValue(NumElts);
2370 return isSplatValue(V, DemandedElts, UndefElts) &&
2371 (AllowUndefs || !UndefElts);
2372 }
2373
2374 SDValue SelectionDAG::getSplatSourceVector(SDValue V, int &SplatIdx) {
2375 V = peekThroughExtractSubvectors(V);
2376
2377 EVT VT = V.getValueType();
2378 unsigned Opcode = V.getOpcode();
2379 switch (Opcode) {
2380 default: {
2381 APInt UndefElts;
2382 APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
2383 if (isSplatValue(V, DemandedElts, UndefElts)) {
2384 // Handle case where all demanded elements are UNDEF.
2385 if (DemandedElts.isSubsetOf(UndefElts)) {
2386 SplatIdx = 0;
2387 return getUNDEF(VT);
2388 }
2389 SplatIdx = (UndefElts & DemandedElts).countTrailingOnes();
2390 return V;
2391 }
2392 break;
2393 }
2394 case ISD::VECTOR_SHUFFLE: {
2395 // Check if this is a shuffle node doing a splat.
2396 // TODO - remove this and rely purely on SelectionDAG::isSplatValue,
2397 // getTargetVShiftNode currently struggles without the splat source.
2398 auto *SVN = cast<ShuffleVectorSDNode>(V);
2399 if (!SVN->isSplat())
2400 break;
2401 int Idx = SVN->getSplatIndex();
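    // For example, with 4-element operands and splat index 6, the splatted
    // value is element 2 (6 % 4) of operand 1 (6 / 4).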
2402 int NumElts = V.getValueType().getVectorNumElements();
2403 SplatIdx = Idx % NumElts;
2404 return V.getOperand(Idx / NumElts);
2405 }
2406 }
2407
2408 return SDValue();
2409 }
2410
2411 SDValue SelectionDAG::getSplatValue(SDValue V) {
2412 int SplatIdx;
2413 if (SDValue SrcVector = getSplatSourceVector(V, SplatIdx))
2414 return getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V),
2415 SrcVector.getValueType().getScalarType(), SrcVector,
2416 getIntPtrConstant(SplatIdx, SDLoc(V)));
2417 return SDValue();
2418 }
2419
2420 /// If a SHL/SRA/SRL node has a constant or splat constant shift amount that
2421 /// is less than the element bit-width of the shift node, return it.
2422 static const APInt *getValidShiftAmountConstant(SDValue V,
2423 const APInt &DemandedElts) {
2424 unsigned BitWidth = V.getScalarValueSizeInBits();
2425 if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1), DemandedElts)) {
2426 // Shifting more than the bitwidth is not valid.
2427 const APInt &ShAmt = SA->getAPIntValue();
2428 if (ShAmt.ult(BitWidth))
2429 return &ShAmt;
2430 }
2431 return nullptr;
2432 }
2433
2434 /// If a SHL/SRA/SRL node has constant vector shift amounts that are all less
2435 /// than the element bit-width of the shift node, return the minimum value.
2436 static const APInt *
2437 getValidMinimumShiftAmountConstant(SDValue V, const APInt &DemandedElts) {
2438 unsigned BitWidth = V.getScalarValueSizeInBits();
2439 auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1));
2440 if (!BV)
2441 return nullptr;
2442 const APInt *MinShAmt = nullptr;
2443 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
2444 if (!DemandedElts[i])
2445 continue;
2446 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
2447 if (!SA)
2448 return nullptr;
2449 // Shifting more than the bitwidth is not valid.
2450 const APInt &ShAmt = SA->getAPIntValue();
2451 if (ShAmt.uge(BitWidth))
2452 return nullptr;
2453 if (MinShAmt && MinShAmt->ule(ShAmt))
2454 continue;
2455 MinShAmt = &ShAmt;
2456 }
2457 return MinShAmt;
2458 }
2459
2460 /// If a SHL/SRA/SRL node has constant vector shift amounts that are all less
2461 /// than the element bit-width of the shift node, return the maximum value.
2462 static const APInt *
2463 getValidMaximumShiftAmountConstant(SDValue V, const APInt &DemandedElts) {
2464 unsigned BitWidth = V.getScalarValueSizeInBits();
2465 auto *BV = dyn_cast<BuildVectorSDNode>(V.getOperand(1));
2466 if (!BV)
2467 return nullptr;
2468 const APInt *MaxShAmt = nullptr;
2469 for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
2470 if (!DemandedElts[i])
2471 continue;
2472 auto *SA = dyn_cast<ConstantSDNode>(BV->getOperand(i));
2473 if (!SA)
2474 return nullptr;
2475 // Shifting more than the bitwidth is not valid.
2476 const APInt &ShAmt = SA->getAPIntValue();
2477 if (ShAmt.uge(BitWidth))
2478 return nullptr;
2479 if (MaxShAmt && MaxShAmt->uge(ShAmt))
2480 continue;
2481 MaxShAmt = &ShAmt;
2482 }
2483 return MaxShAmt;
2484 }
2485
2486 /// Determine which bits of Op are known to be either zero or one and return
2487 /// them in Known. For vectors, the known bits are those that are shared by
2488 /// every vector element.
2489 KnownBits SelectionDAG::computeKnownBits(SDValue Op, unsigned Depth) const {
2490 EVT VT = Op.getValueType();
2491 APInt DemandedElts = VT.isVector()
2492 ? APInt::getAllOnesValue(VT.getVectorNumElements())
2493 : APInt(1, 1);
2494 return computeKnownBits(Op, DemandedElts, Depth);
2495 }
2496
2497 /// Determine which bits of Op are known to be either zero or one and return
2498 /// them in Known. The DemandedElts argument allows us to only collect the known
2499 /// bits that are shared by the requested vector elements.
2500 KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
2501 unsigned Depth) const {
2502 unsigned BitWidth = Op.getScalarValueSizeInBits();
2503
2504 KnownBits Known(BitWidth); // Don't know anything.
2505
2506 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
2507 // We know all of the bits for a constant!
2508 Known.One = C->getAPIntValue();
2509 Known.Zero = ~Known.One;
2510 return Known;
2511 }
2512 if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) {
2513 // We know all of the bits for a constant fp!
2514 Known.One = C->getValueAPF().bitcastToAPInt();
2515 Known.Zero = ~Known.One;
2516 return Known;
2517 }
2518
2519 if (Depth >= MaxRecursionDepth)
2520 return Known; // Limit search depth.
2521
2522 KnownBits Known2;
2523 unsigned NumElts = DemandedElts.getBitWidth();
2524 assert((!Op.getValueType().isVector() ||
2525 NumElts == Op.getValueType().getVectorNumElements()) &&
2526 "Unexpected vector size");
2527
2528 if (!DemandedElts)
2529 return Known; // No demanded elts, better to assume we don't know anything.
2530
2531 unsigned Opcode = Op.getOpcode();
2532 switch (Opcode) {
2533 case ISD::BUILD_VECTOR:
2534 // Collect the known bits that are shared by every demanded vector element.
2535 Known.Zero.setAllBits(); Known.One.setAllBits();
2536 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
2537 if (!DemandedElts[i])
2538 continue;
2539
2540 SDValue SrcOp = Op.getOperand(i);
2541 Known2 = computeKnownBits(SrcOp, Depth + 1);
2542
2543 // BUILD_VECTOR can implicitly truncate sources, so we must handle this.
2544 if (SrcOp.getValueSizeInBits() != BitWidth) {
2545 assert(SrcOp.getValueSizeInBits() > BitWidth &&
2546 "Expected BUILD_VECTOR implicit truncation");
2547 Known2 = Known2.trunc(BitWidth);
2548 }
2549
2550 // Known bits are the values that are shared by every demanded element.
2551 Known.One &= Known2.One;
2552 Known.Zero &= Known2.Zero;
2553
2554 // If we don't know any bits, early out.
2555 if (Known.isUnknown())
2556 break;
2557 }
2558 break;
2559 case ISD::VECTOR_SHUFFLE: {
2560 // Collect the known bits that are shared by every vector element referenced
2561 // by the shuffle.
2562 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
2563 Known.Zero.setAllBits(); Known.One.setAllBits();
2564 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
2565 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
2566 for (unsigned i = 0; i != NumElts; ++i) {
2567 if (!DemandedElts[i])
2568 continue;
2569
2570 int M = SVN->getMaskElt(i);
2571 if (M < 0) {
2572 // For UNDEF elements, we don't know anything about the common state of
2573 // the shuffle result.
2574 Known.resetAll();
2575 DemandedLHS.clearAllBits();
2576 DemandedRHS.clearAllBits();
2577 break;
2578 }
2579
2580 if ((unsigned)M < NumElts)
2581 DemandedLHS.setBit((unsigned)M % NumElts);
2582 else
2583 DemandedRHS.setBit((unsigned)M % NumElts);
2584 }
2585 // Known bits are the values that are shared by every demanded element.
2586 if (!!DemandedLHS) {
2587 SDValue LHS = Op.getOperand(0);
2588 Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1);
2589 Known.One &= Known2.One;
2590 Known.Zero &= Known2.Zero;
2591 }
2592 // If we don't know any bits, early out.
2593 if (Known.isUnknown())
2594 break;
2595 if (!!DemandedRHS) {
2596 SDValue RHS = Op.getOperand(1);
2597 Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1);
2598 Known.One &= Known2.One;
2599 Known.Zero &= Known2.Zero;
2600 }
2601 break;
2602 }
2603 case ISD::CONCAT_VECTORS: {
2604 // Split DemandedElts and test each of the demanded subvectors.
2605 Known.Zero.setAllBits(); Known.One.setAllBits();
2606 EVT SubVectorVT = Op.getOperand(0).getValueType();
2607 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
2608 unsigned NumSubVectors = Op.getNumOperands();
2609 for (unsigned i = 0; i != NumSubVectors; ++i) {
2610 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
2611 DemandedSub = DemandedSub.trunc(NumSubVectorElts);
2612 if (!!DemandedSub) {
2613 SDValue Sub = Op.getOperand(i);
2614 Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1);
2615 Known.One &= Known2.One;
2616 Known.Zero &= Known2.Zero;
2617 }
2618 // If we don't know any bits, early out.
2619 if (Known.isUnknown())
2620 break;
2621 }
2622 break;
2623 }
2624 case ISD::INSERT_SUBVECTOR: {
2625 // If we know the element index, demand any elements from the subvector and
2626 // the remainder from the src it's inserted into; otherwise demand them all.
2627 SDValue Src = Op.getOperand(0);
2628 SDValue Sub = Op.getOperand(1);
2629 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
2630 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
2631 if (SubIdx && SubIdx->getAPIntValue().ule(NumElts - NumSubElts)) {
2632 Known.One.setAllBits();
2633 Known.Zero.setAllBits();
2634 uint64_t Idx = SubIdx->getZExtValue();
2635 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
2636 if (!!DemandedSubElts) {
2637 Known = computeKnownBits(Sub, DemandedSubElts, Depth + 1);
2638 if (Known.isUnknown())
2639 break; // early-out.
2640 }
2641 APInt SubMask = APInt::getBitsSet(NumElts, Idx, Idx + NumSubElts);
2642 APInt DemandedSrcElts = DemandedElts & ~SubMask;
2643 if (!!DemandedSrcElts) {
2644 Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
2645 Known.One &= Known2.One;
2646 Known.Zero &= Known2.Zero;
2647 }
2648 } else {
2649 Known = computeKnownBits(Sub, Depth + 1);
2650 if (Known.isUnknown())
2651 break; // early-out.
2652 Known2 = computeKnownBits(Src, Depth + 1);
2653 Known.One &= Known2.One;
2654 Known.Zero &= Known2.Zero;
2655 }
2656 break;
2657 }
2658 case ISD::EXTRACT_SUBVECTOR: {
2659 // If we know the element index, just demand that subvector elements,
2660 // otherwise demand them all.
2661 SDValue Src = Op.getOperand(0);
2662 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
2663 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2664 APInt DemandedSrc = APInt::getAllOnesValue(NumSrcElts);
2665 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
2666 // Offset the demanded elts by the subvector index.
2667 uint64_t Idx = SubIdx->getZExtValue();
2668 DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
2669 }
2670 Known = computeKnownBits(Src, DemandedSrc, Depth + 1);
2671 break;
2672 }
2673 case ISD::SCALAR_TO_VECTOR: {
2674 // We know as much about scalar_to_vector as we know about its source,
2675 // which becomes the first element of an otherwise unknown vector.
2676 if (DemandedElts != 1)
2677 break;
2678
2679 SDValue N0 = Op.getOperand(0);
2680 Known = computeKnownBits(N0, Depth + 1);
2681 if (N0.getValueSizeInBits() != BitWidth)
2682 Known = Known.trunc(BitWidth);
2683
2684 break;
2685 }
2686 case ISD::BITCAST: {
2687 SDValue N0 = Op.getOperand(0);
2688 EVT SubVT = N0.getValueType();
2689 unsigned SubBitWidth = SubVT.getScalarSizeInBits();
2690
2691 // Ignore bitcasts from unsupported types.
2692 if (!(SubVT.isInteger() || SubVT.isFloatingPoint()))
2693 break;
2694
2695 // Fast handling of 'identity' bitcasts.
2696 if (BitWidth == SubBitWidth) {
2697 Known = computeKnownBits(N0, DemandedElts, Depth + 1);
2698 break;
2699 }
2700
2701 bool IsLE = getDataLayout().isLittleEndian();
2702
2703 // Bitcast 'small element' vector to 'large element' scalar/vector.
2704 if ((BitWidth % SubBitWidth) == 0) {
2705 assert(N0.getValueType().isVector() && "Expected bitcast from vector");
2706
2707 // Collect known bits for the (larger) output by collecting the known
2708 // bits from each set of sub elements and shift these into place.
2709 // We need to separately call computeKnownBits for each set of
2710 // sub elements as the knownbits for each is likely to be different.
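      // For example, bitcasting v4i8 -> i32 gives SubScale == 4; on a
      // little-endian target sub element i supplies bits [8*i, 8*i+8) of the
      // wider result.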
2711 unsigned SubScale = BitWidth / SubBitWidth;
2712 APInt SubDemandedElts(NumElts * SubScale, 0);
2713 for (unsigned i = 0; i != NumElts; ++i)
2714 if (DemandedElts[i])
2715 SubDemandedElts.setBit(i * SubScale);
2716
2717 for (unsigned i = 0; i != SubScale; ++i) {
2718 Known2 = computeKnownBits(N0, SubDemandedElts.shl(i),
2719 Depth + 1);
2720 unsigned Shifts = IsLE ? i : SubScale - 1 - i;
2721 Known.One |= Known2.One.zext(BitWidth).shl(SubBitWidth * Shifts);
2722 Known.Zero |= Known2.Zero.zext(BitWidth).shl(SubBitWidth * Shifts);
2723 }
2724 }
2725
2726 // Bitcast 'large element' scalar/vector to 'small element' vector.
2727 if ((SubBitWidth % BitWidth) == 0) {
2728 assert(Op.getValueType().isVector() && "Expected bitcast to vector");
2729
2730 // Collect known bits for the (smaller) output by collecting the known
2731 // bits from the overlapping larger input elements and extracting the
2732 // sub sections we actually care about.
2733 unsigned SubScale = SubBitWidth / BitWidth;
2734 APInt SubDemandedElts(NumElts / SubScale, 0);
2735 for (unsigned i = 0; i != NumElts; ++i)
2736 if (DemandedElts[i])
2737 SubDemandedElts.setBit(i / SubScale);
2738
2739 Known2 = computeKnownBits(N0, SubDemandedElts, Depth + 1);
2740
2741 Known.Zero.setAllBits(); Known.One.setAllBits();
2742 for (unsigned i = 0; i != NumElts; ++i)
2743 if (DemandedElts[i]) {
2744 unsigned Shifts = IsLE ? i : NumElts - 1 - i;
2745 unsigned Offset = (Shifts % SubScale) * BitWidth;
2746 Known.One &= Known2.One.lshr(Offset).trunc(BitWidth);
2747 Known.Zero &= Known2.Zero.lshr(Offset).trunc(BitWidth);
2748 // If we don't know any bits, early out.
2749 if (Known.isUnknown())
2750 break;
2751 }
2752 }
2753 break;
2754 }
2755 case ISD::AND:
2756 // If either the LHS or the RHS are Zero, the result is zero.
2757 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2758 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2759
2760 // Output known-1 bits are only known if set in both the LHS & RHS.
2761 Known.One &= Known2.One;
2762 // Output known-0 are known to be clear if zero in either the LHS | RHS.
2763 Known.Zero |= Known2.Zero;
2764 break;
2765 case ISD::OR:
2766 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2767 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2768
2769 // Output known-0 bits are only known if clear in both the LHS & RHS.
2770 Known.Zero &= Known2.Zero;
2771 // Output known-1 are known to be set if set in either the LHS | RHS.
2772 Known.One |= Known2.One;
2773 break;
2774 case ISD::XOR: {
2775 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2776 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2777
2778 // Output known-0 bits are known if clear or set in both the LHS & RHS.
2779 APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
2780 // Output known-1 are known to be set if set in only one of the LHS, RHS.
2781 Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
2782 Known.Zero = KnownZeroOut;
2783 break;
2784 }
2785 case ISD::MUL: {
2786 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2787 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2788
2789 // If low bits are zero in either operand, output low known-0 bits.
2790 // Also compute a conservative estimate for high known-0 bits.
2791 // More trickiness is possible, but this is sufficient for the
2792 // interesting case of alignment computation.
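    // For example, a multiple of 4 (>= 2 trailing zeros) times a multiple of
    // 8 (>= 3 trailing zeros) is always a multiple of 32 (>= 5 trailing zeros).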
2793 unsigned TrailZ = Known.countMinTrailingZeros() +
2794 Known2.countMinTrailingZeros();
2795 unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
2796 Known2.countMinLeadingZeros(),
2797 BitWidth) - BitWidth;
2798
2799 Known.resetAll();
2800 Known.Zero.setLowBits(std::min(TrailZ, BitWidth));
2801 Known.Zero.setHighBits(std::min(LeadZ, BitWidth));
2802 break;
2803 }
2804 case ISD::UDIV: {
2805 // For the purposes of computing leading zeros we can conservatively
2806 // treat a udiv as a logical right shift by the power of 2 known to
2807 // be less than the denominator.
2808 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2809 unsigned LeadZ = Known2.countMinLeadingZeros();
2810
2811 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2812 unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
2813 if (RHSMaxLeadingZeros != BitWidth)
2814 LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);
2815
2816 Known.Zero.setHighBits(LeadZ);
2817 break;
2818 }
2819 case ISD::SELECT:
2820 case ISD::VSELECT:
2821 Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
2822 // If we don't know any bits, early out.
2823 if (Known.isUnknown())
2824 break;
2825 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth+1);
2826
2827 // Only known if known in both the LHS and RHS.
2828 Known.One &= Known2.One;
2829 Known.Zero &= Known2.Zero;
2830 break;
2831 case ISD::SELECT_CC:
2832 Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth+1);
2833 // If we don't know any bits, early out.
2834 if (Known.isUnknown())
2835 break;
2836 Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
2837
2838 // Only known if known in both the LHS and RHS.
2839 Known.One &= Known2.One;
2840 Known.Zero &= Known2.Zero;
2841 break;
2842 case ISD::SMULO:
2843 case ISD::UMULO:
2844 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
2845 if (Op.getResNo() != 1)
2846 break;
2847 // The boolean result conforms to getBooleanContents.
2848 // If we know the result of a setcc has the top bits zero, use this info.
2849 // We know that we have an integer-based boolean since these operations
2850 // are only available for integer types.
2851 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
2852 TargetLowering::ZeroOrOneBooleanContent &&
2853 BitWidth > 1)
2854 Known.Zero.setBitsFrom(1);
2855 break;
2856 case ISD::SETCC:
2857 case ISD::STRICT_FSETCC:
2858 case ISD::STRICT_FSETCCS: {
2859 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
2860 // If we know the result of a setcc has the top bits zero, use this info.
2861 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
2862 TargetLowering::ZeroOrOneBooleanContent &&
2863 BitWidth > 1)
2864 Known.Zero.setBitsFrom(1);
2865 break;
2866 }
2867 case ISD::SHL:
2868 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2869
2870 if (const APInt *ShAmt = getValidShiftAmountConstant(Op, DemandedElts)) {
2871 unsigned Shift = ShAmt->getZExtValue();
2872 Known.Zero <<= Shift;
2873 Known.One <<= Shift;
2874 // Low bits are known zero.
2875 Known.Zero.setLowBits(Shift);
2876 break;
2877 }
2878
2879 // No matter the shift amount, the trailing zeros will stay zero.
2880 Known.Zero = APInt::getLowBitsSet(BitWidth, Known.countMinTrailingZeros());
2881 Known.One.clearAllBits();
2882
2883 // Minimum shift low bits are known zero.
2884 if (const APInt *ShMinAmt =
2885 getValidMinimumShiftAmountConstant(Op, DemandedElts))
2886 Known.Zero.setLowBits(ShMinAmt->getZExtValue());
2887 break;
2888 case ISD::SRL:
2889 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2890
2891 if (const APInt *ShAmt = getValidShiftAmountConstant(Op, DemandedElts)) {
2892 unsigned Shift = ShAmt->getZExtValue();
2893 Known.Zero.lshrInPlace(Shift);
2894 Known.One.lshrInPlace(Shift);
2895 // High bits are known zero.
2896 Known.Zero.setHighBits(Shift);
2897 break;
2898 }
2899
2900 // No matter the shift amount, the leading zeros will stay zero.
2901 Known.Zero = APInt::getHighBitsSet(BitWidth, Known.countMinLeadingZeros());
2902 Known.One.clearAllBits();
2903
2904 // Minimum shift high bits are known zero.
2905 if (const APInt *ShMinAmt =
2906 getValidMinimumShiftAmountConstant(Op, DemandedElts))
2907 Known.Zero.setHighBits(ShMinAmt->getZExtValue());
2908 break;
2909 case ISD::SRA:
2910 if (const APInt *ShAmt = getValidShiftAmountConstant(Op, DemandedElts)) {
2911 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2912 unsigned Shift = ShAmt->getZExtValue();
2913 // Sign extend known zero/one bit (else is unknown).
2914 Known.Zero.ashrInPlace(Shift);
2915 Known.One.ashrInPlace(Shift);
2916 }
2917 break;
2918 case ISD::FSHL:
2919 case ISD::FSHR:
2920 if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(2), DemandedElts)) {
2921 unsigned Amt = C->getAPIntValue().urem(BitWidth);
2922
2923 // For fshl, 0-shift returns the 1st arg.
2924 // For fshr, 0-shift returns the 2nd arg.
2925 if (Amt == 0) {
2926 Known = computeKnownBits(Op.getOperand(Opcode == ISD::FSHL ? 0 : 1),
2927 DemandedElts, Depth + 1);
2928 break;
2929 }
2930
2931 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
2932 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
2933 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2934 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2935 if (Opcode == ISD::FSHL) {
2936 Known.One <<= Amt;
2937 Known.Zero <<= Amt;
2938 Known2.One.lshrInPlace(BitWidth - Amt);
2939 Known2.Zero.lshrInPlace(BitWidth - Amt);
2940 } else {
2941 Known.One <<= BitWidth - Amt;
2942 Known.Zero <<= BitWidth - Amt;
2943 Known2.One.lshrInPlace(Amt);
2944 Known2.Zero.lshrInPlace(Amt);
2945 }
2946 Known.One |= Known2.One;
2947 Known.Zero |= Known2.Zero;
2948 }
2949 break;
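// Worked example (illustrative): fshl(X, Y, 4) on i16 is
// (X << 4) | (Y >> 12), so Known(X) shifts left by 4 while Known(Y) shifts
// right by 12; the two contributions cover disjoint bits, which is why they
// can simply be OR'd together above.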
2950 case ISD::SIGN_EXTEND_INREG: {
2951 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2952 unsigned EBits = EVT.getScalarSizeInBits();
2953
2954 // Sign extension. Compute the demanded bits in the result that are not
2955 // present in the input.
2956 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);
2957
2958 APInt InSignMask = APInt::getSignMask(EBits);
2959 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);
2960
2961 // If the sign extended bits are demanded, we know that the sign
2962 // bit is demanded.
2963 InSignMask = InSignMask.zext(BitWidth);
2964 if (NewBits.getBoolValue())
2965 InputDemandedBits |= InSignMask;
2966
2967 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2968 Known.One &= InputDemandedBits;
2969 Known.Zero &= InputDemandedBits;
2970
2971 // If the sign bit of the input is known set or clear, then we know the
2972 // top bits of the result.
2973 if (Known.Zero.intersects(InSignMask)) { // Input sign bit known clear
2974 Known.Zero |= NewBits;
2975 Known.One &= ~NewBits;
2976 } else if (Known.One.intersects(InSignMask)) { // Input sign bit known set
2977 Known.One |= NewBits;
2978 Known.Zero &= ~NewBits;
2979 } else { // Input sign bit unknown
2980 Known.Zero &= ~NewBits;
2981 Known.One &= ~NewBits;
2982 }
2983 break;
2984 }
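// Worked example (illustrative): sign_extend_inreg from i8 inside an i32
// demands the low eight bits plus the i8 sign bit (bit 7); if bit 7 is known
// one, bits 31..8 of the result become known one as well, and if it is known
// zero they become known zero.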
2985 case ISD::CTTZ:
2986 case ISD::CTTZ_ZERO_UNDEF: {
2987 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2988 // If we have a known 1, its position is our upper bound.
2989 unsigned PossibleTZ = Known2.countMaxTrailingZeros();
2990 unsigned LowBits = Log2_32(PossibleTZ) + 1;
2991 Known.Zero.setBitsFrom(LowBits);
2992 break;
2993 }
2994 case ISD::CTLZ:
2995 case ISD::CTLZ_ZERO_UNDEF: {
2996 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2997 // If we have a known 1, its position is our upper bound.
2998 unsigned PossibleLZ = Known2.countMaxLeadingZeros();
2999 unsigned LowBits = Log2_32(PossibleLZ) + 1;
3000 Known.Zero.setBitsFrom(LowBits);
3001 break;
3002 }
3003 case ISD::CTPOP: {
3004 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3005 // If we know some of the bits are zero, they can't be one.
3006 unsigned PossibleOnes = Known2.countMaxPopulation();
3007 Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1);
3008 break;
3009 }
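// Worked example (illustrative): if countMaxPopulation() == 5, the CTPOP
// result is at most 5 and fits in Log2_32(5) + 1 == 3 bits, so every result
// bit from bit 3 upward is known zero.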
3010 case ISD::LOAD: {
3011 LoadSDNode *LD = cast<LoadSDNode>(Op);
3012 const Constant *Cst = TLI->getTargetConstantFromLoad(LD);
3013 if (ISD::isNON_EXTLoad(LD) && Cst) {
3014 // Determine any common known bits from the loaded constant pool value.
3015 Type *CstTy = Cst->getType();
3016 if ((NumElts * BitWidth) == CstTy->getPrimitiveSizeInBits()) {
3017 // If it's a vector splat, then we can (quickly) reuse the scalar path.
3018 // NOTE: We assume all elements match and none are UNDEF.
3019 if (CstTy->isVectorTy()) {
3020 if (const Constant *Splat = Cst->getSplatValue()) {
3021 Cst = Splat;
3022 CstTy = Cst->getType();
3023 }
3024 }
3025 // TODO - do we need to handle different bitwidths?
3026 if (CstTy->isVectorTy() && BitWidth == CstTy->getScalarSizeInBits()) {
3027 // Iterate across all vector elements finding common known bits.
3028 Known.One.setAllBits();
3029 Known.Zero.setAllBits();
3030 for (unsigned i = 0; i != NumElts; ++i) {
3031 if (!DemandedElts[i])
3032 continue;
3033 if (Constant *Elt = Cst->getAggregateElement(i)) {
3034 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
3035 const APInt &Value = CInt->getValue();
3036 Known.One &= Value;
3037 Known.Zero &= ~Value;
3038 continue;
3039 }
3040 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
3041 APInt Value = CFP->getValueAPF().bitcastToAPInt();
3042 Known.One &= Value;
3043 Known.Zero &= ~Value;
3044 continue;
3045 }
3046 }
3047 Known.One.clearAllBits();
3048 Known.Zero.clearAllBits();
3049 break;
3050 }
3051 } else if (BitWidth == CstTy->getPrimitiveSizeInBits()) {
3052 if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
3053 const APInt &Value = CInt->getValue();
3054 Known.One = Value;
3055 Known.Zero = ~Value;
3056 } else if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
3057 APInt Value = CFP->getValueAPF().bitcastToAPInt();
3058 Known.One = Value;
3059 Known.Zero = ~Value;
3060 }
3061 }
3062 }
3063 } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
3064 // If this is a ZEXTLoad and we are looking at the loaded value.
3065 EVT VT = LD->getMemoryVT();
3066 unsigned MemBits = VT.getScalarSizeInBits();
3067 Known.Zero.setBitsFrom(MemBits);
3068 } else if (const MDNode *Ranges = LD->getRanges()) {
3069 if (LD->getExtensionType() == ISD::NON_EXTLOAD)
3070 computeKnownBitsFromRangeMetadata(*Ranges, Known);
3071 }
3072 break;
3073 }
3074 case ISD::ZERO_EXTEND_VECTOR_INREG: {
3075 EVT InVT = Op.getOperand(0).getValueType();
3076 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
3077 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
3078 Known = Known.zext(BitWidth, true /* ExtendedBitsAreKnownZero */);
3079 break;
3080 }
3081 case ISD::ZERO_EXTEND: {
3082 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3083 Known = Known.zext(BitWidth, true /* ExtendedBitsAreKnownZero */);
3084 break;
3085 }
3086 case ISD::SIGN_EXTEND_VECTOR_INREG: {
3087 EVT InVT = Op.getOperand(0).getValueType();
3088 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
3089 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
3090 // If the sign bit is known to be zero or one, then sext will extend
3091 // it to the top bits, else it will just zext.
3092 Known = Known.sext(BitWidth);
3093 break;
3094 }
3095 case ISD::SIGN_EXTEND: {
3096 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3097 // If the sign bit is known to be zero or one, then sext will extend
3098 // it to the top bits, else it will just zext.
3099 Known = Known.sext(BitWidth);
3100 break;
3101 }
3102 case ISD::ANY_EXTEND: {
3103 Known = computeKnownBits(Op.getOperand(0), Depth+1);
3104 Known = Known.zext(BitWidth, false /* ExtendedBitsAreKnownZero */);
3105 break;
3106 }
3107 case ISD::TRUNCATE: {
3108 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3109 Known = Known.trunc(BitWidth);
3110 break;
3111 }
3112 case ISD::AssertZext: {
3113 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3114 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
3115 Known = computeKnownBits(Op.getOperand(0), Depth+1);
3116 Known.Zero |= (~InMask);
3117 Known.One &= (~Known.Zero);
3118 break;
3119 }
3120 case ISD::FGETSIGN:
3121 // All bits are zero except the low bit.
3122 Known.Zero.setBitsFrom(1);
3123 break;
3124 case ISD::USUBO:
3125 case ISD::SSUBO:
3126 if (Op.getResNo() == 1) {
3127 // If we know the result of a setcc has the top bits zero, use this info.
3128 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3129 TargetLowering::ZeroOrOneBooleanContent &&
3130 BitWidth > 1)
3131 Known.Zero.setBitsFrom(1);
3132 break;
3133 }
3134 LLVM_FALLTHROUGH;
3135 case ISD::SUB:
3136 case ISD::SUBC: {
3137 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3138 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3139 Known = KnownBits::computeForAddSub(/* Add */ false, /* NSW */ false,
3140 Known, Known2);
3141 break;
3142 }
3143 case ISD::UADDO:
3144 case ISD::SADDO:
3145 case ISD::ADDCARRY:
3146 if (Op.getResNo() == 1) {
3147 // If we know the result of a setcc has the top bits zero, use this info.
3148 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3149 TargetLowering::ZeroOrOneBooleanContent &&
3150 BitWidth > 1)
3151 Known.Zero.setBitsFrom(1);
3152 break;
3153 }
3154 LLVM_FALLTHROUGH;
3155 case ISD::ADD:
3156 case ISD::ADDC:
3157 case ISD::ADDE: {
3158 assert(Op.getResNo() == 0 && "We only compute knownbits for the sum here.");
3159
3160 // With ADDE and ADDCARRY, a carry bit may be added in.
3161 KnownBits Carry(1);
3162 if (Opcode == ISD::ADDE)
3163 // Can't track carry from glue, set carry to unknown.
3164 Carry.resetAll();
3165 else if (Opcode == ISD::ADDCARRY)
3166 // TODO: Compute known bits for the carry operand. It is unclear whether
3167 // this is worth the trouble (a known carry bit should be rare), but
3168 // something like this might work:
3169 // Carry = computeKnownBits(Op.getOperand(2), DemandedElts, Depth + 1);
3170 // Carry = Carry.zextOrTrunc(1, false);
3171 Carry.resetAll();
3172 else
3173 Carry.setAllZero();
3174
3175 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3176 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3177 Known = KnownBits::computeForAddCarry(Known, Known2, Carry);
3178 break;
3179 }
3180 case ISD::SREM:
3181 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
3182 const APInt &RA = Rem->getAPIntValue().abs();
3183 if (RA.isPowerOf2()) {
3184 APInt LowBits = RA - 1;
3185 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3186
3187 // The low bits of the first operand are unchanged by the srem.
3188 Known.Zero = Known2.Zero & LowBits;
3189 Known.One = Known2.One & LowBits;
3190
3191 // If the first operand is non-negative or has all low bits zero, then
3192 // the upper bits are all zero.
3193 if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
3194 Known.Zero |= ~LowBits;
3195
3196 // If the first operand is negative and not all low bits are zero, then
3197 // the upper bits are all one.
3198 if (Known2.isNegative() && LowBits.intersects(Known2.One))
3199 Known.One |= ~LowBits;
3200 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
3201 }
3202 }
3203 break;
3204 case ISD::UREM: {
3205 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
3206 const APInt &RA = Rem->getAPIntValue();
3207 if (RA.isPowerOf2()) {
3208 APInt LowBits = (RA - 1);
3209 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3210
3211 // The upper bits are all zero, the lower ones are unchanged.
3212 Known.Zero = Known2.Zero | ~LowBits;
3213 Known.One = Known2.One & LowBits;
3214 break;
3215 }
3216 }
3217
3218 // Since the result is less than or equal to either operand, any leading
3219 // zero bits in either operand must also exist in the result.
3220 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3221 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3222
3223 uint32_t Leaders =
3224 std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
3225 Known.resetAll();
3226 Known.Zero.setHighBits(Leaders);
3227 break;
3228 }
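// Worked example (illustrative): urem by the power-of-two constant 8 keeps
// only the low three bits of the dividend, so Known.Zero gains every bit
// from bit 3 upward and the low three known bits pass through unchanged.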
3229 case ISD::EXTRACT_ELEMENT: {
3230 Known = computeKnownBits(Op.getOperand(0), Depth+1);
3231 const unsigned Index = Op.getConstantOperandVal(1);
3232 const unsigned EltBitWidth = Op.getValueSizeInBits();
3233
3234 // Remove the low part of the known-bits mask.
3235 Known.Zero = Known.Zero.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
3236 Known.One = Known.One.getHiBits(Known.getBitWidth() - Index * EltBitWidth);
3237
3238 // Remove the high part of the known-bits mask.
3239 Known = Known.trunc(EltBitWidth);
3240 break;
3241 }
3242 case ISD::EXTRACT_VECTOR_ELT: {
3243 SDValue InVec = Op.getOperand(0);
3244 SDValue EltNo = Op.getOperand(1);
3245 EVT VecVT = InVec.getValueType();
3246 const unsigned EltBitWidth = VecVT.getScalarSizeInBits();
3247 const unsigned NumSrcElts = VecVT.getVectorNumElements();
3248 // If BitWidth > EltBitWidth the value is any-extended. So we do not know
3249 // anything about the extended bits.
3250 if (BitWidth > EltBitWidth)
3251 Known = Known.trunc(EltBitWidth);
3252 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3253 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) {
3254 // If we know the element index, just demand that vector element.
3255 unsigned Idx = ConstEltNo->getZExtValue();
3256 APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx);
3257 Known = computeKnownBits(InVec, DemandedElt, Depth + 1);
3258 } else {
3259 // Unknown element index, so ignore DemandedElts and demand them all.
3260 Known = computeKnownBits(InVec, Depth + 1);
3261 }
3262 if (BitWidth > EltBitWidth)
3263 Known = Known.zext(BitWidth, false /* => any extend */);
3264 break;
3265 }
3266 case ISD::INSERT_VECTOR_ELT: {
3267 SDValue InVec = Op.getOperand(0);
3268 SDValue InVal = Op.getOperand(1);
3269 SDValue EltNo = Op.getOperand(2);
3270
3271 ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
3272 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
3273 // If we know the element index, split the demand between the
3274 // source vector and the inserted element.
3275 Known.Zero = Known.One = APInt::getAllOnesValue(BitWidth);
3276 unsigned EltIdx = CEltNo->getZExtValue();
3277
3278 // If we demand the inserted element then add its common known bits.
3279 if (DemandedElts[EltIdx]) {
3280 Known2 = computeKnownBits(InVal, Depth + 1);
3281 Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth());
3282 Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth());
3283 }
3284
3285 // If we demand the source vector then add its common known bits, ensuring
3286 // that we don't demand the inserted element.
3287 APInt VectorElts = DemandedElts & ~(APInt::getOneBitSet(NumElts, EltIdx));
3288 if (!!VectorElts) {
3289 Known2 = computeKnownBits(InVec, VectorElts, Depth + 1);
3290 Known.One &= Known2.One;
3291 Known.Zero &= Known2.Zero;
3292 }
3293 } else {
3294 // Unknown element index, so ignore DemandedElts and demand them all.
3295 Known = computeKnownBits(InVec, Depth + 1);
3296 Known2 = computeKnownBits(InVal, Depth + 1);
3297 Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth());
3298 Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth());
3299 }
3300 break;
3301 }
3302 case ISD::BITREVERSE: {
3303 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3304 Known.Zero = Known2.Zero.reverseBits();
3305 Known.One = Known2.One.reverseBits();
3306 break;
3307 }
3308 case ISD::BSWAP: {
3309 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3310 Known.Zero = Known2.Zero.byteSwap();
3311 Known.One = Known2.One.byteSwap();
3312 break;
3313 }
3314 case ISD::ABS: {
3315 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3316
3317 // If the source's MSB is zero then we know the rest of the bits already.
3318 if (Known2.isNonNegative()) {
3319 Known.Zero = Known2.Zero;
3320 Known.One = Known2.One;
3321 break;
3322 }
3323
3324 // We only know that the absolute value's MSB will be zero iff there is
3325 // a set bit that isn't the sign bit (otherwise it could be INT_MIN).
3326 Known2.One.clearSignBit();
3327 if (Known2.One.getBoolValue()) {
3328 Known.Zero = APInt::getSignMask(BitWidth);
3329 break;
3330 }
3331 break;
3332 }
3333 case ISD::UMIN: {
3334 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3335 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3336
3337 // UMIN - we know that the result will have the maximum of the
3338 // known zero leading bits of the inputs.
3339 unsigned LeadZero = Known.countMinLeadingZeros();
3340 LeadZero = std::max(LeadZero, Known2.countMinLeadingZeros());
3341
3342 Known.Zero &= Known2.Zero;
3343 Known.One &= Known2.One;
3344 Known.Zero.setHighBits(LeadZero);
3345 break;
3346 }
3347 case ISD::UMAX: {
3348 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3349 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3350
3351 // UMAX - we know that the result will have the maximum of the
3352 // known one leading bits of the inputs.
3353 unsigned LeadOne = Known.countMinLeadingOnes();
3354 LeadOne = std::max(LeadOne, Known2.countMinLeadingOnes());
3355
3356 Known.Zero &= Known2.Zero;
3357 Known.One &= Known2.One;
3358 Known.One.setHighBits(LeadOne);
3359 break;
3360 }
3361 case ISD::SMIN:
3362 case ISD::SMAX: {
3363 // If we have a clamp pattern, we know that the number of sign bits will be
3364 // the minimum of the clamp min/max range.
3365 bool IsMax = (Opcode == ISD::SMAX);
3366 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
3367 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
3368 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
3369 CstHigh =
3370 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
3371 if (CstLow && CstHigh) {
3372 if (!IsMax)
3373 std::swap(CstLow, CstHigh);
3374
3375 const APInt &ValueLow = CstLow->getAPIntValue();
3376 const APInt &ValueHigh = CstHigh->getAPIntValue();
3377 if (ValueLow.sle(ValueHigh)) {
3378 unsigned LowSignBits = ValueLow.getNumSignBits();
3379 unsigned HighSignBits = ValueHigh.getNumSignBits();
3380 unsigned MinSignBits = std::min(LowSignBits, HighSignBits);
3381 if (ValueLow.isNegative() && ValueHigh.isNegative()) {
3382 Known.One.setHighBits(MinSignBits);
3383 break;
3384 }
3385 if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) {
3386 Known.Zero.setHighBits(MinSignBits);
3387 break;
3388 }
3389 }
3390 }
3391
3392 // Fallback - just get the shared known bits of the operands.
3393 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3394 if (Known.isUnknown()) break; // Early-out
3395 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3396 Known.Zero &= Known2.Zero;
3397 Known.One &= Known2.One;
3398 break;
3399 }
3400 case ISD::FrameIndex:
3401 case ISD::TargetFrameIndex:
3402 TLI->computeKnownBitsForFrameIndex(Op, Known, DemandedElts, *this, Depth);
3403 break;
3404
3405 default:
3406 if (Opcode < ISD::BUILTIN_OP_END)
3407 break;
3408 LLVM_FALLTHROUGH;
3409 case ISD::INTRINSIC_WO_CHAIN:
3410 case ISD::INTRINSIC_W_CHAIN:
3411 case ISD::INTRINSIC_VOID:
3412 // Allow the target to implement this method for its nodes.
3413 TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth);
3414 break;
3415 }
3416
3417 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
3418 return Known;
3419 }
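// Usage sketch (illustrative, assuming a SelectionDAG reference `DAG` and an
// integer SDValue `N`): callers typically query known bits and act on the
// guaranteed masks, e.g.
//   KnownBits Known = DAG.computeKnownBits(N);
//   if (Known.isNonNegative())
//     ...; // The sign bit is known zero, so N is provably non-negative.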
3420
3421 SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0,
3422 SDValue N1) const {
3423 // X + 0 never overflows.
3424 if (isNullConstant(N1))
3425 return OFK_Never;
3426
3427 KnownBits N1Known = computeKnownBits(N1);
3428 if (N1Known.Zero.getBoolValue()) {
3429 KnownBits N0Known = computeKnownBits(N0);
3430
3431 bool overflow;
3432 (void)N0Known.getMaxValue().uadd_ov(N1Known.getMaxValue(), overflow);
3433 if (!overflow)
3434 return OFK_Never;
3435 }
3436
3437 // mulhi + 1 never overflows.
3438 if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 &&
3439 (N1Known.getMaxValue() & 0x01) == N1Known.getMaxValue())
3440 return OFK_Never;
3441
3442 if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) {
3443 KnownBits N0Known = computeKnownBits(N0);
3444
3445 if ((N0Known.getMaxValue() & 0x01) == N0Known.getMaxValue())
3446 return OFK_Never;
3447 }
3448
3449 return OFK_Sometime;
3450 }
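// Example (illustrative): if N0 is an i32 with its top 16 bits known zero
// and N1 is the constant 1, the maximum possible sum is 0x10000, which
// cannot wrap an i32, so the query above reports OFK_Never.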
3451
3452 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
3453 EVT OpVT = Val.getValueType();
3454 unsigned BitWidth = OpVT.getScalarSizeInBits();
3455
3456 // Is the constant a known power of 2?
3457 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val))
3458 return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3459
3460 // A left-shift of a constant one will have exactly one bit set because
3461 // shifting the bit off the end is undefined.
3462 if (Val.getOpcode() == ISD::SHL) {
3463 auto *C = isConstOrConstSplat(Val.getOperand(0));
3464 if (C && C->getAPIntValue() == 1)
3465 return true;
3466 }
3467
3468 // Similarly, a logical right-shift of a constant sign-bit will have exactly
3469 // one bit set.
3470 if (Val.getOpcode() == ISD::SRL) {
3471 auto *C = isConstOrConstSplat(Val.getOperand(0));
3472 if (C && C->getAPIntValue().isSignMask())
3473 return true;
3474 }
3475
3476 // Are all operands of a build vector constant powers of two?
3477 if (Val.getOpcode() == ISD::BUILD_VECTOR)
3478 if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) {
3479 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E))
3480 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3481 return false;
3482 }))
3483 return true;
3484
3485 // More could be done here, though the above checks are enough
3486 // to handle some common cases.
3487
3488 // Fall back to computeKnownBits to catch other known cases.
3489 KnownBits Known = computeKnownBits(Val);
3490 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
3491 }
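// Example (illustrative): (shl 1, x) always has exactly one bit set, since
// the single set bit can only move and shifting it out entirely is
// undefined; the SHL special case above accepts it without consulting
// computeKnownBits.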
3492
3493 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
3494 EVT VT = Op.getValueType();
3495 APInt DemandedElts = VT.isVector()
3496 ? APInt::getAllOnesValue(VT.getVectorNumElements())
3497 : APInt(1, 1);
3498 return ComputeNumSignBits(Op, DemandedElts, Depth);
3499 }
3500
3501 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
3502 unsigned Depth) const {
3503 EVT VT = Op.getValueType();
3504 assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!");
3505 unsigned VTBits = VT.getScalarSizeInBits();
3506 unsigned NumElts = DemandedElts.getBitWidth();
3507 unsigned Tmp, Tmp2;
3508 unsigned FirstAnswer = 1;
3509
3510 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
3511 const APInt &Val = C->getAPIntValue();
3512 return Val.getNumSignBits();
3513 }
3514
3515 if (Depth >= MaxRecursionDepth)
3516 return 1; // Limit search depth.
3517
3518 if (!DemandedElts)
3519 return 1; // No demanded elts, better to assume we don't know anything.
3520
3521 unsigned Opcode = Op.getOpcode();
3522 switch (Opcode) {
3523 default: break;
3524 case ISD::AssertSext:
3525 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
3526 return VTBits-Tmp+1;
3527 case ISD::AssertZext:
3528 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
3529 return VTBits-Tmp;
3530
3531 case ISD::BUILD_VECTOR:
3532 Tmp = VTBits;
3533 for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) {
3534 if (!DemandedElts[i])
3535 continue;
3536
3537 SDValue SrcOp = Op.getOperand(i);
3538 Tmp2 = ComputeNumSignBits(Op.getOperand(i), Depth + 1);
3539
3540 // BUILD_VECTOR can implicitly truncate sources, we must handle this.
3541 if (SrcOp.getValueSizeInBits() != VTBits) {
3542 assert(SrcOp.getValueSizeInBits() > VTBits &&
3543 "Expected BUILD_VECTOR implicit truncation");
3544 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits;
3545 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1);
3546 }
3547 Tmp = std::min(Tmp, Tmp2);
3548 }
3549 return Tmp;
3550
3551 case ISD::VECTOR_SHUFFLE: {
3552 // Collect the minimum number of sign bits that are shared by every vector
3553 // element referenced by the shuffle.
3554 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
3555 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
3556 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
3557 for (unsigned i = 0; i != NumElts; ++i) {
3558 int M = SVN->getMaskElt(i);
3559 if (!DemandedElts[i])
3560 continue;
3561 // For UNDEF elements, we don't know anything about the common state of
3562 // the shuffle result.
3563 if (M < 0)
3564 return 1;
3565 if ((unsigned)M < NumElts)
3566 DemandedLHS.setBit((unsigned)M % NumElts);
3567 else
3568 DemandedRHS.setBit((unsigned)M % NumElts);
3569 }
3570 Tmp = std::numeric_limits<unsigned>::max();
3571 if (!!DemandedLHS)
3572 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
3573 if (!!DemandedRHS) {
3574 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
3575 Tmp = std::min(Tmp, Tmp2);
3576 }
3577 // If we don't know anything, early out and try computeKnownBits fall-back.
3578 if (Tmp == 1)
3579 break;
3580 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3581 return Tmp;
3582 }
3583
3584 case ISD::BITCAST: {
3585 SDValue N0 = Op.getOperand(0);
3586 EVT SrcVT = N0.getValueType();
3587 unsigned SrcBits = SrcVT.getScalarSizeInBits();
3588
3589 // Ignore bitcasts from unsupported types.
3590 if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint()))
3591 break;
3592
3593 // Fast handling of 'identity' bitcasts.
3594 if (VTBits == SrcBits)
3595 return ComputeNumSignBits(N0, DemandedElts, Depth + 1);
3596
3597 bool IsLE = getDataLayout().isLittleEndian();
3598
3599 // Bitcast 'large element' scalar/vector to 'small element' vector.
3600 if ((SrcBits % VTBits) == 0) {
3601 assert(VT.isVector() && "Expected bitcast to vector");
3602
3603 unsigned Scale = SrcBits / VTBits;
3604 APInt SrcDemandedElts(NumElts / Scale, 0);
3605 for (unsigned i = 0; i != NumElts; ++i)
3606 if (DemandedElts[i])
3607 SrcDemandedElts.setBit(i / Scale);
3608
3609 // Fast case - sign splat can be simply split across the small elements.
3610 Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1);
3611 if (Tmp == SrcBits)
3612 return VTBits;
3613
3614 // Slow case - determine how far the sign extends into each sub-element.
3615 Tmp2 = VTBits;
3616 for (unsigned i = 0; i != NumElts; ++i)
3617 if (DemandedElts[i]) {
3618 unsigned SubOffset = i % Scale;
3619 SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset);
3620 SubOffset = SubOffset * VTBits;
3621 if (Tmp <= SubOffset)
3622 return 1;
3623 Tmp2 = std::min(Tmp2, Tmp - SubOffset);
3624 }
3625 return Tmp2;
3626 }
3627 break;
3628 }
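// Worked example (illustrative, little-endian): bitcasting an i64 that has
// 40 known sign bits to v2i32 leaves the high element fully sign-splatted
// (32 bits) but the low element with only 40 - 32 = 8 sign bits, so the
// slow path above returns 8 when both elements are demanded.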
3629
3630 case ISD::SIGN_EXTEND:
3631 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits();
3632 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp;
3633 case ISD::SIGN_EXTEND_INREG:
3634 // Max of the input and what this extends.
3635 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
3636 Tmp = VTBits-Tmp+1;
3637 Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3638 return std::max(Tmp, Tmp2);
3639 case ISD::SIGN_EXTEND_VECTOR_INREG: {
3640 SDValue Src = Op.getOperand(0);
3641 EVT SrcVT = Src.getValueType();
3642 APInt DemandedSrcElts = DemandedElts.zextOrSelf(SrcVT.getVectorNumElements());
3643 Tmp = VTBits - SrcVT.getScalarSizeInBits();
3644 return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp;
3645 }
3646 case ISD::SRA:
3647 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3648 // SRA X, C -> adds C sign bits.
3649 if (const APInt *ShAmt = getValidShiftAmountConstant(Op, DemandedElts))
3650 Tmp = std::min<uint64_t>(Tmp + ShAmt->getZExtValue(), VTBits);
3651 else if (const APInt *ShAmt =
3652 getValidMinimumShiftAmountConstant(Op, DemandedElts))
3653 Tmp = std::min<uint64_t>(Tmp + ShAmt->getZExtValue(), VTBits);
3654 return Tmp;
3655 case ISD::SHL:
3656 if (const APInt *ShAmt = getValidShiftAmountConstant(Op, DemandedElts)) {
3657 // shl destroys sign bits, ensure it doesn't shift out all sign bits.
3658 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3659 if (ShAmt->ult(Tmp))
3660 return Tmp - ShAmt->getZExtValue();
3661 } else if (const APInt *ShAmt =
3662 getValidMaximumShiftAmountConstant(Op, DemandedElts)) {
3663 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
3664 if (ShAmt->ult(Tmp))
3665 return Tmp - ShAmt->getZExtValue();
3666 }
3667 break;
3668 case ISD::AND:
3669 case ISD::OR:
3670 case ISD::XOR: // NOT is handled here.
3671 // Logical binary ops preserve the number of sign bits at the worst.
3672 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3673 if (Tmp != 1) {
3674 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
3675 FirstAnswer = std::min(Tmp, Tmp2);
3676 // We computed what we know about the sign bits as our first
3677 // answer. Now proceed to the generic code that uses
3678 // computeKnownBits, and pick whichever answer is better.
3679 }
3680 break;
3681
3682 case ISD::SELECT:
3683 case ISD::VSELECT:
3684 Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
3685 if (Tmp == 1) return 1; // Early out.
3686 Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
3687 return std::min(Tmp, Tmp2);
3688 case ISD::SELECT_CC:
3689 Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
3690 if (Tmp == 1) return 1; // Early out.
3691 Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1);
3692 return std::min(Tmp, Tmp2);
3693
3694 case ISD::SMIN:
3695 case ISD::SMAX: {
3696 // If we have a clamp pattern, we know that the number of sign bits will be
3697 // the minimum of the clamp min/max range.
3698 bool IsMax = (Opcode == ISD::SMAX);
3699 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
3700 if ((CstLow = isConstOrConstSplat(Op.getOperand(1), DemandedElts)))
3701 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
3702 CstHigh =
3703 isConstOrConstSplat(Op.getOperand(0).getOperand(1), DemandedElts);
3704 if (CstLow && CstHigh) {
3705 if (!IsMax)
3706 std::swap(CstLow, CstHigh);
3707 if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) {
3708 Tmp = CstLow->getAPIntValue().getNumSignBits();
3709 Tmp2 = CstHigh->getAPIntValue().getNumSignBits();
3710 return std::min(Tmp, Tmp2);
3711 }
3712 }
3713
3714 // Fallback - just get the minimum number of sign bits of the operands.
3715 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3716 if (Tmp == 1)
3717 return 1; // Early out.
3718 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
3719 return std::min(Tmp, Tmp2);
3720 }
3721 case ISD::UMIN:
3722 case ISD::UMAX:
3723 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3724 if (Tmp == 1)
3725 return 1; // Early out.
3726 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
3727 return std::min(Tmp, Tmp2);
3728 case ISD::SADDO:
3729 case ISD::UADDO:
3730 case ISD::SSUBO:
3731 case ISD::USUBO:
3732 case ISD::SMULO:
3733 case ISD::UMULO:
3734 if (Op.getResNo() != 1)
3735 break;
3736 // The boolean result conforms to getBooleanContents.
3737 // If setcc returns 0/-1, all bits are sign bits.
3738 // We know that we have an integer-based boolean since these operations
3739 // are only available for integers.
3740 if (TLI->getBooleanContents(VT.isVector(), false) ==
3741 TargetLowering::ZeroOrNegativeOneBooleanContent)
3742 return VTBits;
3743 break;
3744 case ISD::SETCC:
3745 case ISD::STRICT_FSETCC:
3746 case ISD::STRICT_FSETCCS: {
3747 unsigned OpNo = Op->isStrictFPOpcode() ? 1 : 0;
3748 // If setcc returns 0/-1, all bits are sign bits.
3749 if (TLI->getBooleanContents(Op.getOperand(OpNo).getValueType()) ==
3750 TargetLowering::ZeroOrNegativeOneBooleanContent)
3751 return VTBits;
3752 break;
3753 }
3754 case ISD::ROTL:
3755 case ISD::ROTR:
3756 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3757 unsigned RotAmt = C->getAPIntValue().urem(VTBits);
3758
3759 // Handle rotate right by N like a rotate left by 32-N.
3760 if (Opcode == ISD::ROTR)
3761 RotAmt = (VTBits - RotAmt) % VTBits;
3762
3763 // If we aren't rotating out all of the known-in sign bits, return the
3764 // number that are left. This handles rotl(sext(x), 1) for example.
3765 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3766 if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt);
3767 }
3768 break;
3769 case ISD::ADD:
3770 case ISD::ADDC:
3771 // Add can have at most one carry bit. Thus we know that the output
3772 // is, at worst, one more bit than the inputs.
3773 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3774 if (Tmp == 1) return 1; // Early out.
3775
3776 // Special case decrementing a value (ADD X, -1):
3777 if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
3778 if (CRHS->isAllOnesValue()) {
3779 KnownBits Known = computeKnownBits(Op.getOperand(0), Depth+1);
3780
3781 // If the input is known to be 0 or 1, the output is 0/-1, which is all
3782 // sign bits set.
3783 if ((Known.Zero | 1).isAllOnesValue())
3784 return VTBits;
3785
3786 // If we are subtracting one from a positive number, there is no carry
3787 // out of the result.
3788 if (Known.isNonNegative())
3789 return Tmp;
3790 }
3791
3792 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
3793 if (Tmp2 == 1) return 1;
3794 return std::min(Tmp, Tmp2)-1;
3795
3796 case ISD::SUB:
3797 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
3798 if (Tmp2 == 1) return 1;
3799
3800 // Handle NEG.
3801 if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0)))
3802 if (CLHS->isNullValue()) {
3803 KnownBits Known = computeKnownBits(Op.getOperand(1), Depth+1);
3804 // If the input is known to be 0 or 1, the output is 0/-1, which is all
3805 // sign bits set.
3806 if ((Known.Zero | 1).isAllOnesValue())
3807 return VTBits;
3808
3809 // If the input is known to be positive (the sign bit is known clear),
3810 // the output of the NEG has the same number of sign bits as the input.
3811 if (Known.isNonNegative())
3812 return Tmp2;
3813
3814 // Otherwise, we treat this like a SUB.
3815 }
3816
3817 // Sub can have at most one carry bit. Thus we know that the output
3818 // is, at worst, one more bit than the inputs.
3819 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3820 if (Tmp == 1) return 1; // Early out.
3821 return std::min(Tmp, Tmp2)-1;
3822 case ISD::MUL: {
3823 // The output of the Mul can be at most twice the valid bits in the inputs.
3824 unsigned SignBitsOp0 = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3825 if (SignBitsOp0 == 1)
3826 break;
3827 unsigned SignBitsOp1 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
3828 if (SignBitsOp1 == 1)
3829 break;
3830 unsigned OutValidBits =
3831 (VTBits - SignBitsOp0 + 1) + (VTBits - SignBitsOp1 + 1);
3832 return OutValidBits > VTBits ? 1 : VTBits - OutValidBits + 1;
3833 }
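// Worked example (illustrative): multiplying two i32 values with 20 sign
// bits each leaves at most (32 - 20 + 1) + (32 - 20 + 1) = 26 valid bits,
// so the product is still guaranteed 32 - 26 + 1 = 7 sign bits.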
3834 case ISD::TRUNCATE: {
3835 // Check if the sign bits of source go down as far as the truncated value.
3836 unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
3837 unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3838 if (NumSrcSignBits > (NumSrcBits - VTBits))
3839 return NumSrcSignBits - (NumSrcBits - VTBits);
3840 break;
3841 }
3842 case ISD::EXTRACT_ELEMENT: {
3843 const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3844 const int BitWidth = Op.getValueSizeInBits();
3845 const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;
3846
3847 // Get the reverse index (zero at the big end): operand 1 indexes elements
3848 // from the little end, while the sign bits start at the big end.
3849 const int rIndex = Items - 1 - Op.getConstantOperandVal(1);
3850
3851 // If the sign portion ends in our element, the subtraction gives the
3852 // correct result; otherwise it yields a negative or > bitwidth result.
3853 return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
3854 }
3855 case ISD::INSERT_VECTOR_ELT: {
3856 SDValue InVec = Op.getOperand(0);
3857 SDValue InVal = Op.getOperand(1);
3858 SDValue EltNo = Op.getOperand(2);
3859
3860 ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
3861 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
3862 // If we know the element index, split the demand between the
3863 // source vector and the inserted element.
3864 unsigned EltIdx = CEltNo->getZExtValue();
3865
3866 // If we demand the inserted element then get its sign bits.
3867 Tmp = std::numeric_limits<unsigned>::max();
3868 if (DemandedElts[EltIdx]) {
3869 // TODO - handle implicit truncation of inserted elements.
3870 if (InVal.getScalarValueSizeInBits() != VTBits)
3871 break;
3872 Tmp = ComputeNumSignBits(InVal, Depth + 1);
3873 }
3874
3875 // If we demand the source vector then get its sign bits, and determine
3876 // the minimum.
3877 APInt VectorElts = DemandedElts;
3878 VectorElts.clearBit(EltIdx);
3879 if (!!VectorElts) {
3880 Tmp2 = ComputeNumSignBits(InVec, VectorElts, Depth + 1);
3881 Tmp = std::min(Tmp, Tmp2);
3882 }
3883 } else {
3884 // Unknown element index, so ignore DemandedElts and demand them all.
3885 Tmp = ComputeNumSignBits(InVec, Depth + 1);
3886 Tmp2 = ComputeNumSignBits(InVal, Depth + 1);
3887 Tmp = std::min(Tmp, Tmp2);
3888 }
3889 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3890 return Tmp;
3891 }
3892 case ISD::EXTRACT_VECTOR_ELT: {
3893 SDValue InVec = Op.getOperand(0);
3894 SDValue EltNo = Op.getOperand(1);
3895 EVT VecVT = InVec.getValueType();
3896 const unsigned BitWidth = Op.getValueSizeInBits();
3897 const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
3898 const unsigned NumSrcElts = VecVT.getVectorNumElements();
3899
3900 // If BitWidth > EltBitWidth the value is any-extended, and we do not know
3901 // anything about sign bits. But if the sizes match we can derive knowledge
3902 // about sign bits from the vector operand.
3903 if (BitWidth != EltBitWidth)
3904 break;
3905
3906 // If we know the element index, just demand that vector element, else for
3907 // an unknown element index, ignore DemandedElts and demand them all.
3908 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
3909 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3910 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
3911 DemandedSrcElts =
3912 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
3913
3914 return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1);
3915 }
3916 case ISD::EXTRACT_SUBVECTOR: {
3917 // If we know the element index, just demand those subvector elements,
3918 // otherwise demand them all.
3919 SDValue Src = Op.getOperand(0);
3920 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
3921 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3922 APInt DemandedSrc = APInt::getAllOnesValue(NumSrcElts);
3923 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
3924 // Offset the demanded elts by the subvector index.
3925 uint64_t Idx = SubIdx->getZExtValue();
3926 DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
3927 }
3928 return ComputeNumSignBits(Src, DemandedSrc, Depth + 1);
3929 }
3930 case ISD::CONCAT_VECTORS: {
3931 // Determine the minimum number of sign bits across all demanded
3932 // elts of the input vectors. Early out if the result is already 1.
3933 Tmp = std::numeric_limits<unsigned>::max();
3934 EVT SubVectorVT = Op.getOperand(0).getValueType();
3935 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
3936 unsigned NumSubVectors = Op.getNumOperands();
3937 for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
3938 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
3939 DemandedSub = DemandedSub.trunc(NumSubVectorElts);
3940 if (!DemandedSub)
3941 continue;
3942 Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1);
3943 Tmp = std::min(Tmp, Tmp2);
3944 }
3945 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3946 return Tmp;
3947 }
3948 case ISD::INSERT_SUBVECTOR: {
3949 // If we know the element index, demand any elements from the subvector and
3950 // the remainder from the src it's inserted into, otherwise demand them all.
3951 SDValue Src = Op.getOperand(0);
3952 SDValue Sub = Op.getOperand(1);
3953 auto *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
3954 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
3955 if (SubIdx && SubIdx->getAPIntValue().ule(NumElts - NumSubElts)) {
3956 Tmp = std::numeric_limits<unsigned>::max();
3957 uint64_t Idx = SubIdx->getZExtValue();
3958 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
3959 if (!!DemandedSubElts) {
3960 Tmp = ComputeNumSignBits(Sub, DemandedSubElts, Depth + 1);
3961 if (Tmp == 1) return 1; // early-out
3962 }
3963 APInt SubMask = APInt::getBitsSet(NumElts, Idx, Idx + NumSubElts);
3964 APInt DemandedSrcElts = DemandedElts & ~SubMask;
3965 if (!!DemandedSrcElts) {
3966 Tmp2 = ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
3967 Tmp = std::min(Tmp, Tmp2);
3968 }
3969 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3970 return Tmp;
3971 }
3972
3973 // Not able to determine the index so just assume worst case.
3974 Tmp = ComputeNumSignBits(Sub, Depth + 1);
3975 if (Tmp == 1) return 1; // early-out
3976 Tmp2 = ComputeNumSignBits(Src, Depth + 1);
3977 Tmp = std::min(Tmp, Tmp2);
3978 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3979 return Tmp;
3980 }
3981 }
3982
3983 // If we are looking at the loaded value of the SDNode.
3984 if (Op.getResNo() == 0) {
3985 // Handle LOADX separately here. EXTLOAD case will fallthrough.
3986 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
3987 unsigned ExtType = LD->getExtensionType();
3988 switch (ExtType) {
3989 default: break;
3990 case ISD::SEXTLOAD: // e.g. i16->i32 = '17' bits known.
3991 Tmp = LD->getMemoryVT().getScalarSizeInBits();
3992 return VTBits - Tmp + 1;
3993 case ISD::ZEXTLOAD: // e.g. i16->i32 = '16' bits known.
3994 Tmp = LD->getMemoryVT().getScalarSizeInBits();
3995 return VTBits - Tmp;
3996 case ISD::NON_EXTLOAD:
3997 if (const Constant *Cst = TLI->getTargetConstantFromLoad(LD)) {
3998 // We only need to handle vectors - computeKnownBits should handle
3999 // scalar cases.
4000 Type *CstTy = Cst->getType();
4001 if (CstTy->isVectorTy() &&
4002 (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits()) {
4003 Tmp = VTBits;
4004 for (unsigned i = 0; i != NumElts; ++i) {
4005 if (!DemandedElts[i])
4006 continue;
4007 if (Constant *Elt = Cst->getAggregateElement(i)) {
4008 if (auto *CInt = dyn_cast<ConstantInt>(Elt)) {
4009 const APInt &Value = CInt->getValue();
4010 Tmp = std::min(Tmp, Value.getNumSignBits());
4011 continue;
4012 }
4013 if (auto *CFP = dyn_cast<ConstantFP>(Elt)) {
4014 APInt Value = CFP->getValueAPF().bitcastToAPInt();
4015 Tmp = std::min(Tmp, Value.getNumSignBits());
4016 continue;
4017 }
4018 }
4019 // Unknown type. Conservatively assume no bits match sign bit.
4020 return 1;
4021 }
4022 return Tmp;
4023 }
4024 }
4025 break;
4026 }
4027 }
4028 }
4029
4030 // Allow the target to implement this method for its nodes.
4031 if (Opcode >= ISD::BUILTIN_OP_END ||
4032 Opcode == ISD::INTRINSIC_WO_CHAIN ||
4033 Opcode == ISD::INTRINSIC_W_CHAIN ||
4034 Opcode == ISD::INTRINSIC_VOID) {
4035 unsigned NumBits =
4036 TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth);
4037 if (NumBits > 1)
4038 FirstAnswer = std::max(FirstAnswer, NumBits);
4039 }
4040
4041 // Finally, if we can prove that the top bits of the result are 0's or 1's,
4042 // use this information.
4043 KnownBits Known = computeKnownBits(Op, DemandedElts, Depth);
4044
4045 APInt Mask;
4046 if (Known.isNonNegative()) { // sign bit is 0
4047 Mask = Known.Zero;
4048 } else if (Known.isNegative()) { // sign bit is 1;
4049 Mask = Known.One;
4050 } else {
4051 // Nothing known.
4052 return FirstAnswer;
4053 }
4054
4055 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
4056 // the number of identical bits in the top of the input value.
4057 Mask = ~Mask;
4058 Mask <<= Mask.getBitWidth()-VTBits;
4059 // Return # leading zeros. We use 'min' here in case Val was zero before
4060 // shifting. We don't want to return '64' as for an i32 "0".
4061 return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
4062 }
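// Worked example (illustrative): if computeKnownBits reports
// Known.Zero = 0xFF000000 for an i32, the sign bit is zero and the inverted
// mask has eight leading zeros, so at least eight sign bits are returned
// even when no switch case matched.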
4063
4064 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
4065 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
4066 !isa<ConstantSDNode>(Op.getOperand(1)))
4067 return false;
4068
4069 if (Op.getOpcode() == ISD::OR &&
4070 !MaskedValueIsZero(Op.getOperand(0), Op.getConstantOperandAPInt(1)))
4071 return false;
4072
4073 return true;
4074 }
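// Example (illustrative): (or x, 4) behaves as (add x, 4) only when bit 2
// of x is known zero; MaskedValueIsZero performs exactly that check, which
// lets addressing patterns such as (or FrameIndex, 4) be treated as
// base + constant offset.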
4075
4076 bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN, unsigned Depth) const {
4077 // If we're told that NaNs won't happen, assume they won't.
4078 if (getTarget().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs())
4079 return true;
4080
4081 if (Depth >= MaxRecursionDepth)
4082 return false; // Limit search depth.
4083
4084 // TODO: Handle vectors.
4085 // If the value is a constant, we can obviously see if it is a NaN or not.
4086 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) {
4087 return !C->getValueAPF().isNaN() ||
4088 (SNaN && !C->getValueAPF().isSignaling());
4089 }
4090
4091 unsigned Opcode = Op.getOpcode();
4092 switch (Opcode) {
4093 case ISD::FADD:
4094 case ISD::FSUB:
4095 case ISD::FMUL:
4096 case ISD::FDIV:
4097 case ISD::FREM:
4098 case ISD::FSIN:
4099 case ISD::FCOS: {
4100 if (SNaN)
4101 return true;
4102 // TODO: Need isKnownNeverInfinity
4103 return false;
4104 }
4105 case ISD::FCANONICALIZE:
4106 case ISD::FEXP:
4107 case ISD::FEXP2:
4108 case ISD::FTRUNC:
4109 case ISD::FFLOOR:
4110 case ISD::FCEIL:
4111 case ISD::FROUND:
4112 case ISD::FRINT:
4113 case ISD::FNEARBYINT: {
4114 if (SNaN)
4115 return true;
4116 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4117 }
4118 case ISD::FABS:
4119 case ISD::FNEG:
4120 case ISD::FCOPYSIGN: {
4121 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4122 }
4123 case ISD::SELECT:
4124 return isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4125 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4126 case ISD::FP_EXTEND:
4127 case ISD::FP_ROUND: {
4128 if (SNaN)
4129 return true;
4130 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4131 }
4132 case ISD::SINT_TO_FP:
4133 case ISD::UINT_TO_FP:
4134 return true;
4135 case ISD::FMA:
4136 case ISD::FMAD: {
4137 if (SNaN)
4138 return true;
4139 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4140 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4141 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4142 }
4143 case ISD::FSQRT: // Needs the operand to be known positive.
4144 case ISD::FLOG:
4145 case ISD::FLOG2:
4146 case ISD::FLOG10:
4147 case ISD::FPOWI:
4148 case ISD::FPOW: {
4149 if (SNaN)
4150 return true;
4151 // TODO: Refine on operand
4152 return false;
4153 }
4154 case ISD::FMINNUM:
4155 case ISD::FMAXNUM: {
4156 // Only one needs to be known not-NaN, since it will be returned if the
4157 // other ends up being one.
4158 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) ||
4159 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4160 }
4161 case ISD::FMINNUM_IEEE:
4162 case ISD::FMAXNUM_IEEE: {
4163 if (SNaN)
4164 return true;
4165 // This can return a NaN if either operand is an sNaN, or if both operands
4166 // are NaN.
4167 return (isKnownNeverNaN(Op.getOperand(0), false, Depth + 1) &&
4168 isKnownNeverSNaN(Op.getOperand(1), Depth + 1)) ||
4169 (isKnownNeverNaN(Op.getOperand(1), false, Depth + 1) &&
4170 isKnownNeverSNaN(Op.getOperand(0), Depth + 1));
4171 }
4172 case ISD::FMINIMUM:
4173 case ISD::FMAXIMUM: {
4174 // TODO: Does this quiet or return the original NaN as-is?
4175 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4176 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4177 }
4178 case ISD::EXTRACT_VECTOR_ELT: {
4179 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4180 }
4181 default:
4182 if (Opcode >= ISD::BUILTIN_OP_END ||
4183 Opcode == ISD::INTRINSIC_WO_CHAIN ||
4184 Opcode == ISD::INTRINSIC_W_CHAIN ||
4185 Opcode == ISD::INTRINSIC_VOID) {
4186 return TLI->isKnownNeverNaNForTargetNode(Op, *this, SNaN, Depth);
4187 }
4188
4189 return false;
4190 }
4191 }
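// Usage sketch (illustrative, assuming an SDValue `X`): combines can skip
// NaN handling when this query succeeds, e.g.
//   if (DAG.isKnownNeverNaN(X))
//     ...; // X needs no quieting before a min/max lowering.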
4192
4193 bool SelectionDAG::isKnownNeverZeroFloat(SDValue Op) const {
4194 assert(Op.getValueType().isFloatingPoint() &&
4195 "Floating point type expected");
4196
4197 // If the value is a constant, we can obviously see if it is a zero or not.
4198 // TODO: Add BuildVector support.
4199 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
4200 return !C->isZero();
4201 return false;
4202 }
4203
4204 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
4205 assert(!Op.getValueType().isFloatingPoint() &&
4206 "Floating point types unsupported - use isKnownNeverZeroFloat");
4207
4208 // If the value is a constant, we can obviously see if it is a zero or not.
4209 if (ISD::matchUnaryPredicate(
4210 Op, [](ConstantSDNode *C) { return !C->isNullValue(); }))
4211 return true;
4212
4213 // TODO: Recognize more cases here.
4214 switch (Op.getOpcode()) {
4215 default: break;
4216 case ISD::OR:
4217 if (isKnownNeverZero(Op.getOperand(1)) ||
4218 isKnownNeverZero(Op.getOperand(0)))
4219 return true;
4220 break;
4221 }
4222
4223 return false;
4224 }
4225
4226 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
4227 // Check the obvious case.
4228 if (A == B) return true;
4229
4230 // Check for negative and positive zero.
4231 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
4232 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
4233 if (CA->isZero() && CB->isZero()) return true;
4234
4235 // Otherwise they may not be equal.
4236 return false;
4237 }
4238
4239 // FIXME: unify with llvm::haveNoCommonBitsSet.
4240 // FIXME: could also handle masked merge pattern (X & ~M) op (Y & M)
4241 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
4242 assert(A.getValueType() == B.getValueType() &&
4243 "Values must have the same type");
4244 return (computeKnownBits(A).Zero | computeKnownBits(B).Zero).isAllOnesValue();
4245 }
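// Example (illustrative, i8): for A = (and x, 0xF0) and B = (and y, 0x0F)
// the known-zero masks are 0x0F and 0xF0, whose union covers every bit, so
// A and B share no set bits and (add A, B) may be rewritten as (or A, B).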
4246
4247 static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT,
4248 ArrayRef<SDValue> Ops,
4249 SelectionDAG &DAG) {
4250 int NumOps = Ops.size();
4251 assert(NumOps != 0 && "Can't build an empty vector!");
4252 assert(VT.getVectorNumElements() == (unsigned)NumOps &&
4253 "Incorrect element count in BUILD_VECTOR!");
4254
4255 // BUILD_VECTOR of UNDEFs is UNDEF.
4256 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4257 return DAG.getUNDEF(VT);
4258
4259 // A BUILD_VECTOR of sequential extracts from one vector of the same type is the identity.
4260 SDValue IdentitySrc;
4261 bool IsIdentity = true;
4262 for (int i = 0; i != NumOps; ++i) {
4263 if (Ops[i].getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
4264 Ops[i].getOperand(0).getValueType() != VT ||
4265 (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) ||
4266 !isa<ConstantSDNode>(Ops[i].getOperand(1)) ||
4267 cast<ConstantSDNode>(Ops[i].getOperand(1))->getAPIntValue() != i) {
4268 IsIdentity = false;
4269 break;
4270 }
4271 IdentitySrc = Ops[i].getOperand(0);
4272 }
4273 if (IsIdentity)
4274 return IdentitySrc;
4275
4276 return SDValue();
4277 }
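// Example (illustrative): BUILD_VECTOR (EXTRACT_VECTOR_ELT V, 0),
// (EXTRACT_VECTOR_ELT V, 1), ... reassembles V element by element, so when
// V already has the result type the whole node folds to V itself.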
4278
4279 /// Try to simplify vector concatenation to an input value, undef, or build
4280 /// vector.
4281 static SDValue foldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
4282 ArrayRef<SDValue> Ops,
4283 SelectionDAG &DAG) {
4284 assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
4285 assert(llvm::all_of(Ops,
4286 [Ops](SDValue Op) {
4287 return Ops[0].getValueType() == Op.getValueType();
4288 }) &&
4289 "Concatenation of vectors with inconsistent value types!");
4290 assert((Ops.size() * Ops[0].getValueType().getVectorNumElements()) ==
4291 VT.getVectorNumElements() &&
4292 "Incorrect element count in vector concatenation!");
4293
4294 if (Ops.size() == 1)
4295 return Ops[0];
4296
4297 // Concat of UNDEFs is UNDEF.
4298 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4299 return DAG.getUNDEF(VT);
4300
4301 // Scan the operands and look for extract operations from a single source
4302 // that correspond to insertion at the same location via this concatenation:
4303 // concat (extract X, 0*subvec_elts), (extract X, 1*subvec_elts), ...
4304 SDValue IdentitySrc;
4305 bool IsIdentity = true;
4306 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
4307 SDValue Op = Ops[i];
4308 unsigned IdentityIndex = i * Op.getValueType().getVectorNumElements();
4309 if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
4310 Op.getOperand(0).getValueType() != VT ||
4311 (IdentitySrc && Op.getOperand(0) != IdentitySrc) ||
4312 !isa<ConstantSDNode>(Op.getOperand(1)) ||
4313 Op.getConstantOperandVal(1) != IdentityIndex) {
4314 IsIdentity = false;
4315 break;
4316 }
4317 assert((!IdentitySrc || IdentitySrc == Op.getOperand(0)) &&
4318 "Unexpected identity source vector for concat of extracts");
4319 IdentitySrc = Op.getOperand(0);
4320 }
4321 if (IsIdentity) {
4322 assert(IdentitySrc && "Failed to set source vector of extracts");
4323 return IdentitySrc;
4324 }
4325
4326 // A CONCAT_VECTORS with all UNDEF/BUILD_VECTOR operands can be
4327 // simplified to one big BUILD_VECTOR.
4328 // FIXME: Add support for SCALAR_TO_VECTOR as well.
4329 EVT SVT = VT.getScalarType();
4330 SmallVector<SDValue, 16> Elts;
4331 for (SDValue Op : Ops) {
4332 EVT OpVT = Op.getValueType();
4333 if (Op.isUndef())
4334 Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
4335 else if (Op.getOpcode() == ISD::BUILD_VECTOR)
4336 Elts.append(Op->op_begin(), Op->op_end());
4337 else
4338 return SDValue();
4339 }
4340
4341 // BUILD_VECTOR requires all inputs to be of the same type, find the
4342 // maximum type and extend them all.
4343 for (SDValue Op : Elts)
4344 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
4345
4346 if (SVT.bitsGT(VT.getScalarType()))
4347 for (SDValue &Op : Elts)
4348 Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
4349 ? DAG.getZExtOrTrunc(Op, DL, SVT)
4350 : DAG.getSExtOrTrunc(Op, DL, SVT);
4351
4352 SDValue V = DAG.getBuildVector(VT, DL, Elts);
4353 NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG);
4354 return V;
4355 }
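// Example (illustrative): concat (extract_subvector X, 0),
// (extract_subvector X, 2) over the two v2i32 halves of a v4i32 X folds to
// X itself, while all-UNDEF/BUILD_VECTOR operands instead flatten into one
// big BUILD_VECTOR of the scalar elements.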
4356
4357 /// Gets or creates the specified node.
4358 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
4359 FoldingSetNodeID ID;
4360 AddNodeIDNode(ID, Opcode, getVTList(VT), None);
4361 void *IP = nullptr;
4362 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
4363 return SDValue(E, 0);
4364
4365 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4366 getVTList(VT));
4367 CSEMap.InsertNode(N, IP);
4368
4369 InsertNode(N);
4370 SDValue V = SDValue(N, 0);
4371 NewSDValueDbgMsg(V, "Creating new node: ", this);
4372 return V;
4373 }
4374
4375 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4376 SDValue Operand, const SDNodeFlags Flags) {
4377 // Constant fold unary operations with an integer constant operand. Even an
4378 // opaque constant will be folded, because the folding of unary operations
4379 // doesn't create new constants with different values. Nevertheless, the
4380 // opaque flag is preserved during folding to prevent future folding with
4381 // other constants.
4382 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) {
4383 const APInt &Val = C->getAPIntValue();
4384 switch (Opcode) {
4385 default: break;
4386 case ISD::SIGN_EXTEND:
4387 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
4388 C->isTargetOpcode(), C->isOpaque());
4389 case ISD::TRUNCATE:
4390 if (C->isOpaque())
4391 break;
4392 LLVM_FALLTHROUGH;
4393 case ISD::ANY_EXTEND:
4394 case ISD::ZERO_EXTEND:
4395 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
4396 C->isTargetOpcode(), C->isOpaque());
4397 case ISD::UINT_TO_FP:
4398 case ISD::SINT_TO_FP: {
4399 APFloat apf(EVTToAPFloatSemantics(VT),
4400 APInt::getNullValue(VT.getSizeInBits()));
4401 (void)apf.convertFromAPInt(Val,
4402 Opcode==ISD::SINT_TO_FP,
4403 APFloat::rmNearestTiesToEven);
4404 return getConstantFP(apf, DL, VT);
4405 }
4406 case ISD::BITCAST:
4407 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16)
4408 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT);
4409 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
4410 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT);
4411 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
4412 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT);
4413 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128)
4414 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT);
4415 break;
4416 case ISD::ABS:
4417 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(),
4418 C->isOpaque());
4419 case ISD::BITREVERSE:
4420 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(),
4421 C->isOpaque());
4422 case ISD::BSWAP:
4423 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(),
4424 C->isOpaque());
4425 case ISD::CTPOP:
4426 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(),
4427 C->isOpaque());
4428 case ISD::CTLZ:
4429 case ISD::CTLZ_ZERO_UNDEF:
4430 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(),
4431 C->isOpaque());
4432 case ISD::CTTZ:
4433 case ISD::CTTZ_ZERO_UNDEF:
4434 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(),
4435 C->isOpaque());
4436 case ISD::FP16_TO_FP: {
4437 bool Ignored;
4438 APFloat FPV(APFloat::IEEEhalf(),
4439 (Val.getBitWidth() == 16) ? Val : Val.trunc(16));
4440
4441 // This can return overflow, underflow, or inexact; we don't care.
4442 // FIXME need to be more flexible about rounding mode.
4443 (void)FPV.convert(EVTToAPFloatSemantics(VT),
4444 APFloat::rmNearestTiesToEven, &Ignored);
4445 return getConstantFP(FPV, DL, VT);
4446 }
4447 }
4448 }
4449
4450 // Constant fold unary operations with a floating point constant operand.
4451 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) {
4452 APFloat V = C->getValueAPF(); // make copy
4453 switch (Opcode) {
4454 case ISD::FNEG:
4455 V.changeSign();
4456 return getConstantFP(V, DL, VT);
4457 case ISD::FABS:
4458 V.clearSign();
4459 return getConstantFP(V, DL, VT);
4460 case ISD::FCEIL: {
4461 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
4462 if (fs == APFloat::opOK || fs == APFloat::opInexact)
4463 return getConstantFP(V, DL, VT);
4464 break;
4465 }
4466 case ISD::FTRUNC: {
4467 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
4468 if (fs == APFloat::opOK || fs == APFloat::opInexact)
4469 return getConstantFP(V, DL, VT);
4470 break;
4471 }
4472 case ISD::FFLOOR: {
4473 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
4474 if (fs == APFloat::opOK || fs == APFloat::opInexact)
4475 return getConstantFP(V, DL, VT);
4476 break;
4477 }
4478 case ISD::FP_EXTEND: {
4479 bool ignored;
4480 // This can return overflow, underflow, or inexact; we don't care.
4481 // FIXME need to be more flexible about rounding mode.
4482 (void)V.convert(EVTToAPFloatSemantics(VT),
4483 APFloat::rmNearestTiesToEven, &ignored);
4484 return getConstantFP(V, DL, VT);
4485 }
4486 case ISD::FP_TO_SINT:
4487 case ISD::FP_TO_UINT: {
4488 bool ignored;
4489 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT);
4490 // FIXME need to be more flexible about rounding mode.
4491 APFloat::opStatus s =
4492 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored);
4493 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual
4494 break;
4495 return getConstant(IntVal, DL, VT);
4496 }
4497 case ISD::BITCAST:
4498 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16)
4499 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
4500 else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
4501 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
4502 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
4503 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
4504 break;
4505 case ISD::FP_TO_FP16: {
4506 bool Ignored;
4507 // This can return overflow, underflow, or inexact; we don't care.
4508 // FIXME need to be more flexible about rounding mode.
4509 (void)V.convert(APFloat::IEEEhalf(),
4510 APFloat::rmNearestTiesToEven, &Ignored);
4511 return getConstant(V.bitcastToAPInt(), DL, VT);
4512 }
4513 }
4514 }
4515
4516 // Constant fold unary operations with a vector integer or float operand.
4517 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) {
4518 if (BV->isConstant()) {
4519 switch (Opcode) {
4520 default:
4521 // FIXME: Entirely reasonable to perform folding of other unary
4522 // operations here as the need arises.
4523 break;
4524 case ISD::FNEG:
4525 case ISD::FABS:
4526 case ISD::FCEIL:
4527 case ISD::FTRUNC:
4528 case ISD::FFLOOR:
4529 case ISD::FP_EXTEND:
4530 case ISD::FP_TO_SINT:
4531 case ISD::FP_TO_UINT:
4532 case ISD::TRUNCATE:
4533 case ISD::ANY_EXTEND:
4534 case ISD::ZERO_EXTEND:
4535 case ISD::SIGN_EXTEND:
4536 case ISD::UINT_TO_FP:
4537 case ISD::SINT_TO_FP:
4538 case ISD::ABS:
4539 case ISD::BITREVERSE:
4540 case ISD::BSWAP:
4541 case ISD::CTLZ:
4542 case ISD::CTLZ_ZERO_UNDEF:
4543 case ISD::CTTZ:
4544 case ISD::CTTZ_ZERO_UNDEF:
4545 case ISD::CTPOP: {
4546 SDValue Ops = { Operand };
4547 if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
4548 return Fold;
4549 }
4550 }
4551 }
4552 }
4553
4554 unsigned OpOpcode = Operand.getNode()->getOpcode();
4555 switch (Opcode) {
4556 case ISD::TokenFactor:
4557 case ISD::MERGE_VALUES:
4558 case ISD::CONCAT_VECTORS:
4559 return Operand; // Factor, merge or concat of one node? No need.
4560 case ISD::BUILD_VECTOR: {
4561 // Attempt to simplify BUILD_VECTOR.
4562 SDValue Ops[] = {Operand};
4563 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
4564 return V;
4565 break;
4566 }
4567 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
4568 case ISD::FP_EXTEND:
4569 assert(VT.isFloatingPoint() &&
4570 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
4571 if (Operand.getValueType() == VT) return Operand; // noop conversion.
4572 assert((!VT.isVector() ||
4573 VT.getVectorNumElements() ==
4574 Operand.getValueType().getVectorNumElements()) &&
4575 "Vector element count mismatch!");
4576 assert(Operand.getValueType().bitsLT(VT) &&
4577 "Invalid fpext node, dst < src!");
4578 if (Operand.isUndef())
4579 return getUNDEF(VT);
4580 break;
4581 case ISD::FP_TO_SINT:
4582 case ISD::FP_TO_UINT:
4583 if (Operand.isUndef())
4584 return getUNDEF(VT);
4585 break;
4586 case ISD::SINT_TO_FP:
4587 case ISD::UINT_TO_FP:
4588 // [us]itofp(undef) = 0, because the result value is bounded.
4589 if (Operand.isUndef())
4590 return getConstantFP(0.0, DL, VT);
4591 break;
4592 case ISD::SIGN_EXTEND:
4593 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4594 "Invalid SIGN_EXTEND!");
4595 assert(VT.isVector() == Operand.getValueType().isVector() &&
4596            "SIGN_EXTEND result type should be vector iff the operand "
4597 "type is vector!");
4598 if (Operand.getValueType() == VT) return Operand; // noop extension
4599 assert((!VT.isVector() ||
4600 VT.getVectorNumElements() ==
4601 Operand.getValueType().getVectorNumElements()) &&
4602 "Vector element count mismatch!");
4603 assert(Operand.getValueType().bitsLT(VT) &&
4604 "Invalid sext node, dst < src!");
4605 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
4606 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4607 else if (OpOpcode == ISD::UNDEF)
4608 // sext(undef) = 0, because the top bits will all be the same.
4609 return getConstant(0, DL, VT);
4610 break;
4611 case ISD::ZERO_EXTEND:
4612 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4613 "Invalid ZERO_EXTEND!");
4614 assert(VT.isVector() == Operand.getValueType().isVector() &&
4615            "ZERO_EXTEND result type should be vector iff the operand "
4616 "type is vector!");
4617 if (Operand.getValueType() == VT) return Operand; // noop extension
4618 assert((!VT.isVector() ||
4619 VT.getVectorNumElements() ==
4620 Operand.getValueType().getVectorNumElements()) &&
4621 "Vector element count mismatch!");
4622 assert(Operand.getValueType().bitsLT(VT) &&
4623 "Invalid zext node, dst < src!");
4624 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x)
4625 return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0));
4626 else if (OpOpcode == ISD::UNDEF)
4627 // zext(undef) = 0, because the top bits will be zero.
4628 return getConstant(0, DL, VT);
4629 break;
4630 case ISD::ANY_EXTEND:
4631 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4632 "Invalid ANY_EXTEND!");
4633 assert(VT.isVector() == Operand.getValueType().isVector() &&
4634            "ANY_EXTEND result type should be vector iff the operand "
4635 "type is vector!");
4636 if (Operand.getValueType() == VT) return Operand; // noop extension
4637 assert((!VT.isVector() ||
4638 VT.getVectorNumElements() ==
4639 Operand.getValueType().getVectorNumElements()) &&
4640 "Vector element count mismatch!");
4641 assert(Operand.getValueType().bitsLT(VT) &&
4642 "Invalid anyext node, dst < src!");
4643
4644 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4645 OpOpcode == ISD::ANY_EXTEND)
4646 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
4647 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4648 else if (OpOpcode == ISD::UNDEF)
4649 return getUNDEF(VT);
4650
4651 // (ext (trunc x)) -> x
4652 if (OpOpcode == ISD::TRUNCATE) {
4653 SDValue OpOp = Operand.getOperand(0);
4654 if (OpOp.getValueType() == VT) {
4655 transferDbgValues(Operand, OpOp);
4656 return OpOp;
4657 }
4658 }
4659 break;
4660 case ISD::TRUNCATE:
4661 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4662 "Invalid TRUNCATE!");
4663 assert(VT.isVector() == Operand.getValueType().isVector() &&
4664            "TRUNCATE result type should be vector iff the operand "
4665 "type is vector!");
4666 if (Operand.getValueType() == VT) return Operand; // noop truncate
4667 assert((!VT.isVector() ||
4668 VT.getVectorNumElements() ==
4669 Operand.getValueType().getVectorNumElements()) &&
4670 "Vector element count mismatch!");
4671 assert(Operand.getValueType().bitsGT(VT) &&
4672 "Invalid truncate node, src < dst!");
4673 if (OpOpcode == ISD::TRUNCATE)
4674 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
4675 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4676 OpOpcode == ISD::ANY_EXTEND) {
4677 // If the source is smaller than the dest, we still need an extend.
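      // Illustrative example of the three outcomes below: truncating
      // (zext i8 X to i64) to i16 becomes (zext i8 X to i16); truncating
      // (zext i32 X to i64) to i16 becomes (trunc i32 X to i16); and if X is
      // already i16, the original X is returned unchanged.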
4678 if (Operand.getOperand(0).getValueType().getScalarType()
4679 .bitsLT(VT.getScalarType()))
4680 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4681 if (Operand.getOperand(0).getValueType().bitsGT(VT))
4682 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
4683 return Operand.getOperand(0);
4684 }
4685 if (OpOpcode == ISD::UNDEF)
4686 return getUNDEF(VT);
4687 break;
4688 case ISD::ANY_EXTEND_VECTOR_INREG:
4689 case ISD::ZERO_EXTEND_VECTOR_INREG:
4690 case ISD::SIGN_EXTEND_VECTOR_INREG:
4691 assert(VT.isVector() && "This DAG node is restricted to vector types.");
4692 assert(Operand.getValueType().bitsLE(VT) &&
4693 "The input must be the same size or smaller than the result.");
4694 assert(VT.getVectorNumElements() <
4695 Operand.getValueType().getVectorNumElements() &&
4696 "The destination vector type must have fewer lanes than the input.");
4697 break;
4698 case ISD::ABS:
4699 assert(VT.isInteger() && VT == Operand.getValueType() &&
4700 "Invalid ABS!");
4701 if (OpOpcode == ISD::UNDEF)
4702 return getUNDEF(VT);
4703 break;
4704 case ISD::BSWAP:
4705 assert(VT.isInteger() && VT == Operand.getValueType() &&
4706 "Invalid BSWAP!");
4707 assert((VT.getScalarSizeInBits() % 16 == 0) &&
4708 "BSWAP types must be a multiple of 16 bits!");
4709 if (OpOpcode == ISD::UNDEF)
4710 return getUNDEF(VT);
4711 break;
4712 case ISD::BITREVERSE:
4713 assert(VT.isInteger() && VT == Operand.getValueType() &&
4714 "Invalid BITREVERSE!");
4715 if (OpOpcode == ISD::UNDEF)
4716 return getUNDEF(VT);
4717 break;
4718 case ISD::BITCAST:
4719 // Basic sanity checking.
4720 assert(VT.getSizeInBits() == Operand.getValueSizeInBits() &&
4721 "Cannot BITCAST between types of different sizes!");
4722 if (VT == Operand.getValueType()) return Operand; // noop conversion.
4723 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
4724 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
4725 if (OpOpcode == ISD::UNDEF)
4726 return getUNDEF(VT);
4727 break;
4728 case ISD::SCALAR_TO_VECTOR:
4729 assert(VT.isVector() && !Operand.getValueType().isVector() &&
4730 (VT.getVectorElementType() == Operand.getValueType() ||
4731 (VT.getVectorElementType().isInteger() &&
4732 Operand.getValueType().isInteger() &&
4733 VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
4734 "Illegal SCALAR_TO_VECTOR node!");
4735 if (OpOpcode == ISD::UNDEF)
4736 return getUNDEF(VT);
4737 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
4738 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
4739 isa<ConstantSDNode>(Operand.getOperand(1)) &&
4740 Operand.getConstantOperandVal(1) == 0 &&
4741 Operand.getOperand(0).getValueType() == VT)
4742 return Operand.getOperand(0);
4743 break;
4744 case ISD::FNEG:
4745 // Negation of an unknown bag of bits is still completely undefined.
4746 if (OpOpcode == ISD::UNDEF)
4747 return getUNDEF(VT);
4748
4749 if (OpOpcode == ISD::FNEG) // --X -> X
4750 return Operand.getOperand(0);
4751 break;
4752 case ISD::FABS:
4753 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X)
4754 return getNode(ISD::FABS, DL, VT, Operand.getOperand(0));
4755 break;
4756 }
4757
4758 SDNode *N;
4759 SDVTList VTs = getVTList(VT);
4760 SDValue Ops[] = {Operand};
4761 if (VT != MVT::Glue) { // Don't CSE flag producing nodes
4762 FoldingSetNodeID ID;
4763 AddNodeIDNode(ID, Opcode, VTs, Ops);
4764 void *IP = nullptr;
4765 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
4766 E->intersectFlagsWith(Flags);
4767 return SDValue(E, 0);
4768 }
4769
4770 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4771 N->setFlags(Flags);
4772 createOperands(N, Ops);
4773 CSEMap.InsertNode(N, IP);
4774 } else {
4775 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4776 createOperands(N, Ops);
4777 }
4778
4779 InsertNode(N);
4780 SDValue V = SDValue(N, 0);
4781 NewSDValueDbgMsg(V, "Creating new node: ", this);
4782 return V;
4783 }
4784
4785 static llvm::Optional<APInt> FoldValue(unsigned Opcode, const APInt &C1,
4786 const APInt &C2) {
4787 switch (Opcode) {
4788 case ISD::ADD: return C1 + C2;
4789 case ISD::SUB: return C1 - C2;
4790 case ISD::MUL: return C1 * C2;
4791 case ISD::AND: return C1 & C2;
4792 case ISD::OR: return C1 | C2;
4793 case ISD::XOR: return C1 ^ C2;
4794 case ISD::SHL: return C1 << C2;
4795 case ISD::SRL: return C1.lshr(C2);
4796 case ISD::SRA: return C1.ashr(C2);
4797 case ISD::ROTL: return C1.rotl(C2);
4798 case ISD::ROTR: return C1.rotr(C2);
4799 case ISD::SMIN: return C1.sle(C2) ? C1 : C2;
4800 case ISD::SMAX: return C1.sge(C2) ? C1 : C2;
4801 case ISD::UMIN: return C1.ule(C2) ? C1 : C2;
4802 case ISD::UMAX: return C1.uge(C2) ? C1 : C2;
4803 case ISD::SADDSAT: return C1.sadd_sat(C2);
4804 case ISD::UADDSAT: return C1.uadd_sat(C2);
4805 case ISD::SSUBSAT: return C1.ssub_sat(C2);
4806 case ISD::USUBSAT: return C1.usub_sat(C2);
4807 case ISD::UDIV:
4808 if (!C2.getBoolValue())
4809 break;
4810 return C1.udiv(C2);
4811 case ISD::UREM:
4812 if (!C2.getBoolValue())
4813 break;
4814 return C1.urem(C2);
4815 case ISD::SDIV:
4816 if (!C2.getBoolValue())
4817 break;
4818 return C1.sdiv(C2);
4819 case ISD::SREM:
4820 if (!C2.getBoolValue())
4821 break;
4822 return C1.srem(C2);
4823 }
4824 return llvm::None;
4825 }
4826
4827 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
4828 EVT VT, const ConstantSDNode *C1,
4829 const ConstantSDNode *C2) {
4830 if (C1->isOpaque() || C2->isOpaque())
4831 return SDValue();
4832 if (Optional<APInt> Folded =
4833 FoldValue(Opcode, C1->getAPIntValue(), C2->getAPIntValue()))
4834 return getConstant(Folded.getValue(), DL, VT);
4835 return SDValue();
4836 }
4837
4838 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT,
4839 const GlobalAddressSDNode *GA,
4840 const SDNode *N2) {
4841 if (GA->getOpcode() != ISD::GlobalAddress)
4842 return SDValue();
4843 if (!TLI->isOffsetFoldingLegal(GA))
4844 return SDValue();
4845 auto *C2 = dyn_cast<ConstantSDNode>(N2);
4846 if (!C2)
4847 return SDValue();
4848 int64_t Offset = C2->getSExtValue();
4849 switch (Opcode) {
4850 case ISD::ADD: break;
4851 case ISD::SUB: Offset = -uint64_t(Offset); break;
4852 default: return SDValue();
4853 }
4854 return getGlobalAddress(GA->getGlobal(), SDLoc(C2), VT,
4855 GA->getOffset() + uint64_t(Offset));
4856 }
4857
4858 bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) {
4859 switch (Opcode) {
4860 case ISD::SDIV:
4861 case ISD::UDIV:
4862 case ISD::SREM:
4863 case ISD::UREM: {
4864 // If a divisor is zero/undef or any element of a divisor vector is
4865 // zero/undef, the whole op is undef.
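     // For example, both (udiv X, 0) and (sdiv X, <2 x i32> <i32 1, i32 0>)
     // are reported as undef by this check, so callers fold them to UNDEF.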
4866 assert(Ops.size() == 2 && "Div/rem should have 2 operands");
4867 SDValue Divisor = Ops[1];
4868 if (Divisor.isUndef() || isNullConstant(Divisor))
4869 return true;
4870
4871 return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) &&
4872 llvm::any_of(Divisor->op_values(),
4873 [](SDValue V) { return V.isUndef() ||
4874 isNullConstant(V); });
4875 // TODO: Handle signed overflow.
4876 }
4877 // TODO: Handle oversized shifts.
4878 default:
4879 return false;
4880 }
4881 }
4882
4883 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
4884 EVT VT, SDNode *N1, SDNode *N2) {
4885 // If the opcode is a target-specific ISD node, there's nothing we can
4886   // do here, and the operand rules may not line up with the checks below, so
4887 // bail early.
4888 if (Opcode >= ISD::BUILTIN_OP_END)
4889 return SDValue();
4890
4891 if (isUndef(Opcode, {SDValue(N1, 0), SDValue(N2, 0)}))
4892 return getUNDEF(VT);
4893
4894 // Handle the case of two scalars.
4895 if (auto *C1 = dyn_cast<ConstantSDNode>(N1)) {
4896 if (auto *C2 = dyn_cast<ConstantSDNode>(N2)) {
4897 SDValue Folded = FoldConstantArithmetic(Opcode, DL, VT, C1, C2);
4898 assert((!Folded || !VT.isVector()) &&
4899            "Can't fold vector ops with scalar operands");
4900 return Folded;
4901 }
4902 }
4903
4904 // fold (add Sym, c) -> Sym+c
4905 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N1))
4906 return FoldSymbolOffset(Opcode, VT, GA, N2);
4907 if (TLI->isCommutativeBinOp(Opcode))
4908 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N2))
4909 return FoldSymbolOffset(Opcode, VT, GA, N1);
4910
4911 // For vectors, extract each constant element and fold them individually.
4912 // Either input may be an undef value.
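   // For example, (add <2 x i32> <i32 1, i32 2>, <2 x i32> <i32 10, i32 20>)
   // folds lane by lane to <2 x i32> <i32 11, i32 22>.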
4913 auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
4914 if (!BV1 && !N1->isUndef())
4915 return SDValue();
4916 auto *BV2 = dyn_cast<BuildVectorSDNode>(N2);
4917 if (!BV2 && !N2->isUndef())
4918 return SDValue();
4919 // If both operands are undef, that's handled the same way as scalars.
4920 if (!BV1 && !BV2)
4921 return SDValue();
4922
4923 assert((!BV1 || !BV2 || BV1->getNumOperands() == BV2->getNumOperands()) &&
4924 "Vector binop with different number of elements in operands?");
4925
4926 EVT SVT = VT.getScalarType();
4927 EVT LegalSVT = SVT;
4928 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
4929 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
4930 if (LegalSVT.bitsLT(SVT))
4931 return SDValue();
4932 }
4933 SmallVector<SDValue, 4> Outputs;
4934 unsigned NumOps = BV1 ? BV1->getNumOperands() : BV2->getNumOperands();
4935 for (unsigned I = 0; I != NumOps; ++I) {
4936 SDValue V1 = BV1 ? BV1->getOperand(I) : getUNDEF(SVT);
4937 SDValue V2 = BV2 ? BV2->getOperand(I) : getUNDEF(SVT);
4938 if (SVT.isInteger()) {
4939 if (V1->getValueType(0).bitsGT(SVT))
4940 V1 = getNode(ISD::TRUNCATE, DL, SVT, V1);
4941 if (V2->getValueType(0).bitsGT(SVT))
4942 V2 = getNode(ISD::TRUNCATE, DL, SVT, V2);
4943 }
4944
4945 if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
4946 return SDValue();
4947
4948 // Fold one vector element.
4949 SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2);
4950 if (LegalSVT != SVT)
4951 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
4952
4953 // Scalar folding only succeeded if the result is a constant or UNDEF.
4954 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
4955 ScalarResult.getOpcode() != ISD::ConstantFP)
4956 return SDValue();
4957 Outputs.push_back(ScalarResult);
4958 }
4959
4960 assert(VT.getVectorNumElements() == Outputs.size() &&
4961 "Vector size mismatch!");
4962
4963 // We may have a vector type but a scalar result. Create a splat.
4964 Outputs.resize(VT.getVectorNumElements(), Outputs.back());
4965
4966 // Build a big vector out of the scalar elements we generated.
4967 return getBuildVector(VT, SDLoc(), Outputs);
4968 }
4969
4970 // TODO: Merge with FoldConstantArithmetic
4971 SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode,
4972 const SDLoc &DL, EVT VT,
4973 ArrayRef<SDValue> Ops,
4974 const SDNodeFlags Flags) {
4975 // If the opcode is a target-specific ISD node, there's nothing we can
4976   // do here, and the operand rules may not line up with the checks below, so
4977 // bail early.
4978 if (Opcode >= ISD::BUILTIN_OP_END)
4979 return SDValue();
4980
4981 if (isUndef(Opcode, Ops))
4982 return getUNDEF(VT);
4983
4984 // We can only fold vectors - maybe merge with FoldConstantArithmetic someday?
4985 if (!VT.isVector())
4986 return SDValue();
4987
4988 unsigned NumElts = VT.getVectorNumElements();
4989
4990 auto IsScalarOrSameVectorSize = [&](const SDValue &Op) {
4991 return !Op.getValueType().isVector() ||
4992 Op.getValueType().getVectorNumElements() == NumElts;
4993 };
4994
4995 auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) {
4996 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op);
4997 return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) ||
4998 (BV && BV->isConstant());
4999 };
5000
5001 // All operands must be vector types with the same number of elements as
5002 // the result type and must be either UNDEF or a build vector of constant
5003 // or UNDEF scalars.
5004 if (!llvm::all_of(Ops, IsConstantBuildVectorOrUndef) ||
5005 !llvm::all_of(Ops, IsScalarOrSameVectorSize))
5006 return SDValue();
5007
5008   // If we are comparing vectors, then the result needs to be an i1 boolean
5009 // that is then sign-extended back to the legal result type.
5010 EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType());
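   // A true lane is therefore sign-extended to all-ones (-1) in the legal
   // result type, matching the common all-ones vector-boolean convention.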
5011
5012   // Find a legal integer scalar type for constant promotion and
5013   // ensure that its scalar size is at least as large as the source's.
5014 EVT LegalSVT = VT.getScalarType();
5015 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
5016 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
5017 if (LegalSVT.bitsLT(VT.getScalarType()))
5018 return SDValue();
5019 }
5020
5021 // Constant fold each scalar lane separately.
5022 SmallVector<SDValue, 4> ScalarResults;
5023 for (unsigned i = 0; i != NumElts; i++) {
5024 SmallVector<SDValue, 4> ScalarOps;
5025 for (SDValue Op : Ops) {
5026 EVT InSVT = Op.getValueType().getScalarType();
5027 BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op);
5028 if (!InBV) {
5029 // We've checked that this is UNDEF or a constant of some kind.
5030 if (Op.isUndef())
5031 ScalarOps.push_back(getUNDEF(InSVT));
5032 else
5033 ScalarOps.push_back(Op);
5034 continue;
5035 }
5036
5037 SDValue ScalarOp = InBV->getOperand(i);
5038 EVT ScalarVT = ScalarOp.getValueType();
5039
5040 // Build vector (integer) scalar operands may need implicit
5041 // truncation - do this before constant folding.
5042 if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT))
5043 ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp);
5044
5045 ScalarOps.push_back(ScalarOp);
5046 }
5047
5048 // Constant fold the scalar operands.
5049 SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags);
5050
5051 // Legalize the (integer) scalar constant if necessary.
5052 if (LegalSVT != SVT)
5053 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
5054
5055 // Scalar folding only succeeded if the result is a constant or UNDEF.
5056 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
5057 ScalarResult.getOpcode() != ISD::ConstantFP)
5058 return SDValue();
5059 ScalarResults.push_back(ScalarResult);
5060 }
5061
5062 SDValue V = getBuildVector(VT, DL, ScalarResults);
5063 NewSDValueDbgMsg(V, "New node fold constant vector: ", this);
5064 return V;
5065 }
5066
5067 SDValue SelectionDAG::foldConstantFPMath(unsigned Opcode, const SDLoc &DL,
5068 EVT VT, SDValue N1, SDValue N2) {
5069 // TODO: We don't do any constant folding for strict FP opcodes here, but we
5070 // should. That will require dealing with a potentially non-default
5071 // rounding mode, checking the "opStatus" return value from the APFloat
5072 // math calculations, and possibly other variations.
5073 auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode());
5074 auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode());
5075 if (N1CFP && N2CFP) {
5076 APFloat C1 = N1CFP->getValueAPF(), C2 = N2CFP->getValueAPF();
5077 switch (Opcode) {
5078 case ISD::FADD:
5079 C1.add(C2, APFloat::rmNearestTiesToEven);
5080 return getConstantFP(C1, DL, VT);
5081 case ISD::FSUB:
5082 C1.subtract(C2, APFloat::rmNearestTiesToEven);
5083 return getConstantFP(C1, DL, VT);
5084 case ISD::FMUL:
5085 C1.multiply(C2, APFloat::rmNearestTiesToEven);
5086 return getConstantFP(C1, DL, VT);
5087 case ISD::FDIV:
5088 C1.divide(C2, APFloat::rmNearestTiesToEven);
5089 return getConstantFP(C1, DL, VT);
5090 case ISD::FREM:
5091 C1.mod(C2);
5092 return getConstantFP(C1, DL, VT);
5093 case ISD::FCOPYSIGN:
5094 C1.copySign(C2);
5095 return getConstantFP(C1, DL, VT);
5096 default: break;
5097 }
5098 }
5099 if (N1CFP && Opcode == ISD::FP_ROUND) {
5100 APFloat C1 = N1CFP->getValueAPF(); // make copy
5101 bool Unused;
5102 // This can return overflow, underflow, or inexact; we don't care.
5103 // FIXME need to be more flexible about rounding mode.
5104 (void) C1.convert(EVTToAPFloatSemantics(VT), APFloat::rmNearestTiesToEven,
5105 &Unused);
5106 return getConstantFP(C1, DL, VT);
5107 }
5108
5109 switch (Opcode) {
5110 case ISD::FADD:
5111 case ISD::FSUB:
5112 case ISD::FMUL:
5113 case ISD::FDIV:
5114 case ISD::FREM:
5115     // If both operands are undef, the result is undef. If one operand is undef,
5116 // the result is NaN. This should match the behavior of the IR optimizer.
5117 if (N1.isUndef() && N2.isUndef())
5118 return getUNDEF(VT);
5119 if (N1.isUndef() || N2.isUndef())
5120 return getConstantFP(APFloat::getNaN(EVTToAPFloatSemantics(VT)), DL, VT);
5121 }
5122 return SDValue();
5123 }
5124
5125 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5126 SDValue N1, SDValue N2, const SDNodeFlags Flags) {
5127 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
5128 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
5129 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
5130 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
5131
5132 // Canonicalize constant to RHS if commutative.
5133 if (TLI->isCommutativeBinOp(Opcode)) {
5134 if (N1C && !N2C) {
5135 std::swap(N1C, N2C);
5136 std::swap(N1, N2);
5137 } else if (N1CFP && !N2CFP) {
5138 std::swap(N1CFP, N2CFP);
5139 std::swap(N1, N2);
5140 }
5141 }
5142
5143 switch (Opcode) {
5144 default: break;
5145 case ISD::TokenFactor:
5146 assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
5147 N2.getValueType() == MVT::Other && "Invalid token factor!");
5148 // Fold trivial token factors.
5149 if (N1.getOpcode() == ISD::EntryToken) return N2;
5150 if (N2.getOpcode() == ISD::EntryToken) return N1;
5151 if (N1 == N2) return N1;
5152 break;
5153 case ISD::BUILD_VECTOR: {
5154 // Attempt to simplify BUILD_VECTOR.
5155 SDValue Ops[] = {N1, N2};
5156 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
5157 return V;
5158 break;
5159 }
5160 case ISD::CONCAT_VECTORS: {
5161 SDValue Ops[] = {N1, N2};
5162 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
5163 return V;
5164 break;
5165 }
5166 case ISD::AND:
5167 assert(VT.isInteger() && "This operator does not apply to FP types!");
5168 assert(N1.getValueType() == N2.getValueType() &&
5169 N1.getValueType() == VT && "Binary operator types must match!");
5170 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
5171 // worth handling here.
5172 if (N2C && N2C->isNullValue())
5173 return N2;
5174 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X
5175 return N1;
5176 break;
5177 case ISD::OR:
5178 case ISD::XOR:
5179 case ISD::ADD:
5180 case ISD::SUB:
5181 assert(VT.isInteger() && "This operator does not apply to FP types!");
5182 assert(N1.getValueType() == N2.getValueType() &&
5183 N1.getValueType() == VT && "Binary operator types must match!");
5184 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
5185 // it's worth handling here.
5186 if (N2C && N2C->isNullValue())
5187 return N1;
5188 break;
5189 case ISD::UDIV:
5190 case ISD::UREM:
5191 case ISD::MULHU:
5192 case ISD::MULHS:
5193 case ISD::MUL:
5194 case ISD::SDIV:
5195 case ISD::SREM:
5196 case ISD::SMIN:
5197 case ISD::SMAX:
5198 case ISD::UMIN:
5199 case ISD::UMAX:
5200 case ISD::SADDSAT:
5201 case ISD::SSUBSAT:
5202 case ISD::UADDSAT:
5203 case ISD::USUBSAT:
5204 assert(VT.isInteger() && "This operator does not apply to FP types!");
5205 assert(N1.getValueType() == N2.getValueType() &&
5206 N1.getValueType() == VT && "Binary operator types must match!");
5207 break;
5208 case ISD::FADD:
5209 case ISD::FSUB:
5210 case ISD::FMUL:
5211 case ISD::FDIV:
5212 case ISD::FREM:
5213 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
5214 assert(N1.getValueType() == N2.getValueType() &&
5215 N1.getValueType() == VT && "Binary operator types must match!");
5216 if (SDValue V = simplifyFPBinop(Opcode, N1, N2))
5217 return V;
5218 break;
5219 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match.
5220 assert(N1.getValueType() == VT &&
5221 N1.getValueType().isFloatingPoint() &&
5222 N2.getValueType().isFloatingPoint() &&
5223 "Invalid FCOPYSIGN!");
5224 break;
5225 case ISD::SHL:
5226 case ISD::SRA:
5227 case ISD::SRL:
5228 if (SDValue V = simplifyShift(N1, N2))
5229 return V;
5230 LLVM_FALLTHROUGH;
5231 case ISD::ROTL:
5232 case ISD::ROTR:
5233 assert(VT == N1.getValueType() &&
5234          "Shift operators' return type must be the same as their first arg");
5235 assert(VT.isInteger() && N2.getValueType().isInteger() &&
5236 "Shifts only work on integers");
5237 assert((!VT.isVector() || VT == N2.getValueType()) &&
5238          "Vector shift amounts must have the same type as their first arg");
5239 // Verify that the shift amount VT is big enough to hold valid shift
5240 // amounts. This catches things like trying to shift an i1024 value by an
5241 // i8, which is easy to fall into in generic code that uses
5242 // TLI.getShiftAmount().
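     // For example, an i1024 value needs a shift-amount type of at least
     // Log2_32_Ceil(1024) == 10 bits to encode every valid shift amount.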
5243 assert(N2.getValueSizeInBits() >= Log2_32_Ceil(N1.getValueSizeInBits()) &&
5244 "Invalid use of small shift amount with oversized value!");
5245
5246 // Always fold shifts of i1 values so the code generator doesn't need to
5247 // handle them. Since we know the size of the shift has to be less than the
5248 // size of the value, the shift/rotate count is guaranteed to be zero.
5249 if (VT == MVT::i1)
5250 return N1;
5251 if (N2C && N2C->isNullValue())
5252 return N1;
5253 break;
5254 case ISD::FP_ROUND:
5255 assert(VT.isFloatingPoint() &&
5256 N1.getValueType().isFloatingPoint() &&
5257 VT.bitsLE(N1.getValueType()) &&
5258 N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
5259 "Invalid FP_ROUND!");
5260 if (N1.getValueType() == VT) return N1; // noop conversion.
5261 break;
5262 case ISD::AssertSext:
5263 case ISD::AssertZext: {
5264 EVT EVT = cast<VTSDNode>(N2)->getVT();
5265 assert(VT == N1.getValueType() && "Not an inreg extend!");
5266 assert(VT.isInteger() && EVT.isInteger() &&
5267 "Cannot *_EXTEND_INREG FP types");
5268 assert(!EVT.isVector() &&
5269 "AssertSExt/AssertZExt type should be the vector element type "
5270 "rather than the vector type!");
5271 assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!");
5272 if (VT.getScalarType() == EVT) return N1; // noop assertion.
5273 break;
5274 }
5275 case ISD::SIGN_EXTEND_INREG: {
5276 EVT EVT = cast<VTSDNode>(N2)->getVT();
5277 assert(VT == N1.getValueType() && "Not an inreg extend!");
5278 assert(VT.isInteger() && EVT.isInteger() &&
5279 "Cannot *_EXTEND_INREG FP types");
5280 assert(EVT.isVector() == VT.isVector() &&
5281 "SIGN_EXTEND_INREG type should be vector iff the operand "
5282 "type is vector!");
5283 assert((!EVT.isVector() ||
5284 EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
5285 "Vector element counts must match in SIGN_EXTEND_INREG");
5286 assert(EVT.bitsLE(VT) && "Not extending!");
5287 if (EVT == VT) return N1; // Not actually extending
5288
5289 auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) {
5290 unsigned FromBits = EVT.getScalarSizeInBits();
5291 Val <<= Val.getBitWidth() - FromBits;
5292 Val.ashrInPlace(Val.getBitWidth() - FromBits);
5293 return getConstant(Val, DL, ConstantVT);
5294 };
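     // Worked example: sign-extending the low 8 bits of the 32-bit value
     // 0x000000FF first shifts left by 24 to get 0xFF000000, then
     // arithmetic-shifts right by 24 to get 0xFFFFFFFF, i.e. -1.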
5295
5296 if (N1C) {
5297 const APInt &Val = N1C->getAPIntValue();
5298 return SignExtendInReg(Val, VT);
5299 }
5300 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) {
5301 SmallVector<SDValue, 8> Ops;
5302 llvm::EVT OpVT = N1.getOperand(0).getValueType();
5303 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
5304 SDValue Op = N1.getOperand(i);
5305 if (Op.isUndef()) {
5306 Ops.push_back(getUNDEF(OpVT));
5307 continue;
5308 }
5309 ConstantSDNode *C = cast<ConstantSDNode>(Op);
5310 APInt Val = C->getAPIntValue();
5311 Ops.push_back(SignExtendInReg(Val, OpVT));
5312 }
5313 return getBuildVector(VT, DL, Ops);
5314 }
5315 break;
5316 }
5317 case ISD::EXTRACT_VECTOR_ELT:
5318 assert(VT.getSizeInBits() >= N1.getValueType().getScalarSizeInBits() &&
5319           "The result of EXTRACT_VECTOR_ELT must be at least as wide as the "
5320           "element type of the vector.");
5321
5322     // Extracting from an undefined value or using an undefined index is undefined.
5323 if (N1.isUndef() || N2.isUndef())
5324 return getUNDEF(VT);
5325
5326 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF
5327 if (N2C && N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements()))
5328 return getUNDEF(VT);
5329
5330 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
5331 // expanding copies of large vectors from registers.
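     // For example, extracting element 5 from a concat of two v4i32 vectors
     // (Factor == 4) becomes an extract of element 5 % 4 == 1 from operand
     // 5 / 4 == 1.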
5332 if (N2C &&
5333 N1.getOpcode() == ISD::CONCAT_VECTORS &&
5334 N1.getNumOperands() > 0) {
5335 unsigned Factor =
5336 N1.getOperand(0).getValueType().getVectorNumElements();
5337 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
5338 N1.getOperand(N2C->getZExtValue() / Factor),
5339 getConstant(N2C->getZExtValue() % Factor, DL,
5340 N2.getValueType()));
5341 }
5342
5343 // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
5344 // expanding large vector constants.
5345 if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
5346 SDValue Elt = N1.getOperand(N2C->getZExtValue());
5347
5348 if (VT != Elt.getValueType())
5349 // If the vector element type is not legal, the BUILD_VECTOR operands
5350 // are promoted and implicitly truncated, and the result implicitly
5351 // extended. Make that explicit here.
5352 Elt = getAnyExtOrTrunc(Elt, DL, VT);
5353
5354 return Elt;
5355 }
5356
5357 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
5358 // operations are lowered to scalars.
5359 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
5360       // If the indices are the same, return the inserted element; if the
5361       // indices are known to be different, extract the element from the
5362       // original vector.
5363 SDValue N1Op2 = N1.getOperand(2);
5364 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);
5365
5366 if (N1Op2C && N2C) {
5367 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
5368 if (VT == N1.getOperand(1).getValueType())
5369 return N1.getOperand(1);
5370 else
5371 return getSExtOrTrunc(N1.getOperand(1), DL, VT);
5372 }
5373
5374 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
5375 }
5376 }
5377
5378 // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed
5379 // when vector types are scalarized and v1iX is legal.
5380 // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx)
5381 if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
5382 N1.getValueType().getVectorNumElements() == 1) {
5383 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0),
5384 N1.getOperand(1));
5385 }
5386 break;
5387 case ISD::EXTRACT_ELEMENT:
5388 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
5389 assert(!N1.getValueType().isVector() && !VT.isVector() &&
5390 (N1.getValueType().isInteger() == VT.isInteger()) &&
5391 N1.getValueType() != VT &&
5392 "Wrong types for EXTRACT_ELEMENT!");
5393
5394 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
5395 // 64-bit integers into 32-bit parts. Instead of building the extract of
5396 // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
5397 if (N1.getOpcode() == ISD::BUILD_PAIR)
5398 return N1.getOperand(N2C->getZExtValue());
5399
5400 // EXTRACT_ELEMENT of a constant int is also very common.
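     // For example, element 1 of the i64 constant 0x11223344AABBCCDD, viewed
     // as two i32 halves, is shifted right by 32 and truncated, yielding
     // 0x11223344.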
5401 if (N1C) {
5402 unsigned ElementSize = VT.getSizeInBits();
5403 unsigned Shift = ElementSize * N2C->getZExtValue();
5404 APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift);
5405 return getConstant(ShiftedVal.trunc(ElementSize), DL, VT);
5406 }
5407 break;
5408 case ISD::EXTRACT_SUBVECTOR:
5409 if (VT.isSimple() && N1.getValueType().isSimple()) {
5410 assert(VT.isVector() && N1.getValueType().isVector() &&
5411              "Extract subvector VTs must be vectors!");
5412 assert(VT.getVectorElementType() ==
5413 N1.getValueType().getVectorElementType() &&
5414 "Extract subvector VTs must have the same element type!");
5415 assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
5416 "Extract subvector must be from larger vector to smaller vector!");
5417
5418 if (N2C) {
5419 assert((VT.getVectorNumElements() + N2C->getZExtValue()
5420 <= N1.getValueType().getVectorNumElements())
5421 && "Extract subvector overflow!");
5422 }
5423
5424 // Trivial extraction.
5425 if (VT.getSimpleVT() == N1.getSimpleValueType())
5426 return N1;
5427
5428 // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF.
5429 if (N1.isUndef())
5430 return getUNDEF(VT);
5431
5432 // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of
5433 // the concat have the same type as the extract.
5434 if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS &&
5435 N1.getNumOperands() > 0 &&
5436 VT == N1.getOperand(0).getValueType()) {
5437 unsigned Factor = VT.getVectorNumElements();
5438 return N1.getOperand(N2C->getZExtValue() / Factor);
5439 }
5440
5441 // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created
5442 // during shuffle legalization.
5443 if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) &&
5444 VT == N1.getOperand(1).getValueType())
5445 return N1.getOperand(1);
5446 }
5447 break;
5448 }
5449
5450 // Perform trivial constant folding.
5451 if (SDValue SV =
5452 FoldConstantArithmetic(Opcode, DL, VT, N1.getNode(), N2.getNode()))
5453 return SV;
5454
5455 if (SDValue V = foldConstantFPMath(Opcode, DL, VT, N1, N2))
5456 return V;
5457
5458 // Canonicalize an UNDEF to the RHS, even over a constant.
5459 if (N1.isUndef()) {
5460 if (TLI->isCommutativeBinOp(Opcode)) {
5461 std::swap(N1, N2);
5462 } else {
5463 switch (Opcode) {
5464 case ISD::SIGN_EXTEND_INREG:
5465 case ISD::SUB:
5466 return getUNDEF(VT); // fold op(undef, arg2) -> undef
5467 case ISD::UDIV:
5468 case ISD::SDIV:
5469 case ISD::UREM:
5470 case ISD::SREM:
5471 case ISD::SSUBSAT:
5472 case ISD::USUBSAT:
5473 return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0
5474 }
5475 }
5476 }
5477
5478 // Fold a bunch of operators when the RHS is undef.
5479 if (N2.isUndef()) {
5480 switch (Opcode) {
5481 case ISD::XOR:
5482 if (N1.isUndef())
5483 // Handle undef ^ undef -> 0 special case. This is a common
5484 // idiom (misuse).
5485 return getConstant(0, DL, VT);
5486 LLVM_FALLTHROUGH;
5487 case ISD::ADD:
5488 case ISD::SUB:
5489 case ISD::UDIV:
5490 case ISD::SDIV:
5491 case ISD::UREM:
5492 case ISD::SREM:
5493 return getUNDEF(VT); // fold op(arg1, undef) -> undef
5494 case ISD::MUL:
5495 case ISD::AND:
5496 case ISD::SSUBSAT:
5497 case ISD::USUBSAT:
5498 return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0
5499 case ISD::OR:
5500 case ISD::SADDSAT:
5501 case ISD::UADDSAT:
5502 return getAllOnesConstant(DL, VT);
5503 }
5504 }
5505
5506 // Memoize this node if possible.
5507 SDNode *N;
5508 SDVTList VTs = getVTList(VT);
5509 SDValue Ops[] = {N1, N2};
5510 if (VT != MVT::Glue) {
5511 FoldingSetNodeID ID;
5512 AddNodeIDNode(ID, Opcode, VTs, Ops);
5513 void *IP = nullptr;
5514 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
5515 E->intersectFlagsWith(Flags);
5516 return SDValue(E, 0);
5517 }
5518
5519 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5520 N->setFlags(Flags);
5521 createOperands(N, Ops);
5522 CSEMap.InsertNode(N, IP);
5523 } else {
5524 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5525 createOperands(N, Ops);
5526 }
5527
5528 InsertNode(N);
5529 SDValue V = SDValue(N, 0);
5530 NewSDValueDbgMsg(V, "Creating new node: ", this);
5531 return V;
5532 }
5533
5534 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5535 SDValue N1, SDValue N2, SDValue N3,
5536 const SDNodeFlags Flags) {
5537 // Perform various simplifications.
5538 switch (Opcode) {
5539 case ISD::FMA: {
5540 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
5541 assert(N1.getValueType() == VT && N2.getValueType() == VT &&
5542 N3.getValueType() == VT && "FMA types must match!");
5543 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
5544 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
5545 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
5546 if (N1CFP && N2CFP && N3CFP) {
5547 APFloat V1 = N1CFP->getValueAPF();
5548 const APFloat &V2 = N2CFP->getValueAPF();
5549 const APFloat &V3 = N3CFP->getValueAPF();
5550 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
5551 return getConstantFP(V1, DL, VT);
5552 }
5553 break;
5554 }
5555 case ISD::BUILD_VECTOR: {
5556 // Attempt to simplify BUILD_VECTOR.
5557 SDValue Ops[] = {N1, N2, N3};
5558 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
5559 return V;
5560 break;
5561 }
5562 case ISD::CONCAT_VECTORS: {
5563 SDValue Ops[] = {N1, N2, N3};
5564 if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
5565 return V;
5566 break;
5567 }
5568 case ISD::SETCC: {
5569 assert(VT.isInteger() && "SETCC result type must be an integer!");
5570 assert(N1.getValueType() == N2.getValueType() &&
5571 "SETCC operands must have the same type!");
5572 assert(VT.isVector() == N1.getValueType().isVector() &&
5573 "SETCC type should be vector iff the operand type is vector!");
5574 assert((!VT.isVector() ||
5575 VT.getVectorNumElements() == N1.getValueType().getVectorNumElements()) &&
5576 "SETCC vector element counts must match!");
5577     // Use FoldSetCC to simplify SETCCs.
5578 if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL))
5579 return V;
5580 // Vector constant folding.
5581 SDValue Ops[] = {N1, N2, N3};
5582 if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) {
5583 NewSDValueDbgMsg(V, "New node vector constant folding: ", this);
5584 return V;
5585 }
5586 break;
5587 }
5588 case ISD::SELECT:
5589 case ISD::VSELECT:
5590 if (SDValue V = simplifySelect(N1, N2, N3))
5591 return V;
5592 break;
5593 case ISD::VECTOR_SHUFFLE:
5594 llvm_unreachable("should use getVectorShuffle constructor!");
5595 case ISD::INSERT_VECTOR_ELT: {
5596 ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3);
5597 // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF
5598 if (N3C && N3C->getZExtValue() >= N1.getValueType().getVectorNumElements())
5599 return getUNDEF(VT);
5600
5601 // Undefined index can be assumed out-of-bounds, so that's UNDEF too.
5602 if (N3.isUndef())
5603 return getUNDEF(VT);
5604
5605 // If the inserted element is an UNDEF, just use the input vector.
5606 if (N2.isUndef())
5607 return N1;
5608
5609 break;
5610 }
5611 case ISD::INSERT_SUBVECTOR: {
5612 // Inserting undef into undef is still undef.
5613 if (N1.isUndef() && N2.isUndef())
5614 return getUNDEF(VT);
5615 SDValue Index = N3;
5616 if (VT.isSimple() && N1.getValueType().isSimple()
5617 && N2.getValueType().isSimple()) {
5618 assert(VT.isVector() && N1.getValueType().isVector() &&
5619 N2.getValueType().isVector() &&
5620              "Insert subvector VTs must be vectors");
5621 assert(VT == N1.getValueType() &&
5622 "Dest and insert subvector source types must match!");
5623 assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
5624 "Insert subvector must be from smaller vector to larger vector!");
5625 if (isa<ConstantSDNode>(Index)) {
5626 assert((N2.getValueType().getVectorNumElements() +
5627 cast<ConstantSDNode>(Index)->getZExtValue()
5628 <= VT.getVectorNumElements())
5629 && "Insert subvector overflow!");
5630 }
5631
5632 // Trivial insertion.
5633 if (VT.getSimpleVT() == N2.getSimpleValueType())
5634 return N2;
5635
5636 // If this is an insert of an extracted vector into an undef vector, we
5637 // can just use the input to the extract.
5638 if (N1.isUndef() && N2.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
5639 N2.getOperand(1) == N3 && N2.getOperand(0).getValueType() == VT)
5640 return N2.getOperand(0);
5641 }
5642 break;
5643 }
5644 case ISD::BITCAST:
5645 // Fold bit_convert nodes from a type to themselves.
5646 if (N1.getValueType() == VT)
5647 return N1;
5648 break;
5649 }
5650
5651 // Memoize node if it doesn't produce a flag.
5652 SDNode *N;
5653 SDVTList VTs = getVTList(VT);
5654 SDValue Ops[] = {N1, N2, N3};
5655 if (VT != MVT::Glue) {
5656 FoldingSetNodeID ID;
5657 AddNodeIDNode(ID, Opcode, VTs, Ops);
5658 void *IP = nullptr;
5659 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
5660 E->intersectFlagsWith(Flags);
5661 return SDValue(E, 0);
5662 }
5663
5664 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5665 N->setFlags(Flags);
5666 createOperands(N, Ops);
5667 CSEMap.InsertNode(N, IP);
5668 } else {
5669 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5670 createOperands(N, Ops);
5671 }
5672
5673 InsertNode(N);
5674 SDValue V = SDValue(N, 0);
5675 NewSDValueDbgMsg(V, "Creating new node: ", this);
5676 return V;
5677 }
5678
5679 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5680 SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
5681 SDValue Ops[] = { N1, N2, N3, N4 };
5682 return getNode(Opcode, DL, VT, Ops);
5683 }
5684
5685 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5686 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
5687 SDValue N5) {
5688 SDValue Ops[] = { N1, N2, N3, N4, N5 };
5689 return getNode(Opcode, DL, VT, Ops);
5690 }
5691
5692 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
5693 /// the incoming stack arguments to be loaded from the stack.
5694 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
5695 SmallVector<SDValue, 8> ArgChains;
5696
5697 // Include the original chain at the beginning of the list. When this is
5698 // used by target LowerCall hooks, this helps legalize find the
5699 // CALLSEQ_BEGIN node.
5700 ArgChains.push_back(Chain);
5701
5702 // Add a chain value for each stack argument.
5703 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
5704 UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
5705 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
5706 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
5707 if (FI->getIndex() < 0)
5708 ArgChains.push_back(SDValue(L, 1));
5709
5710 // Build a tokenfactor for all the chains.
5711 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
5712 }
5713
5714 /// getMemsetValue - Vectorized representation of the memset value
5715 /// operand.
5716 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
5717 const SDLoc &dl) {
5718 assert(!Value.isUndef());
5719
5720 unsigned NumBits = VT.getScalarSizeInBits();
5721 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
5722 assert(C->getAPIntValue().getBitWidth() == 8);
5723 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
5724 if (VT.isInteger()) {
5725 bool IsOpaque = VT.getSizeInBits() > 64 ||
5726 !DAG.getTargetLoweringInfo().isLegalStoreImmediate(C->getSExtValue());
5727 return DAG.getConstant(Val, dl, VT, false, IsOpaque);
5728 }
5729 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl,
5730 VT);
5731 }
5732
5733 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?");
5734 EVT IntVT = VT.getScalarType();
5735 if (!IntVT.isInteger())
5736 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits());
5737
5738 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value);
5739 if (NumBits > 8) {
5740 // Use a multiplication with 0x010101... to extend the input to the
5741 // required length.
5742 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
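       // For a 32-bit result the magic constant is 0x01010101, so a fill byte
       // of 0xAB multiplies out to 0xABABABAB.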
5743 Value = DAG.getNode(ISD::MUL, dl, IntVT, Value,
5744 DAG.getConstant(Magic, dl, IntVT));
5745 }
5746
5747 if (VT != Value.getValueType() && !VT.isInteger())
5748 Value = DAG.getBitcast(VT.getScalarType(), Value);
5749 if (VT != Value.getValueType())
5750 Value = DAG.getSplatBuildVector(VT, dl, Value);
5751
5752 return Value;
5753 }
5754
5755 /// getMemsetStringVal - Similar to getMemsetValue, except this is only
5756 /// used when a memcpy is turned into a memset because the source is a
5757 /// constant string ptr.
5758 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG,
5759 const TargetLowering &TLI,
5760 const ConstantDataArraySlice &Slice) {
5761 // Handle vector with all elements zero.
5762 if (Slice.Array == nullptr) {
5763 if (VT.isInteger())
5764 return DAG.getConstant(0, dl, VT);
5765 else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
5766 return DAG.getConstantFP(0.0, dl, VT);
5767 else if (VT.isVector()) {
5768 unsigned NumElts = VT.getVectorNumElements();
5769 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
5770 return DAG.getNode(ISD::BITCAST, dl, VT,
5771 DAG.getConstant(0, dl,
5772 EVT::getVectorVT(*DAG.getContext(),
5773 EltVT, NumElts)));
5774 } else
5775 llvm_unreachable("Expected type!");
5776 }
5777
5778 assert(!VT.isVector() && "Can't handle vector type here!");
5779 unsigned NumVTBits = VT.getSizeInBits();
5780 unsigned NumVTBytes = NumVTBits / 8;
5781 unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length));
5782
5783 APInt Val(NumVTBits, 0);
5784 if (DAG.getDataLayout().isLittleEndian()) {
5785 for (unsigned i = 0; i != NumBytes; ++i)
5786 Val |= (uint64_t)(unsigned char)Slice[i] << i*8;
5787 } else {
5788 for (unsigned i = 0; i != NumBytes; ++i)
5789 Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8;
5790 }
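   // For example, packing the bytes "abcd" into an i32 yields 0x64636261 on a
   // little-endian target and 0x61626364 on a big-endian one.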
5791
5792 // If the "cost" of materializing the integer immediate is less than the cost
5793 // of a load, then it is cost effective to turn the load into the immediate.
5794 Type *Ty = VT.getTypeForEVT(*DAG.getContext());
5795 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
5796 return DAG.getConstant(Val, dl, VT);
5797 return SDValue(nullptr, 0);
5798 }
5799
5800 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, int64_t Offset,
5801 const SDLoc &DL,
5802 const SDNodeFlags Flags) {
5803 EVT VT = Base.getValueType();
5804 return getMemBasePlusOffset(Base, getConstant(Offset, DL, VT), DL, Flags);
5805 }
5806
5807 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Ptr, SDValue Offset,
5808 const SDLoc &DL,
5809 const SDNodeFlags Flags) {
5810 assert(Offset.getValueType().isInteger());
5811 EVT BasePtrVT = Ptr.getValueType();
5812 return getNode(ISD::ADD, DL, BasePtrVT, Ptr, Offset, Flags);
5813 }
5814
5815 /// Returns true if memcpy source is constant data.
5816 static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) {
5817 uint64_t SrcDelta = 0;
5818 GlobalAddressSDNode *G = nullptr;
5819 if (Src.getOpcode() == ISD::GlobalAddress)
5820 G = cast<GlobalAddressSDNode>(Src);
5821 else if (Src.getOpcode() == ISD::ADD &&
5822 Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
5823 Src.getOperand(1).getOpcode() == ISD::Constant) {
5824 G = cast<GlobalAddressSDNode>(Src.getOperand(0));
5825 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
5826 }
5827 if (!G)
5828 return false;
5829
5830 return getConstantDataArrayInfo(G->getGlobal(), Slice, 8,
5831 SrcDelta + G->getOffset());
5832 }
5833
5834 static bool shouldLowerMemFuncForSize(const MachineFunction &MF,
5835 SelectionDAG &DAG) {
5836 // On Darwin, -Os means optimize for size without hurting performance, so
5837 // only really optimize for size when -Oz (MinSize) is used.
5838 if (MF.getTarget().getTargetTriple().isOSDarwin())
5839 return MF.getFunction().hasMinSize();
5840 return DAG.shouldOptForSize();
5841 }
5842
5843 static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
5844 SmallVector<SDValue, 32> &OutChains, unsigned From,
5845 unsigned To, SmallVector<SDValue, 16> &OutLoadChains,
5846 SmallVector<SDValue, 16> &OutStoreChains) {
5847 assert(OutLoadChains.size() && "Missing loads in memcpy inlining");
5848 assert(OutStoreChains.size() && "Missing stores in memcpy inlining");
5849 SmallVector<SDValue, 16> GluedLoadChains;
5850 for (unsigned i = From; i < To; ++i) {
5851 OutChains.push_back(OutLoadChains[i]);
5852 GluedLoadChains.push_back(OutLoadChains[i]);
5853 }
5854
5855 // Chain for all loads.
5856 SDValue LoadToken = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
5857 GluedLoadChains);
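   // Threading LoadToken as the chain of every rewritten store below ensures
   // that all loads in [From, To) complete before any of those stores executes.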
5858
5859 for (unsigned i = From; i < To; ++i) {
5860 StoreSDNode *ST = dyn_cast<StoreSDNode>(OutStoreChains[i]);
5861 SDValue NewStore = DAG.getTruncStore(LoadToken, dl, ST->getValue(),
5862 ST->getBasePtr(), ST->getMemoryVT(),
5863 ST->getMemOperand());
5864 OutChains.push_back(NewStore);
5865 }
5866 }
5867
5868 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
5869 SDValue Chain, SDValue Dst, SDValue Src,
5870 uint64_t Size, unsigned Alignment,
5871 bool isVol, bool AlwaysInline,
5872 MachinePointerInfo DstPtrInfo,
5873 MachinePointerInfo SrcPtrInfo) {
5874 // Turn a memcpy of undef to nop.
5875   // FIXME: We need to honor volatile even if Src is undef.
  if (Src.isUndef())
    return Chain;

  // Expand memcpy to a series of load and store ops if the size operand falls
  // below a certain threshold.
  // TODO: In the AlwaysInline case, if the size is big then generate a loop
  // rather than maybe a humongous number of loads and stores.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const DataLayout &DL = DAG.getDataLayout();
  LLVMContext &C = *DAG.getContext();
  std::vector<EVT> MemOps;
  bool DstAlignCanChange = false;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
  if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  unsigned SrcAlign = DAG.InferPtrAlignment(Src);
  if (Alignment > SrcAlign)
    SrcAlign = Alignment;
  ConstantDataArraySlice Slice;
  bool CopyFromConstant = isMemSrcFromConstant(Src, Slice);
  bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr;
  unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);

  if (!TLI.findOptimalMemOpLowering(
          MemOps, Limit, Size, (DstAlignCanChange ? 0 : Alignment),
          (isZeroConstant ? 0 : SrcAlign), /*IsMemset=*/false,
          /*ZeroMemset=*/false, /*MemcpyStrSrc=*/CopyFromConstant,
          /*AllowOverlap=*/!isVol, DstPtrInfo.getAddrSpace(),
          SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes()))
    return SDValue();

  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(C);
    unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty);

    // Don't promote to an alignment that would require dynamic stack
    // realignment.
    const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
    if (!TRI->needsStackRealignment(MF))
      while (NewAlign > Alignment &&
             DL.exceedsNaturalStackAlignment(Align(NewAlign)))
        NewAlign /= 2;

    if (NewAlign > Alignment) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
        MFI.setObjectAlignment(FI->getIndex(), NewAlign);
      Alignment = NewAlign;
    }
  }

  MachineMemOperand::Flags MMOFlags =
      isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
  SmallVector<SDValue, 16> OutLoadChains;
  SmallVector<SDValue, 16> OutStoreChains;
  SmallVector<SDValue, 32> OutChains;
  unsigned NumMemOps = MemOps.size();
  uint64_t SrcOff = 0, DstOff = 0;
  for (unsigned i = 0; i != NumMemOps; ++i) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    SDValue Value, Store;

    if (VTSize > Size) {
      // Issuing an unaligned load / store pair that overlaps with the previous
      // pair. Adjust the offset accordingly.
      assert(i == NumMemOps-1 && i != 0);
      SrcOff -= VTSize - Size;
      DstOff -= VTSize - Size;
    }

    if (CopyFromConstant &&
        (isZeroConstant || (VT.isInteger() && !VT.isVector()))) {
      // It's unlikely a store of a vector immediate can be done in a single
      // instruction. It would require a load from a constantpool first.
      // We only handle zero vectors here.
      // FIXME: Handle other cases where store of vector immediate is done in
      // a single instruction.
      ConstantDataArraySlice SubSlice;
      if (SrcOff < Slice.Length) {
        SubSlice = Slice;
        SubSlice.move(SrcOff);
      } else {
        // This is an out-of-bounds access and hence UB. Pretend we read zero.
        SubSlice.Array = nullptr;
        SubSlice.Offset = 0;
        SubSlice.Length = VTSize;
      }
      Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice);
      if (Value.getNode()) {
        Store = DAG.getStore(
            Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
            DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags);
        OutChains.push_back(Store);
      }
    }

    if (!Store.getNode()) {
      // The type might not be legal for the target. This should only happen
      // if the type is smaller than a legal type, as on PPC, so the right
      // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
      // to Load/Store if NVT==VT.
      // FIXME does the case above also need this?
      EVT NVT = TLI.getTypeToTransformTo(C, VT);
      assert(NVT.bitsGE(VT));

      bool isDereferenceable =
        SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
      MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
      if (isDereferenceable)
        SrcMMOFlags |= MachineMemOperand::MODereferenceable;

      Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
                             DAG.getMemBasePlusOffset(Src, SrcOff, dl),
                             SrcPtrInfo.getWithOffset(SrcOff), VT,
                             MinAlign(SrcAlign, SrcOff), SrcMMOFlags);
      OutLoadChains.push_back(Value.getValue(1));

      Store = DAG.getTruncStore(
          Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
          DstPtrInfo.getWithOffset(DstOff), VT, Alignment, MMOFlags);
      OutStoreChains.push_back(Store);
    }
    SrcOff += VTSize;
    DstOff += VTSize;
    Size -= VTSize;
  }

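  // The limit on glued stores comes from the target unless it is overridden
  // on the command line via MaxLdStGlue (0 means "no override").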
  unsigned GluedLdStLimit = MaxLdStGlue == 0 ?
                        TLI.getMaxGluedStoresPerMemcpy() : MaxLdStGlue;
  unsigned NumLdStInMemcpy = OutStoreChains.size();

  if (NumLdStInMemcpy) {
    // The memcpy may have been converted to a memset if it copies constants;
    // in that case there are only stores and no loads, and in the absence of
    // loads there is nothing to gang up.
    if ((GluedLdStLimit <= 1) || !EnableMemCpyDAGOpt) {
      // If the target does not care, just leave the chains as they are.
      for (unsigned i = 0; i < NumLdStInMemcpy; ++i) {
        OutChains.push_back(OutLoadChains[i]);
        OutChains.push_back(OutStoreChains[i]);
      }
    } else {
      // The number of ld/st pairs is no more than the limit set by the target.
      if (NumLdStInMemcpy <= GluedLdStLimit) {
        chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
                                     NumLdStInMemcpy, OutLoadChains,
                                     OutStoreChains);
      } else {
        unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit;
        unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit;
        unsigned GlueIter = 0;
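        // Glue full chunks of ld/st pairs working backwards from the end.
        // E.g. with 10 ld/st pairs and a limit of 4, this chains [6, 10)
        // and then [2, 6), leaving the residual [0, 2) to the code below.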
        for (unsigned cnt = 0; cnt < NumberLdChain; ++cnt) {
          unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit;
          unsigned IndexTo   = NumLdStInMemcpy - GlueIter;

          chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, IndexFrom, IndexTo,
                                       OutLoadChains, OutStoreChains);
          GlueIter += GluedLdStLimit;
        }

        // Residual ld/st.
        if (RemainingLdStInMemcpy) {
          chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
                                       RemainingLdStInMemcpy, OutLoadChains,
                                       OutStoreChains);
        }
      }
    }
  }
  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}

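/// Expand a memmove of a known constant Size inline. Unlike memcpy, all of
/// the loads are emitted before any of the stores, since the source and
/// destination may overlap.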
static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
                                        SDValue Chain, SDValue Dst, SDValue Src,
                                        uint64_t Size, unsigned Align,
                                        bool isVol, bool AlwaysInline,
                                        MachinePointerInfo DstPtrInfo,
                                        MachinePointerInfo SrcPtrInfo) {
  // Turn a memmove of undef to nop.
  // FIXME: We need to honor volatile even if Src is undef.
  if (Src.isUndef())
    return Chain;

  // Expand memmove to a series of load and store ops if the size operand falls
  // below a certain threshold.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const DataLayout &DL = DAG.getDataLayout();
  LLVMContext &C = *DAG.getContext();
  std::vector<EVT> MemOps;
  bool DstAlignCanChange = false;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
  if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  unsigned SrcAlign = DAG.InferPtrAlignment(Src);
  if (Align > SrcAlign)
    SrcAlign = Align;
  unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
  // FIXME: `AllowOverlap` should really be `!isVol` but there is a bug in
  // findOptimalMemOpLowering. Meanwhile, setting it to `false` produces the
  // correct code.
  bool AllowOverlap = false;
  if (!TLI.findOptimalMemOpLowering(
          MemOps, Limit, Size, (DstAlignCanChange ? 0 : Align), SrcAlign,
          /*IsMemset=*/false, /*ZeroMemset=*/false, /*MemcpyStrSrc=*/false,
          AllowOverlap, DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
          MF.getFunction().getAttributes()))
    return SDValue();

  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(C);
    unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty);
    if (NewAlign > Align) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
        MFI.setObjectAlignment(FI->getIndex(), NewAlign);
      Align = NewAlign;
    }
  }

  MachineMemOperand::Flags MMOFlags =
      isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
  uint64_t SrcOff = 0, DstOff = 0;
  SmallVector<SDValue, 8> LoadValues;
  SmallVector<SDValue, 8> LoadChains;
  SmallVector<SDValue, 8> OutChains;
  unsigned NumMemOps = MemOps.size();
  for (unsigned i = 0; i < NumMemOps; i++) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    SDValue Value;

    bool isDereferenceable =
      SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
    MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
    if (isDereferenceable)
      SrcMMOFlags |= MachineMemOperand::MODereferenceable;

    Value =
        DAG.getLoad(VT, dl, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl),
                    SrcPtrInfo.getWithOffset(SrcOff), SrcAlign, SrcMMOFlags);
    LoadValues.push_back(Value);
    LoadChains.push_back(Value.getValue(1));
    SrcOff += VTSize;
  }
  Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
  OutChains.clear();
  for (unsigned i = 0; i < NumMemOps; i++) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    SDValue Store;

    Store = DAG.getStore(Chain, dl, LoadValues[i],
                         DAG.getMemBasePlusOffset(Dst, DstOff, dl),
                         DstPtrInfo.getWithOffset(DstOff), Align, MMOFlags);
    OutChains.push_back(Store);
    DstOff += VTSize;
  }

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}

/// Lower the call to the 'memset' intrinsic function into a series of store
/// operations.
///
/// \param DAG Selection DAG where lowered code is placed.
/// \param dl Link to corresponding IR location.
/// \param Chain Control flow dependency.
/// \param Dst Pointer to destination memory location.
/// \param Src Value of byte to write into the memory.
/// \param Size Number of bytes to write.
/// \param Align Alignment of the destination in bytes.
/// \param isVol True if destination is volatile.
/// \param DstPtrInfo IR information on the memory pointer.
/// \returns The new head of the control flow if lowering was successful, an
/// empty SDValue otherwise.
///
/// The function tries to replace the 'llvm.memset' intrinsic with several
/// store operations and value calculation code. This is usually profitable
/// for small memory sizes.
static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
                               SDValue Chain, SDValue Dst, SDValue Src,
                               uint64_t Size, unsigned Align, bool isVol,
                               MachinePointerInfo DstPtrInfo) {
  // Turn a memset of undef to nop.
  // FIXME: We need to honor volatile even if Src is undef.
  if (Src.isUndef())
    return Chain;

  // Expand memset to a series of load/store ops if the size operand
  // falls below a certain threshold.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  std::vector<EVT> MemOps;
  bool DstAlignCanChange = false;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool OptSize = shouldLowerMemFuncForSize(MF, DAG);
  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
  if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
    DstAlignCanChange = true;
  bool IsZeroVal =
    isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
  if (!TLI.findOptimalMemOpLowering(
          MemOps, TLI.getMaxStoresPerMemset(OptSize), Size,
          (DstAlignCanChange ? 0 : Align), 0, /*IsMemset=*/true,
          /*ZeroMemset=*/IsZeroVal, /*MemcpyStrSrc=*/false,
          /*AllowOverlap=*/!isVol, DstPtrInfo.getAddrSpace(), ~0u,
          MF.getFunction().getAttributes()))
    return SDValue();

  if (DstAlignCanChange) {
    Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
    unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
    if (NewAlign > Align) {
      // Give the stack frame object a larger alignment if needed.
      if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
        MFI.setObjectAlignment(FI->getIndex(), NewAlign);
      Align = NewAlign;
    }
  }

  SmallVector<SDValue, 8> OutChains;
  uint64_t DstOff = 0;
  unsigned NumMemOps = MemOps.size();

  // Find the largest store and generate the bit pattern for it.
  EVT LargestVT = MemOps[0];
  for (unsigned i = 1; i < NumMemOps; i++)
    if (MemOps[i].bitsGT(LargestVT))
      LargestVT = MemOps[i];
  SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);

  for (unsigned i = 0; i < NumMemOps; i++) {
    EVT VT = MemOps[i];
    unsigned VTSize = VT.getSizeInBits() / 8;
    if (VTSize > Size) {
      // Issuing an unaligned load / store pair that overlaps with the previous
      // pair. Adjust the offset accordingly.
      assert(i == NumMemOps-1 && i != 0);
      DstOff -= VTSize - Size;
    }

    // If this store is smaller than the largest store see whether we can get
    // the smaller value for free with a truncate.
    SDValue Value = MemSetValue;
    if (VT.bitsLT(LargestVT)) {
      if (!LargestVT.isVector() && !VT.isVector() &&
          TLI.isTruncateFree(LargestVT, VT))
        Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
      else
        Value = getMemsetValue(Src, VT, DAG, dl);
    }
    assert(Value.getValueType() == VT && "Value with wrong type.");
    SDValue Store = DAG.getStore(
        Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
        DstPtrInfo.getWithOffset(DstOff), Align,
        isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone);
    OutChains.push_back(Store);
    DstOff += VT.getSizeInBits() / 8;
    Size -= VTSize;
  }

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}

static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI,
                                            unsigned AS) {
  // Lowering memcpy / memset / memmove intrinsics to calls is only valid if
  // all pointer operands can be losslessly bitcasted to pointers of address
  // space 0.
  if (AS != 0 && !TLI->isNoopAddrSpaceCast(AS, 0)) {
    report_fatal_error("cannot lower memory intrinsic in address space " +
                       Twine(AS));
  }
}

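/// Lower a memcpy: first try an inline expansion of loads and stores when the
/// size is a small enough constant, then target-specific lowering, and
/// finally fall back to a memcpy libcall.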
SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
                                SDValue Src, SDValue Size, unsigned Align,
                                bool isVol, bool AlwaysInline, bool isTailCall,
                                MachinePointerInfo DstPtrInfo,
                                MachinePointerInfo SrcPtrInfo) {
  assert(Align && "The SDAG layer expects explicit alignment and reserves 0");

  // Check to see if we should lower the memcpy to loads and stores first.
  // For cases within the target-specified limits, this is the best choice.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  if (ConstantSize) {
    // Memcpy with size zero? Just return the original chain.
    if (ConstantSize->isNullValue())
      return Chain;

    SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
                                             ConstantSize->getZExtValue(), Align,
                                             isVol, false, DstPtrInfo, SrcPtrInfo);
    if (Result.getNode())
      return Result;
  }

  // Then check to see if we should lower the memcpy with target-specific
  // code. If the target chooses to do this, this is the next best.
  if (TSI) {
    SDValue Result = TSI->EmitTargetCodeForMemcpy(
        *this, dl, Chain, Dst, Src, Size, Align, isVol, AlwaysInline,
        DstPtrInfo, SrcPtrInfo);
    if (Result.getNode())
      return Result;
  }

  // If we really need inline code and the target declined to provide it,
  // use a (potentially long) sequence of loads and stores.
  if (AlwaysInline) {
    assert(ConstantSize && "AlwaysInline requires a constant size!");
    return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
                                   ConstantSize->getZExtValue(), Align, isVol,
                                   true, DstPtrInfo, SrcPtrInfo);
  }

  checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
  checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());

  // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
  // memcpy is not guaranteed to be safe. libc memcpys aren't required to
  // respect volatile, so they may do things like read or write memory
  // beyond the given memory regions. But fixing this isn't easy, and most
  // people don't care.

  // Emit a library call.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = Type::getInt8PtrTy(*getContext());
  Entry.Node = Dst; Args.push_back(Entry);
  Entry.Node = Src; Args.push_back(Entry);

  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
  Entry.Node = Size; Args.push_back(Entry);
  // FIXME: pass in SDLoc
  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY),
                    Dst.getValueType().getTypeForEVT(*getContext()),
                    getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
                                      TLI->getPointerTy(getDataLayout())),
                    std::move(Args))
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}

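/// Lower an atomic element-wise memcpy to a call to the corresponding
/// element-unordered-atomic libcall, selected by the element size.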
SDValue SelectionDAG::getAtomicMemcpy(SDValue Chain, const SDLoc &dl,
                                      SDValue Dst, unsigned DstAlign,
                                      SDValue Src, unsigned SrcAlign,
                                      SDValue Size, Type *SizeTy,
                                      unsigned ElemSz, bool isTailCall,
                                      MachinePointerInfo DstPtrInfo,
                                      MachinePointerInfo SrcPtrInfo) {
  // Emit a library call.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
  Entry.Node = Dst;
  Args.push_back(Entry);

  Entry.Node = Src;
  Args.push_back(Entry);

  Entry.Ty = SizeTy;
  Entry.Node = Size;
  Args.push_back(Entry);

  RTLIB::Libcall LibraryCall =
      RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElemSz);
  if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
    report_fatal_error("Unsupported element size");

  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
                    Type::getVoidTy(*getContext()),
                    getExternalSymbol(TLI->getLibcallName(LibraryCall),
                                      TLI->getPointerTy(getDataLayout())),
                    std::move(Args))
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}

SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
                                 SDValue Src, SDValue Size, unsigned Align,
                                 bool isVol, bool isTailCall,
                                 MachinePointerInfo DstPtrInfo,
                                 MachinePointerInfo SrcPtrInfo) {
  assert(Align && "The SDAG layer expects explicit alignment and reserves 0");

  // Check to see if we should lower the memmove to loads and stores first.
  // For cases within the target-specified limits, this is the best choice.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  if (ConstantSize) {
    // Memmove with size zero? Just return the original chain.
    if (ConstantSize->isNullValue())
      return Chain;

    SDValue Result =
        getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
                                 ConstantSize->getZExtValue(), Align, isVol,
                                 false, DstPtrInfo, SrcPtrInfo);
    if (Result.getNode())
      return Result;
  }

  // Then check to see if we should lower the memmove with target-specific
  // code. If the target chooses to do this, this is the next best.
  if (TSI) {
    SDValue Result = TSI->EmitTargetCodeForMemmove(
        *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo);
    if (Result.getNode())
      return Result;
  }

  checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
  checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());

  // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
  // not be safe. See memcpy above for more details.

  // Emit a library call.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = Type::getInt8PtrTy(*getContext());
  Entry.Node = Dst; Args.push_back(Entry);
  Entry.Node = Src; Args.push_back(Entry);

  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
  Entry.Node = Size; Args.push_back(Entry);
  // FIXME: pass in SDLoc
  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
                    Dst.getValueType().getTypeForEVT(*getContext()),
                    getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
                                      TLI->getPointerTy(getDataLayout())),
                    std::move(Args))
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}

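/// Lower an atomic element-wise memmove to a call to the corresponding
/// element-unordered-atomic libcall, selected by the element size.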
SDValue SelectionDAG::getAtomicMemmove(SDValue Chain, const SDLoc &dl,
                                       SDValue Dst, unsigned DstAlign,
                                       SDValue Src, unsigned SrcAlign,
                                       SDValue Size, Type *SizeTy,
                                       unsigned ElemSz, bool isTailCall,
                                       MachinePointerInfo DstPtrInfo,
                                       MachinePointerInfo SrcPtrInfo) {
  // Emit a library call.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
  Entry.Node = Dst;
  Args.push_back(Entry);

  Entry.Node = Src;
  Args.push_back(Entry);

  Entry.Ty = SizeTy;
  Entry.Node = Size;
  Args.push_back(Entry);

  RTLIB::Libcall LibraryCall =
      RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(ElemSz);
  if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
    report_fatal_error("Unsupported element size");

  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
                    Type::getVoidTy(*getContext()),
                    getExternalSymbol(TLI->getLibcallName(LibraryCall),
                                      TLI->getPointerTy(getDataLayout())),
                    std::move(Args))
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}

SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
                                SDValue Src, SDValue Size, unsigned Align,
                                bool isVol, bool isTailCall,
                                MachinePointerInfo DstPtrInfo) {
  assert(Align && "The SDAG layer expects explicit alignment and reserves 0");

  // Check to see if we should lower the memset to stores first.
  // For cases within the target-specified limits, this is the best choice.
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
  if (ConstantSize) {
    // Memset with size zero? Just return the original chain.
    if (ConstantSize->isNullValue())
      return Chain;

    SDValue Result =
        getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
                        Align, isVol, DstPtrInfo);

    if (Result.getNode())
      return Result;
  }

  // Then check to see if we should lower the memset with target-specific
  // code. If the target chooses to do this, this is the next best.
  if (TSI) {
    SDValue Result = TSI->EmitTargetCodeForMemset(
        *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo);
    if (Result.getNode())
      return Result;
  }

  checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());

  // Emit a library call.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Node = Dst; Entry.Ty = Type::getInt8PtrTy(*getContext());
  Args.push_back(Entry);
  Entry.Node = Src;
  Entry.Ty = Src.getValueType().getTypeForEVT(*getContext());
  Args.push_back(Entry);
  Entry.Node = Size;
  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
  Args.push_back(Entry);

  // FIXME: pass in SDLoc
  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
                    Dst.getValueType().getTypeForEVT(*getContext()),
                    getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
                                      TLI->getPointerTy(getDataLayout())),
                    std::move(Args))
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}

SDValue SelectionDAG::getAtomicMemset(SDValue Chain, const SDLoc &dl,
                                      SDValue Dst, unsigned DstAlign,
                                      SDValue Value, SDValue Size, Type *SizeTy,
                                      unsigned ElemSz, bool isTailCall,
                                      MachinePointerInfo DstPtrInfo) {
  // Emit a library call.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
  Entry.Node = Dst;
  Args.push_back(Entry);

  Entry.Ty = Type::getInt8Ty(*getContext());
  Entry.Node = Value;
  Args.push_back(Entry);

  Entry.Ty = SizeTy;
  Entry.Node = Size;
  Args.push_back(Entry);

  RTLIB::Libcall LibraryCall =
      RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(ElemSz);
  if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
    report_fatal_error("Unsupported element size");

  TargetLowering::CallLoweringInfo CLI(*this);
  CLI.setDebugLoc(dl)
      .setChain(Chain)
      .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
                    Type::getVoidTy(*getContext()),
                    getExternalSymbol(TLI->getLibcallName(LibraryCall),
                                      TLI->getPointerTy(getDataLayout())),
                    std::move(Args))
      .setDiscardResult()
      .setTailCall(isTailCall);

  std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
  return CallResult.second;
}

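/// Create (or CSE) an atomic node with the given value type list and
/// operands. The node is uniqued in the CSE map keyed on the memory VT,
/// operands and address space, so an equivalent existing node is reused.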
SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
                                SDVTList VTList, ArrayRef<SDValue> Ops,
                                MachineMemOperand *MMO) {
  FoldingSetNodeID ID;
  ID.AddInteger(MemVT.getRawBits());
  AddNodeIDNode(ID, Opcode, VTList, Ops);
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void* IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<AtomicSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }

  auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
                                    VTList, MemVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  return SDValue(N, 0);
}

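/// Create an ATOMIC_CMP_SWAP or ATOMIC_CMP_SWAP_WITH_SUCCESS node; the
/// comparison and swap operands must have the same type.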
SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl,
                                       EVT MemVT, SDVTList VTs, SDValue Chain,
                                       SDValue Ptr, SDValue Cmp, SDValue Swp,
                                       MachineMemOperand *MMO) {
  assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
         Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");

  SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
}

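/// Create an atomic read-modify-write node (or an ATOMIC_STORE). RMW nodes
/// produce both the loaded value and a chain; ATOMIC_STORE produces only a
/// chain.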
SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
                                SDValue Chain, SDValue Ptr, SDValue Val,
                                MachineMemOperand *MMO) {
  assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
          Opcode == ISD::ATOMIC_LOAD_SUB ||
          Opcode == ISD::ATOMIC_LOAD_AND ||
          Opcode == ISD::ATOMIC_LOAD_CLR ||
          Opcode == ISD::ATOMIC_LOAD_OR ||
          Opcode == ISD::ATOMIC_LOAD_XOR ||
          Opcode == ISD::ATOMIC_LOAD_NAND ||
          Opcode == ISD::ATOMIC_LOAD_MIN ||
          Opcode == ISD::ATOMIC_LOAD_MAX ||
          Opcode == ISD::ATOMIC_LOAD_UMIN ||
          Opcode == ISD::ATOMIC_LOAD_UMAX ||
          Opcode == ISD::ATOMIC_LOAD_FADD ||
          Opcode == ISD::ATOMIC_LOAD_FSUB ||
          Opcode == ISD::ATOMIC_SWAP ||
          Opcode == ISD::ATOMIC_STORE) &&
         "Invalid Atomic Op");

  EVT VT = Val.getValueType();

  SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
                                               getVTList(VT, MVT::Other);
  SDValue Ops[] = {Chain, Ptr, Val};
  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
}

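/// Create an ATOMIC_LOAD node producing the loaded value and a chain.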
SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
                                EVT VT, SDValue Chain, SDValue Ptr,
                                MachineMemOperand *MMO) {
  assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");

  SDVTList VTs = getVTList(VT, MVT::Other);
  SDValue Ops[] = {Chain, Ptr};
  return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
}

/// getMergeValues - Create a MERGE_VALUES node from the given operands.
SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) {
  if (Ops.size() == 1)
    return Ops[0];

  SmallVector<EVT, 4> VTs;
  VTs.reserve(Ops.size());
  for (unsigned i = 0; i < Ops.size(); ++i)
    VTs.push_back(Ops[i].getValueType());
  return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
}

SDValue SelectionDAG::getMemIntrinsicNode(
    unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
    EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align,
    MachineMemOperand::Flags Flags, uint64_t Size, const AAMDNodes &AAInfo) {
  if (Align == 0) // Ensure that codegen never sees alignment 0
    Align = getEVTAlignment(MemVT);

  if (!Size && MemVT.isScalableVector())
    Size = MemoryLocation::UnknownSize;
  else if (!Size)
    Size = MemVT.getStoreSize();

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO =
      MF.getMachineMemOperand(PtrInfo, Flags, Size, Align, AAInfo);

  return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
}

SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl,
                                          SDVTList VTList,
                                          ArrayRef<SDValue> Ops, EVT MemVT,
                                          MachineMemOperand *MMO) {
  assert((Opcode == ISD::INTRINSIC_VOID ||
          Opcode == ISD::INTRINSIC_W_CHAIN ||
          Opcode == ISD::PREFETCH ||
          Opcode == ISD::LIFETIME_START ||
          Opcode == ISD::LIFETIME_END ||
          ((int)Opcode <= std::numeric_limits<int>::max() &&
           (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
         "Opcode is not a memory-accessing opcode!");

  // Memoize the node unless it returns a flag.
  MemIntrinsicSDNode *N;
  if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops);
    ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>(
        Opcode, dl.getIROrder(), VTList, MemVT, MMO));
    ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
      cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
      return SDValue(E, 0);
    }

    N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
                                      VTList, MemVT, MMO);
    createOperands(N, Ops);

    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
                                      VTList, MemVT, MMO);
    createOperands(N, Ops);
  }
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

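/// Create a LIFETIME_START or LIFETIME_END node operating on a frame index,
/// uniqued on the frame index, size and offset.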
SDValue SelectionDAG::getLifetimeNode(bool IsStart, const SDLoc &dl,
                                      SDValue Chain, int FrameIndex,
                                      int64_t Size, int64_t Offset) {
  const unsigned Opcode = IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END;
  const auto VTs = getVTList(MVT::Other);
  SDValue Ops[2] = {
      Chain,
      getFrameIndex(FrameIndex,
                    getTargetLoweringInfo().getFrameIndexTy(getDataLayout()),
                    true)};

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opcode, VTs, Ops);
  ID.AddInteger(FrameIndex);
  ID.AddInteger(Size);
  ID.AddInteger(Offset);
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  LifetimeSDNode *N = newSDNode<LifetimeSDNode>(
      Opcode, dl.getIROrder(), dl.getDebugLoc(), VTs, Size, Offset);
  createOperands(N, Ops);
  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
/// MachinePointerInfo record from it. This is particularly useful because the
/// code generator has many cases where it doesn't bother passing in a
/// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
                                           SelectionDAG &DAG, SDValue Ptr,
                                           int64_t Offset = 0) {
  // If this is FI+Offset, we can model it.
  if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
    return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(),
                                             FI->getIndex(), Offset);

  // If this is (FI+Offset1)+Offset2, we can model it.
  if (Ptr.getOpcode() != ISD::ADD ||
      !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
      !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
    return Info;

  int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
  return MachinePointerInfo::getFixedStack(
      DAG.getMachineFunction(), FI,
      Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
}

/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
/// MachinePointerInfo record from it. This is particularly useful because the
/// code generator has many cases where it doesn't bother passing in a
/// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
                                           SelectionDAG &DAG, SDValue Ptr,
                                           SDValue OffsetOp) {
  // We can only model constant and undef offsets; give up on anything else.
  if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
    return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue());
  if (OffsetOp.isUndef())
    return InferPointerInfo(Info, DAG, Ptr);
  return Info;
}

SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
                              EVT VT, const SDLoc &dl, SDValue Chain,
                              SDValue Ptr, SDValue Offset,
                              MachinePointerInfo PtrInfo, EVT MemVT,
                              unsigned Alignment,
                              MachineMemOperand::Flags MMOFlags,
                              const AAMDNodes &AAInfo, const MDNode *Ranges) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  if (Alignment == 0) // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(MemVT);

  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);
  // If we don't have a PtrInfo, infer the trivial frame index case to simplify
  // clients.
  if (PtrInfo.V.isNull())
    PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MMOFlags, MemVT.getStoreSize(), Alignment, AAInfo, Ranges);
  return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
}

SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
                              EVT VT, const SDLoc &dl, SDValue Chain,
                              SDValue Ptr, SDValue Offset, EVT MemVT,
                              MachineMemOperand *MMO) {
  if (VT == MemVT) {
    ExtType = ISD::NON_EXTLOAD;
  } else if (ExtType == ISD::NON_EXTLOAD) {
    assert(VT == MemVT && "Non-extending load from different memory type!");
  } else {
    // Extending load.
    assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
           "Should only be an extending load, not truncating!");
    assert(VT.isInteger() == MemVT.isInteger() &&
           "Cannot convert from FP to Int or Int -> FP!");
    assert(VT.isVector() == MemVT.isVector() &&
           "Cannot use an ext load to convert to or from a vector!");
    assert((!VT.isVector() ||
            VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
           "Cannot use an ext load to change the number of vector elements!");
  }

  bool Indexed = AM != ISD::UNINDEXED;
  assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");

  SDVTList VTs = Indexed ?
    getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
  SDValue Ops[] = { Chain, Ptr, Offset };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
  ID.AddInteger(MemVT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
      dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<LoadSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
                                  ExtType, MemVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
                              SDValue Ptr, MachinePointerInfo PtrInfo,
                              unsigned Alignment,
                              MachineMemOperand::Flags MMOFlags,
                              const AAMDNodes &AAInfo, const MDNode *Ranges) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
                 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
}

SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
                              SDValue Ptr, MachineMemOperand *MMO) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
                 VT, MMO);
}

SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
                                 EVT VT, SDValue Chain, SDValue Ptr,
                                 MachinePointerInfo PtrInfo, EVT MemVT,
                                 unsigned Alignment,
                                 MachineMemOperand::Flags MMOFlags,
                                 const AAMDNodes &AAInfo) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo,
                 MemVT, Alignment, MMOFlags, AAInfo);
}

SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
                                 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT,
                                 MachineMemOperand *MMO) {
  SDValue Undef = getUNDEF(Ptr.getValueType());
  return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
                 MemVT, MMO);
}

SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl,
                                     SDValue Base, SDValue Offset,
                                     ISD::MemIndexedMode AM) {
  LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
  assert(LD->getOffset().isUndef() && "Load is already an indexed load!");
  // Don't propagate the invariant or dereferenceable flags.
  auto MMOFlags =
      LD->getMemOperand()->getFlags() &
      ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
  return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
                 LD->getChain(), Base, Offset, LD->getPointerInfo(),
                 LD->getMemoryVT(), LD->getAlignment(), MMOFlags,
                 LD->getAAInfo());
}

SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                               SDValue Ptr, MachinePointerInfo PtrInfo,
                               unsigned Alignment,
                               MachineMemOperand::Flags MMOFlags,
                               const AAMDNodes &AAInfo) {
  assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
  if (Alignment == 0) // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(Val.getValueType());

  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  if (PtrInfo.V.isNull())
    PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MMOFlags, Val.getValueType().getStoreSize(), Alignment, AAInfo);
  return getStore(Chain, dl, Val, Ptr, MMO);
}

SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                               SDValue Ptr, MachineMemOperand *MMO) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  EVT VT = Val.getValueType();
  SDVTList VTs = getVTList(MVT::Other);
  SDValue Undef = getUNDEF(Ptr.getValueType());
  SDValue Ops[] = { Chain, Val, Ptr, Undef };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
      dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<StoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
                                   ISD::UNINDEXED, false, VT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                                    SDValue Ptr, MachinePointerInfo PtrInfo,
                                    EVT SVT, unsigned Alignment,
                                    MachineMemOperand::Flags MMOFlags,
                                    const AAMDNodes &AAInfo) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  if (Alignment == 0) // Ensure that codegen never sees alignment 0
    Alignment = getEVTAlignment(SVT);

  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  if (PtrInfo.V.isNull())
    PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);

  MachineFunction &MF = getMachineFunction();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo);
  return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
}

SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                                    SDValue Ptr, EVT SVT,
                                    MachineMemOperand *MMO) {
  EVT VT = Val.getValueType();

  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  if (VT == SVT)
    return getStore(Chain, dl, Val, Ptr, MMO);

  assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
         "Should only be a truncating store, not extending!");
  assert(VT.isInteger() == SVT.isInteger() &&
         "Can't do FP-INT conversion!");
  assert(VT.isVector() == SVT.isVector() &&
         "Cannot use trunc store to convert to or from a vector!");
  assert((!VT.isVector() ||
          VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
         "Cannot use trunc store to change the number of vector elements!");

  SDVTList VTs = getVTList(MVT::Other);
  SDValue Undef = getUNDEF(Ptr.getValueType());
  SDValue Ops[] = { Chain, Val, Ptr, Undef };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(SVT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
      dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<StoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
                                   ISD::UNINDEXED, true, SVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl,
                                      SDValue Base, SDValue Offset,
                                      ISD::MemIndexedMode AM) {
  StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
  assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
  SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
  SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
  ID.AddInteger(ST->getMemoryVT().getRawBits());
  ID.AddInteger(ST->getRawSubclassData());
  ID.AddInteger(ST->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
    return SDValue(E, 0);

  auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
                                   ST->isTruncatingStore(), ST->getMemoryVT(),
                                   ST->getMemOperand());
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

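/// Create (or CSE) a masked load. Indexed masked loads additionally produce
/// the updated base pointer as a second result.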
SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain,
                                    SDValue Base, SDValue Offset, SDValue Mask,
                                    SDValue PassThru, EVT MemVT,
                                    MachineMemOperand *MMO,
                                    ISD::MemIndexedMode AM,
                                    ISD::LoadExtType ExtTy, bool isExpanding) {
  bool Indexed = AM != ISD::UNINDEXED;
  assert((Indexed || Offset.isUndef()) &&
         "Unindexed masked load with an offset!");
  SDVTList VTs = Indexed ? getVTList(VT, Base.getValueType(), MVT::Other)
                         : getVTList(VT, MVT::Other);
  SDValue Ops[] = {Chain, Base, Offset, Mask, PassThru};
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops);
  ID.AddInteger(MemVT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
      dl.getIROrder(), VTs, AM, ExtTy, isExpanding, MemVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
                                        AM, ExtTy, isExpanding, MemVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl,
                                           SDValue Base, SDValue Offset,
                                           ISD::MemIndexedMode AM) {
  MaskedLoadSDNode *LD = cast<MaskedLoadSDNode>(OrigLoad);
  assert(LD->getOffset().isUndef() &&
         "Masked load is already an indexed load!");
  return getMaskedLoad(OrigLoad.getValueType(), dl, LD->getChain(), Base,
                       Offset, LD->getMask(), LD->getPassThru(),
                       LD->getMemoryVT(), LD->getMemOperand(), AM,
                       LD->getExtensionType(), LD->isExpandingLoad());
}

SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl,
                                     SDValue Val, SDValue Base, SDValue Offset,
                                     SDValue Mask, EVT MemVT,
                                     MachineMemOperand *MMO,
                                     ISD::MemIndexedMode AM, bool IsTruncating,
                                     bool IsCompressing) {
  assert(Chain.getValueType() == MVT::Other &&
         "Invalid chain type");
  bool Indexed = AM != ISD::UNINDEXED;
  assert((Indexed || Offset.isUndef()) &&
         "Unindexed masked store with an offset!");
  SDVTList VTs = Indexed ? getVTList(Base.getValueType(), MVT::Other)
                         : getVTList(MVT::Other);
  SDValue Ops[] = {Chain, Val, Base, Offset, Mask};
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops);
  ID.AddInteger(MemVT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
      dl.getIROrder(), VTs, AM, IsTruncating, IsCompressing, MemVT, MMO));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N =
      newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
                                   IsTruncating, IsCompressing, MemVT, MMO);
  createOperands(N, Ops);

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl,
                                            SDValue Base, SDValue Offset,
                                            ISD::MemIndexedMode AM) {
  MaskedStoreSDNode *ST = cast<MaskedStoreSDNode>(OrigStore);
  assert(ST->getOffset().isUndef() &&
         "Masked store is already an indexed store!");
  return getMaskedStore(ST->getChain(), dl, ST->getValue(), Base, Offset,
                        ST->getMask(), ST->getMemoryVT(), ST->getMemOperand(),
                        AM, ST->isTruncatingStore(), ST->isCompressingStore());
}

SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl,
                                      ArrayRef<SDValue> Ops,
                                      MachineMemOperand *MMO,
                                      ISD::MemIndexType IndexType) {
  assert(Ops.size() == 6 && "Incompatible number of operands");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
      dl.getIROrder(), VTs, VT, MMO, IndexType));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }

  auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                          VTs, VT, MMO, IndexType);
  createOperands(N, Ops);

  assert(N->getPassThru().getValueType() == N->getValueType(0) &&
         "Incompatible type of the PassThru value in MaskedGatherSDNode");
  assert(N->getMask().getValueType().getVectorNumElements() ==
             N->getValueType(0).getVectorNumElements() &&
         "Vector width mismatch between mask and data");
  assert(N->getIndex().getValueType().getVectorNumElements() >=
             N->getValueType(0).getVectorNumElements() &&
         "Vector width mismatch between index and data");
  assert(isa<ConstantSDNode>(N->getScale()) &&
         cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
         "Scale should be a constant power of 2");

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl,
                                       ArrayRef<SDValue> Ops,
                                       MachineMemOperand *MMO,
                                       ISD::MemIndexType IndexType) {
  assert(Ops.size() == 6 && "Incompatible number of operands");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops);
  ID.AddInteger(VT.getRawBits());
  ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
      dl.getIROrder(), VTs, VT, MMO, IndexType));
  ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
  void *IP = nullptr;
  if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
    cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
    return SDValue(E, 0);
  }
  auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
                                           VTs, VT, MMO, IndexType);
  createOperands(N, Ops);

  assert(N->getMask().getValueType().getVectorNumElements() ==
             N->getValue().getValueType().getVectorNumElements() &&
         "Vector width mismatch between mask and data");
  assert(N->getIndex().getValueType().getVectorNumElements() >=
             N->getValue().getValueType().getVectorNumElements() &&
         "Vector width mismatch between index and data");
  assert(isa<ConstantSDNode>(N->getScale()) &&
         cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
         "Scale should be a constant power of 2");

  CSEMap.InsertNode(N, IP);
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

simplifySelect(SDValue Cond,SDValue T,SDValue F)7237 SDValue SelectionDAG::simplifySelect(SDValue Cond, SDValue T, SDValue F) {
7238 // select undef, T, F --> T (if T is a constant), otherwise F
  // select ?, undef, F --> F
  // select ?, T, undef --> T
  if (Cond.isUndef())
    return isConstantValueOfAnyType(T) ? T : F;
  if (T.isUndef())
    return F;
  if (F.isUndef())
    return T;

  // select true, T, F --> T
  // select false, T, F --> F
  if (auto *CondC = dyn_cast<ConstantSDNode>(Cond))
    return CondC->isNullValue() ? F : T;

  // TODO: This should simplify VSELECT with constant condition using something
  // like this (but check boolean contents to be complete?):
  // if (ISD::isBuildVectorAllOnes(Cond.getNode()))
  //   return T;
  // if (ISD::isBuildVectorAllZeros(Cond.getNode()))
  //   return F;

  // select ?, T, T --> T
  if (T == F)
    return T;

  return SDValue();
}

SDValue SelectionDAG::simplifyShift(SDValue X, SDValue Y) {
  // shift undef, Y --> 0 (can always assume that the undef value is 0)
  if (X.isUndef())
    return getConstant(0, SDLoc(X.getNode()), X.getValueType());
  // shift X, undef --> undef (because it may shift by the bitwidth)
  if (Y.isUndef())
    return getUNDEF(X.getValueType());

  // shift 0, Y --> 0
  // shift X, 0 --> X
  if (isNullOrNullSplat(X) || isNullOrNullSplat(Y))
    return X;

  // shift X, C >= bitwidth(X) --> undef
  // All vector elements must be too big (or undef) to avoid partial undefs.
  auto isShiftTooBig = [X](ConstantSDNode *Val) {
    return !Val || Val->getAPIntValue().uge(X.getScalarValueSizeInBits());
  };
  if (ISD::matchUnaryPredicate(Y, isShiftTooBig, true))
    return getUNDEF(X.getValueType());

  return SDValue();
}

// TODO: Use fast-math-flags to enable more simplifications.
SDValue SelectionDAG::simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y) {
  ConstantFPSDNode *YC = isConstOrConstSplatFP(Y, /* AllowUndefs */ true);
  if (!YC)
    return SDValue();

  // X + -0.0 --> X
  if (Opcode == ISD::FADD)
    if (YC->getValueAPF().isNegZero())
      return X;

  // X - +0.0 --> X
  if (Opcode == ISD::FSUB)
    if (YC->getValueAPF().isPosZero())
      return X;

  // X * 1.0 --> X
  // X / 1.0 --> X
  if (Opcode == ISD::FMUL || Opcode == ISD::FDIV)
    if (YC->getValueAPF().isExactlyValue(1.0))
      return X;

  return SDValue();
}
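
// Illustrative note (not exercised anywhere in this file): the folds above
// are the ones that hold for all inputs without fast-math flags, e.g.
//
//   SDValue A = ...;                                  // some f32 value
//   SDValue NegZ = DAG.getConstantFP(-0.0, DL, MVT::f32);
//   // simplifyFPBinop(ISD::FADD, A, NegZ) returns A: x + -0.0 == x for
//   // every x, including NaN and -0.0.
//
// By contrast X + +0.0 is not folded: for X == -0.0 the IEEE result is +0.0,
// so that fold would need the 'nsz' fast-math flag (see the TODO above).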

SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain,
                               SDValue Ptr, SDValue SV, unsigned Align) {
  SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) };
  return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              ArrayRef<SDUse> Ops) {
  switch (Ops.size()) {
  case 0: return getNode(Opcode, DL, VT);
  case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
  default: break;
  }

  // Copy from an SDUse array into an SDValue array for use with
  // the regular getNode logic.
  SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end());
  return getNode(Opcode, DL, VT, NewOps);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                              ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
  unsigned NumOps = Ops.size();
  switch (NumOps) {
  case 0: return getNode(Opcode, DL, VT);
  case 1: return getNode(Opcode, DL, VT, Ops[0], Flags);
  case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags);
  case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2], Flags);
  default: break;
  }

  switch (Opcode) {
  default: break;
  case ISD::BUILD_VECTOR:
    // Attempt to simplify BUILD_VECTOR.
    if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
      return V;
    break;
  case ISD::CONCAT_VECTORS:
    if (SDValue V = foldCONCAT_VECTORS(DL, VT, Ops, *this))
      return V;
    break;
  case ISD::SELECT_CC:
    assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
    assert(Ops[0].getValueType() == Ops[1].getValueType() &&
           "LHS and RHS of condition must have same type!");
    assert(Ops[2].getValueType() == Ops[3].getValueType() &&
           "True and False arms of SelectCC must have same type!");
    assert(Ops[2].getValueType() == VT &&
           "select_cc node must be of same type as true and false value!");
    break;
  case ISD::BR_CC:
    assert(NumOps == 5 && "BR_CC takes 5 operands!");
    assert(Ops[2].getValueType() == Ops[3].getValueType() &&
           "LHS/RHS of comparison should match types!");
    break;
  }

  // Memoize nodes.
  SDNode *N;
  SDVTList VTs = getVTList(VT);

  if (VT != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTs, Ops);
    void *IP = nullptr;

    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
      return SDValue(E, 0);

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);

    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
    createOperands(N, Ops);
  }

  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
                              ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
  return getNode(Opcode, DL, getVTList(ResultTys), Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              ArrayRef<SDValue> Ops) {
  if (VTList.NumVTs == 1)
    return getNode(Opcode, DL, VTList.VTs[0], Ops);

  switch (Opcode) {
  case ISD::STRICT_FP_EXTEND:
    assert(VTList.NumVTs == 2 && Ops.size() == 2 &&
           "Invalid STRICT_FP_EXTEND!");
    assert(VTList.VTs[0].isFloatingPoint() &&
           Ops[1].getValueType().isFloatingPoint() && "Invalid FP cast!");
    assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() &&
           "STRICT_FP_EXTEND result type should be vector iff the operand "
           "type is vector!");
    assert((!VTList.VTs[0].isVector() ||
            VTList.VTs[0].getVectorNumElements() ==
                Ops[1].getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    assert(Ops[1].getValueType().bitsLT(VTList.VTs[0]) &&
           "Invalid fpext node, dst <= src!");
    break;
  case ISD::STRICT_FP_ROUND:
    assert(VTList.NumVTs == 2 && Ops.size() == 3 && "Invalid STRICT_FP_ROUND!");
    assert(VTList.VTs[0].isVector() == Ops[1].getValueType().isVector() &&
           "STRICT_FP_ROUND result type should be vector iff the operand "
           "type is vector!");
    assert((!VTList.VTs[0].isVector() ||
            VTList.VTs[0].getVectorNumElements() ==
                Ops[1].getValueType().getVectorNumElements()) &&
           "Vector element count mismatch!");
    assert(VTList.VTs[0].isFloatingPoint() &&
           Ops[1].getValueType().isFloatingPoint() &&
           VTList.VTs[0].bitsLT(Ops[1].getValueType()) &&
           isa<ConstantSDNode>(Ops[2]) &&
           (cast<ConstantSDNode>(Ops[2])->getZExtValue() == 0 ||
            cast<ConstantSDNode>(Ops[2])->getZExtValue() == 1) &&
           "Invalid STRICT_FP_ROUND!");
    break;
#if 0
  // FIXME: figure out how to safely handle things like
  // int foo(int x) { return 1 << (x & 255); }
  // int bar() { return foo(256); }
  case ISD::SRA_PARTS:
  case ISD::SRL_PARTS:
  case ISD::SHL_PARTS:
    if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
        cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
      return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
    else if (N3.getOpcode() == ISD::AND)
      if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
        // If the and is only masking out bits that cannot affect the shift,
        // eliminate the and.
        unsigned NumBits = VT.getScalarSizeInBits()*2;
        if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
          return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
      }
    break;
#endif
  }

  // Memoize the node unless it returns a flag.
  SDNode *N;
  if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
      return SDValue(E, 0);

    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
    createOperands(N, Ops);
    CSEMap.InsertNode(N, IP);
  } else {
    N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
    createOperands(N, Ops);
  }
  InsertNode(N);
  SDValue V(N, 0);
  NewSDValueDbgMsg(V, "Creating new node: ", this);
  return V;
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
                              SDVTList VTList) {
  return getNode(Opcode, DL, VTList, None);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1) {
  SDValue Ops[] = { N1 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2) {
  SDValue Ops[] = { N1, N2 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3) {
  SDValue Ops[] = { N1, N2, N3 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
  SDValue Ops[] = { N1, N2, N3, N4 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              SDValue N1, SDValue N2, SDValue N3, SDValue N4,
                              SDValue N5) {
  SDValue Ops[] = { N1, N2, N3, N4, N5 };
  return getNode(Opcode, DL, VTList, Ops);
}

SDVTList SelectionDAG::getVTList(EVT VT) {
  return makeVTList(SDNode::getValueTypeList(VT), 1);
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
  FoldingSetNodeID ID;
  ID.AddInteger(2U);
  ID.AddInteger(VT1.getRawBits());
  ID.AddInteger(VT2.getRawBits());

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(2);
    Array[0] = VT1;
    Array[1] = VT2;
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
  FoldingSetNodeID ID;
  ID.AddInteger(3U);
  ID.AddInteger(VT1.getRawBits());
  ID.AddInteger(VT2.getRawBits());
  ID.AddInteger(VT3.getRawBits());

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(3);
    Array[0] = VT1;
    Array[1] = VT2;
    Array[2] = VT3;
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
  FoldingSetNodeID ID;
  ID.AddInteger(4U);
  ID.AddInteger(VT1.getRawBits());
  ID.AddInteger(VT2.getRawBits());
  ID.AddInteger(VT3.getRawBits());
  ID.AddInteger(VT4.getRawBits());

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(4);
    Array[0] = VT1;
    Array[1] = VT2;
    Array[2] = VT3;
    Array[3] = VT4;
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}

SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) {
  unsigned NumVTs = VTs.size();
  FoldingSetNodeID ID;
  ID.AddInteger(NumVTs);
  for (unsigned index = 0; index < NumVTs; index++) {
    ID.AddInteger(VTs[index].getRawBits());
  }

  void *IP = nullptr;
  SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
  if (!Result) {
    EVT *Array = Allocator.Allocate<EVT>(NumVTs);
    llvm::copy(VTs, Array);
    Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
    VTListMap.InsertNode(Result, IP);
  }
  return Result->getSDVTList();
}


/// UpdateNodeOperands - *Mutate* the specified node in-place to have the
/// specified operands. If the resultant node already exists in the DAG,
/// this does not modify the specified node, instead it returns the node that
/// already exists. If the resultant node does not exist in the DAG, the
/// input node is returned. As a degenerate case, if you specify the same
/// input operands as the node already has, the input node is returned.
SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
  assert(N->getNumOperands() == 1 && "Update with wrong number of operands");

  // Check to see if there is no change.
  if (Op == N->getOperand(0)) return N;

  // See if the modified node already exists.
  void *InsertPos = nullptr;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
    return Existing;

  // Nope it doesn't. Remove the node from its current place in the maps.
  if (InsertPos)
    if (!RemoveNodeFromCSEMaps(N))
      InsertPos = nullptr;

  // Now we update the operands.
  N->OperandList[0].set(Op);

  updateDivergence(N);
  // If this gets put into a CSE map, add it.
  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
  return N;
}
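
// Illustrative sketch (assumed caller, not part of this file): a typical use
// is to re-point an existing node at a simplified operand and let CSE decide
// whether an equivalent node already exists:
//
//   SDValue NewOp = ...; // simplified replacement for N->getOperand(0)
//   SDNode *Result = DAG.UpdateNodeOperands(N, NewOp);
//   if (Result != N) {
//     // An identical node was already in the DAG; N itself was not modified
//     // and the caller should use Result instead.
//   }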

SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
  assert(N->getNumOperands() == 2 && "Update with wrong number of operands");

  // Check to see if there is no change.
  if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
    return N;   // No operands changed, just return the input node.

  // See if the modified node already exists.
  void *InsertPos = nullptr;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
    return Existing;

  // Nope it doesn't. Remove the node from its current place in the maps.
  if (InsertPos)
    if (!RemoveNodeFromCSEMaps(N))
      InsertPos = nullptr;

  // Now we update the operands.
  if (N->OperandList[0] != Op1)
    N->OperandList[0].set(Op1);
  if (N->OperandList[1] != Op2)
    N->OperandList[1].set(Op2);

  updateDivergence(N);
  // If this gets put into a CSE map, add it.
  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
  return N;
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
  SDValue Ops[] = { Op1, Op2, Op3 };
  return UpdateNodeOperands(N, Ops);
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
                   SDValue Op3, SDValue Op4) {
  SDValue Ops[] = { Op1, Op2, Op3, Op4 };
  return UpdateNodeOperands(N, Ops);
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
                   SDValue Op3, SDValue Op4, SDValue Op5) {
  SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
  return UpdateNodeOperands(N, Ops);
}

SDNode *SelectionDAG::
UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) {
  unsigned NumOps = Ops.size();
  assert(N->getNumOperands() == NumOps &&
         "Update with wrong number of operands");

  // If no operands changed just return the input node.
  if (std::equal(Ops.begin(), Ops.end(), N->op_begin()))
    return N;

  // See if the modified node already exists.
  void *InsertPos = nullptr;
  if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
    return Existing;

  // Nope it doesn't. Remove the node from its current place in the maps.
  if (InsertPos)
    if (!RemoveNodeFromCSEMaps(N))
      InsertPos = nullptr;

  // Now we update the operands.
  for (unsigned i = 0; i != NumOps; ++i)
    if (N->OperandList[i] != Ops[i])
      N->OperandList[i].set(Ops[i]);

  updateDivergence(N);
  // If this gets put into a CSE map, add it.
  if (InsertPos) CSEMap.InsertNode(N, InsertPos);
  return N;
}

/// DropOperands - Release the operands and set this node to have
/// zero operands.
void SDNode::DropOperands() {
  // Unlike the code in MorphNodeTo that does this, we don't need to
  // watch for dead nodes here.
  for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
    SDUse &Use = *I++;
    Use.set(SDValue());
  }
}

void SelectionDAG::setNodeMemRefs(MachineSDNode *N,
                                  ArrayRef<MachineMemOperand *> NewMemRefs) {
  if (NewMemRefs.empty()) {
    N->clearMemRefs();
    return;
  }

  // Check if we can avoid allocating by storing a single reference directly.
  if (NewMemRefs.size() == 1) {
    N->MemRefs = NewMemRefs[0];
    N->NumMemRefs = 1;
    return;
  }

  MachineMemOperand **MemRefsBuffer =
      Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.size());
  llvm::copy(NewMemRefs, MemRefsBuffer);
  N->MemRefs = MemRefsBuffer;
  N->NumMemRefs = static_cast<int>(NewMemRefs.size());
}

/// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
/// machine opcode.
///
SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT) {
  SDVTList VTs = getVTList(VT);
  return SelectNodeTo(N, MachineOpc, VTs, None);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1,
                                   SDValue Op2) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, SDValue Op1,
                                   SDValue Op2, SDValue Op3) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT, ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT);
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2);
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2) {
  SDVTList VTs = getVTList(VT1, VT2);
  return SelectNodeTo(N, MachineOpc, VTs, None);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2, EVT VT3,
                                   ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   EVT VT1, EVT VT2,
                                   SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2 };
  return SelectNodeTo(N, MachineOpc, VTs, Ops);
}

SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
                                   SDVTList VTs, ArrayRef<SDValue> Ops) {
  SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
  // Reset the NodeID to -1.
  New->setNodeId(-1);
  if (New != N) {
    ReplaceAllUsesWith(N, New);
    RemoveDeadNode(N);
  }
  return New;
}
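
// Illustrative sketch (hypothetical opcode, assumed ISel context): selection
// typically morphs a target-independent node into a machine node in place:
//
//   // Turn an ISD::ADD i32 node into a target add instruction.
//   SDNode *New = CurDAG->SelectNodeTo(N, TargetOpc::ADD32rr, MVT::i32,
//                                      N->getOperand(0), N->getOperand(1));
//
// TargetOpc::ADD32rr is a placeholder; real selectors use opcodes from their
// TableGen-generated instruction enums. Note that SelectNodeTo passes
// ~MachineOpc to MorphNodeTo, keeping machine opcodes disjoint from ISD ones.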

/// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
/// the line number information on the merged node since it is not possible to
/// preserve the information that the operation is associated with multiple
/// lines. This will make the debugger work better at -O0, where there is a
/// higher probability of having other instructions associated with that line.
///
/// For IROrder, we keep the smaller of the two.
SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) {
  DebugLoc NLoc = N->getDebugLoc();
  if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
    N->setDebugLoc(DebugLoc());
  }
  unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
  N->setIROrder(Order);
  return N;
}
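
// Worked example of the policy above (illustrative): merging a node carrying
// line 10 with an equivalent node carrying line 12 at -O0 clears the DebugLoc
// entirely, since attributing the merged operation to either line would
// mislead the debugger, while the IROrder becomes min(10, 12) = 10 at every
// optimization level.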

/// MorphNodeTo - This *mutates* the specified node to have the specified
/// return type, opcode, and operands.
///
/// Note that MorphNodeTo returns the resultant node. If there is already a
/// node of the specified opcode and operands, it returns that node instead of
/// the current one. Note that the SDLoc need not be the same.
///
/// Using MorphNodeTo is faster than creating a new node and swapping it in
/// with ReplaceAllUsesWith both because it often avoids allocating a new
/// node, and because it doesn't require CSE recalculation for any of
/// the node's users.
///
/// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
/// As a consequence it isn't appropriate to use from within the DAG combiner or
/// the legalizer which maintain worklists that would need to be updated when
/// deleting things.
SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
                                  SDVTList VTs, ArrayRef<SDValue> Ops) {
  // If an identical node already exists, use it.
  void *IP = nullptr;
  if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opc, VTs, Ops);
    if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP))
      return UpdateSDLocOnMergeSDNode(ON, SDLoc(N));
  }

  if (!RemoveNodeFromCSEMaps(N))
    IP = nullptr;

  // Start the morphing.
  N->NodeType = Opc;
  N->ValueList = VTs.VTs;
  N->NumValues = VTs.NumVTs;

  // Clear the operands list, updating used nodes to remove this from their
  // use list. Keep track of any operands that become dead as a result.
  SmallPtrSet<SDNode*, 16> DeadNodeSet;
  for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
    SDUse &Use = *I++;
    SDNode *Used = Use.getNode();
    Use.set(SDValue());
    if (Used->use_empty())
      DeadNodeSet.insert(Used);
  }

  // For MachineNode, initialize the memory references information.
  if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N))
    MN->clearMemRefs();

  // Swap for an appropriately sized array from the recycler.
  removeOperands(N);
  createOperands(N, Ops);

  // Delete any nodes that are still dead after adding the uses for the
  // new operands.
  if (!DeadNodeSet.empty()) {
    SmallVector<SDNode *, 16> DeadNodes;
    for (SDNode *N : DeadNodeSet)
      if (N->use_empty())
        DeadNodes.push_back(N);
    RemoveDeadNodes(DeadNodes);
  }

  if (IP)
    CSEMap.InsertNode(N, IP);   // Memoize the new node.
  return N;
}
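
// Illustrative sketch (assumed caller): because MorphNodeTo may return a
// different, pre-existing node, callers must be prepared to RAUW the result:
//
//   SDNode *Res = DAG.MorphNodeTo(N, NewOpc, VTs, Ops);
//   if (Res != N) {
//     DAG.ReplaceAllUsesWith(N, Res);
//     DAG.RemoveDeadNode(N);
//   }
//
// This is exactly the pattern SelectNodeTo above and mutateStrictFPToFP below
// follow.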

SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) {
  unsigned OrigOpc = Node->getOpcode();
  unsigned NewOpc;
  switch (OrigOpc) {
  default:
    llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!");
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)                   \
  case ISD::STRICT_##DAGN: NewOpc = ISD::DAGN; break;
#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
  case ISD::STRICT_##DAGN: NewOpc = ISD::SETCC; break;
#include "llvm/IR/ConstrainedOps.def"
  }

  assert(Node->getNumValues() == 2 && "Unexpected number of results!");

  // We're taking this node out of the chain, so we need to re-link things.
  SDValue InputChain = Node->getOperand(0);
  SDValue OutputChain = SDValue(Node, 1);
  ReplaceAllUsesOfValueWith(OutputChain, InputChain);

  SmallVector<SDValue, 3> Ops;
  for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
    Ops.push_back(Node->getOperand(i));

  SDVTList VTs = getVTList(Node->getValueType(0));
  SDNode *Res = MorphNodeTo(Node, NewOpc, VTs, Ops);

  // MorphNodeTo can operate in two ways: if an existing node with the
  // specified operands exists, it can just return it. Otherwise, it
  // updates the node in place to have the requested operands.
  if (Res == Node) {
    // If we updated the node in place, reset the node ID. To the isel,
    // this should be just like a newly allocated machine node.
    Res->setNodeId(-1);
  } else {
    ReplaceAllUsesWith(Node, Res);
    RemoveDeadNode(Node);
  }

  return Res;
}
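
// Illustrative before/after for the mutation above (chains shown explicitly):
//
//   t5: f64,ch = STRICT_FADD t0, t1, t2   -->   t5: f64 = FADD t1, t2
//
// The strict node's chain result is re-linked to its input chain (t0) before
// the morph, so ordering constraints on other chained nodes are preserved
// even though the mutated node no longer participates in the chain.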

/// getMachineNode - These are used for target selectors to create a new node
/// with specified return type(s), MachineInstr opcode, and operands.
///
/// Note that getMachineNode returns the resultant node. If there is already a
/// node of the specified opcode and operands, it returns that node instead of
/// the current one.
MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT) {
  SDVTList VTs = getVTList(VT);
  return getMachineNode(Opcode, dl, VTs, None);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, SDValue Op1) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, SDValue Op1, SDValue Op2,
                                            SDValue Op3) {
  SDVTList VTs = getVTList(VT);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT, ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, SDValue Op1,
                                            SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, SDValue Op1,
                                            SDValue Op2, SDValue Op3) {
  SDVTList VTs = getVTList(VT1, VT2);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2,
                                            ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, EVT VT3,
                                            SDValue Op1, SDValue Op2) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  SDValue Ops[] = { Op1, Op2 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, EVT VT3,
                                            SDValue Op1, SDValue Op2,
                                            SDValue Op3) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  SDValue Ops[] = { Op1, Op2, Op3 };
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            EVT VT1, EVT VT2, EVT VT3,
                                            ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(VT1, VT2, VT3);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
                                            ArrayRef<EVT> ResultTys,
                                            ArrayRef<SDValue> Ops) {
  SDVTList VTs = getVTList(ResultTys);
  return getMachineNode(Opcode, dl, VTs, Ops);
}

MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL,
                                            SDVTList VTs,
                                            ArrayRef<SDValue> Ops) {
  bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
  MachineSDNode *N;
  void *IP = nullptr;

  if (DoCSE) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, ~Opcode, VTs, Ops);
    IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
      return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL));
    }
  }

  // Allocate a new MachineSDNode.
  N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
  createOperands(N, Ops);

  if (DoCSE)
    CSEMap.InsertNode(N, IP);

  InsertNode(N);
  NewSDValueDbgMsg(SDValue(N, 0), "Creating new machine node: ", this);
  return N;
}
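
// Illustrative sketch (hypothetical opcode): a target selector creating a
// machine node with two results, e.g. an instruction that also defines flags:
//
//   MachineSDNode *MN =
//       CurDAG->getMachineNode(TargetOpc::SUB32rr, DL,
//                              CurDAG->getVTList(MVT::i32, MVT::i32),
//                              {LHS, RHS});
//
// The opcode is stored complemented (~Opcode), which keeps machine opcodes
// disjoint from ISD opcode numbers so the CSE map can never confuse the two
// kinds of nodes.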

/// getTargetExtractSubreg - A convenience function for creating
/// TargetOpcode::EXTRACT_SUBREG nodes.
SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
                                             SDValue Operand) {
  SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
  SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
                                  VT, Operand, SRIdxVal);
  return SDValue(Subreg, 0);
}

/// getTargetInsertSubreg - A convenience function for creating
/// TargetOpcode::INSERT_SUBREG nodes.
SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
                                            SDValue Operand, SDValue Subreg) {
  SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
  SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
                                  VT, Operand, Subreg, SRIdxVal);
  return SDValue(Result, 0);
}

/// getNodeIfExists - Get the specified node if it's already available, or
/// else return NULL.
SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
                                      ArrayRef<SDValue> Ops,
                                      const SDNodeFlags Flags) {
  if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opcode, VTList, Ops);
    void *IP = nullptr;
    if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) {
      E->intersectFlagsWith(Flags);
      return E;
    }
  }
  return nullptr;
}

/// getDbgValue - Creates an SDDbgValue node.
///
/// SDNode
SDDbgValue *SelectionDAG::getDbgValue(DIVariable *Var, DIExpression *Expr,
                                      SDNode *N, unsigned R, bool IsIndirect,
                                      const DebugLoc &DL, unsigned O) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc())
      SDDbgValue(Var, Expr, N, R, IsIndirect, DL, O);
}

/// Constant
SDDbgValue *SelectionDAG::getConstantDbgValue(DIVariable *Var,
                                              DIExpression *Expr,
                                              const Value *C,
                                              const DebugLoc &DL, unsigned O) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, DL, O);
}

/// FrameIndex
SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var,
                                                DIExpression *Expr, unsigned FI,
                                                bool IsIndirect,
                                                const DebugLoc &DL,
                                                unsigned O) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc())
      SDDbgValue(Var, Expr, FI, IsIndirect, DL, O, SDDbgValue::FRAMEIX);
}

/// VReg
SDDbgValue *SelectionDAG::getVRegDbgValue(DIVariable *Var,
                                          DIExpression *Expr,
                                          unsigned VReg, bool IsIndirect,
                                          const DebugLoc &DL, unsigned O) {
  assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc())
      SDDbgValue(Var, Expr, VReg, IsIndirect, DL, O, SDDbgValue::VREG);
}

void SelectionDAG::transferDbgValues(SDValue From, SDValue To,
                                     unsigned OffsetInBits, unsigned SizeInBits,
                                     bool InvalidateDbg) {
  SDNode *FromNode = From.getNode();
  SDNode *ToNode = To.getNode();
  assert(FromNode && ToNode && "Can't modify dbg values");

  // PR35338
  // TODO: assert(From != To && "Redundant dbg value transfer");
  // TODO: assert(FromNode != ToNode && "Intranode dbg value transfer");
  if (From == To || FromNode == ToNode)
    return;

  if (!FromNode->getHasDebugValue())
    return;

  SmallVector<SDDbgValue *, 2> ClonedDVs;
  for (SDDbgValue *Dbg : GetDbgValues(FromNode)) {
    if (Dbg->getKind() != SDDbgValue::SDNODE || Dbg->isInvalidated())
      continue;

    // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value");

    // Just transfer the dbg value attached to From.
    if (Dbg->getResNo() != From.getResNo())
      continue;

    DIVariable *Var = Dbg->getVariable();
    auto *Expr = Dbg->getExpression();
    // If a fragment is requested, update the expression.
    if (SizeInBits) {
      // When splitting a larger (e.g., sign-extended) value whose
      // lower bits are described with an SDDbgValue, do not attempt
      // to transfer the SDDbgValue to the upper bits.
      if (auto FI = Expr->getFragmentInfo())
        if (OffsetInBits + SizeInBits > FI->SizeInBits)
          continue;
      auto Fragment = DIExpression::createFragmentExpression(Expr, OffsetInBits,
                                                             SizeInBits);
      if (!Fragment)
        continue;
      Expr = *Fragment;
    }
    // Clone the SDDbgValue and move it to To.
    SDDbgValue *Clone = getDbgValue(
        Var, Expr, ToNode, To.getResNo(), Dbg->isIndirect(), Dbg->getDebugLoc(),
        std::max(ToNode->getIROrder(), Dbg->getOrder()));
    ClonedDVs.push_back(Clone);

    if (InvalidateDbg) {
      // Invalidate value and indicate the SDDbgValue should not be emitted.
      Dbg->setIsInvalidated();
      Dbg->setIsEmitted();
    }
  }

  for (SDDbgValue *Dbg : ClonedDVs)
    AddDbgValue(Dbg, ToNode, false);
}

void SelectionDAG::salvageDebugInfo(SDNode &N) {
  if (!N.getHasDebugValue())
    return;

  SmallVector<SDDbgValue *, 2> ClonedDVs;
  for (auto DV : GetDbgValues(&N)) {
    if (DV->isInvalidated())
      continue;
    switch (N.getOpcode()) {
    default:
      break;
    case ISD::ADD:
      SDValue N0 = N.getOperand(0);
      SDValue N1 = N.getOperand(1);
      if (!isConstantIntBuildVectorOrConstantInt(N0) &&
          isConstantIntBuildVectorOrConstantInt(N1)) {
        uint64_t Offset = N.getConstantOperandVal(1);
        // Rewrite an ADD constant node into a DIExpression. Since we are
        // performing arithmetic to compute the variable's *value* in the
        // DIExpression, we need to mark the expression with a
        // DW_OP_stack_value.
        auto *DIExpr = DV->getExpression();
        DIExpr =
            DIExpression::prepend(DIExpr, DIExpression::StackValue, Offset);
        SDDbgValue *Clone =
            getDbgValue(DV->getVariable(), DIExpr, N0.getNode(), N0.getResNo(),
                        DV->isIndirect(), DV->getDebugLoc(), DV->getOrder());
        ClonedDVs.push_back(Clone);
        DV->setIsInvalidated();
        DV->setIsEmitted();
        LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting";
                   N0.getNode()->dumprFull(this);
                   dbgs() << " into " << *DIExpr << '\n');
      }
    }
  }

  for (SDDbgValue *Dbg : ClonedDVs)
    AddDbgValue(Dbg, Dbg->getSDNode(), false);
}

/// Creates an SDDbgLabel node.
SDDbgLabel *SelectionDAG::getDbgLabel(DILabel *Label,
                                      const DebugLoc &DL, unsigned O) {
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  return new (DbgInfo->getAlloc()) SDDbgLabel(Label, DL, O);
}

namespace {

/// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
/// pointed to by a use iterator is deleted, increment the use iterator
/// so that it doesn't dangle.
///
class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
  SDNode::use_iterator &UI;
  SDNode::use_iterator &UE;

  void NodeDeleted(SDNode *N, SDNode *E) override {
    // Increment the iterator as needed.
    while (UI != UE && N == *UI)
      ++UI;
  }

public:
  RAUWUpdateListener(SelectionDAG &d,
                     SDNode::use_iterator &ui,
                     SDNode::use_iterator &ue)
    : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
};

} // end anonymous namespace
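
// Illustrative failure mode the listener above prevents (assumed scenario):
// while walking From's use list, rewriting a use can recursively CSE-merge
// and delete the very node the iterator points at. Each replacement routine
// below therefore wires its iterators to a listener before mutating anything:
//
//   SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
//   RAUWUpdateListener Listener(*this, UI, UE); // advances UI past deletions
//   while (UI != UE) { ... }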

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version assumes From has a single result value.
///
void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
  SDNode *From = FromN.getNode();
  assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
         "Cannot replace with this method!");
  assert(From != To.getNode() && "Cannot replace uses of a value with itself");

  // Preserve Debug Values
  transferDbgValues(FromN, To);

  // Iterate over all the existing uses of From. New uses will be added
  // to the beginning of the use list, which we avoid visiting.
  // This specifically avoids visiting uses of From that arise while the
  // replacement is happening, because any such uses would be the result
  // of CSE: If an existing node looks like From after one of its operands
  // is replaced by To, we don't want to replace all of its users with To
  // too. See PR3018 for more info.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      ++UI;
      Use.set(To);
      if (To->isDivergent() != From->isDivergent())
        updateDivergence(User);
    } while (UI != UE && *UI == User);
    // Now that we have modified User, add it back to the CSE maps. If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (FromN == getRoot())
    setRoot(To);
}
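
// Illustrative recursive-merge example (assumed DAG contents): if the DAG
// holds both (add X, C) and (add Y, C), then ReplaceAllUsesWith(X, Y) above
// rewrites the first node into (add Y, C); AddModifiedNodeToCSEMaps then
// discovers it is now identical to the existing (add Y, C) and merges the
// two, which can cascade into further merges among their users.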

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version assumes that for each value of From, there is a
/// corresponding value in To in the same position with the same type.
///
void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
#ifndef NDEBUG
  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
    assert((!From->hasAnyUseOfValue(i) ||
            From->getValueType(i) == To->getValueType(i)) &&
           "Cannot use this version of ReplaceAllUsesWith!");
#endif

  // Handle the trivial case.
  if (From == To)
    return;

  // Preserve Debug Info. Only do this if there's a use.
  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
    if (From->hasAnyUseOfValue(i)) {
      assert((i < To->getNumValues()) && "Invalid To location");
      transferDbgValues(SDValue(From, i), SDValue(To, i));
    }

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();
      ++UI;
      Use.setNode(To);
      if (To->isDivergent() != From->isDivergent())
        updateDivergence(User);
    } while (UI != UE && *UI == User);

    // Now that we have modified User, add it back to the CSE maps. If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot().getNode())
    setRoot(SDValue(To, getRoot().getResNo()));
}

/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG.
///
/// This version can replace From with any result values. To must match the
/// number and types of values returned by From.
void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
  if (From->getNumValues() == 1) // Handle the simple case efficiently.
    return ReplaceAllUsesWith(SDValue(From, 0), To[0]);

  // Preserve Debug Info.
  for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
    transferDbgValues(SDValue(From, i), To[i]);

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;

    // This node is about to morph, remove its old self from the CSE maps.
    RemoveNodeFromCSEMaps(User);

    // A user can appear in a use list multiple times, and when this happens
    // the uses are usually next to each other in the list. To help reduce the
    // number of CSE and divergence recomputations, process all the uses of
    // this user that we can find this way.
    bool To_IsDivergent = false;
    do {
      SDUse &Use = UI.getUse();
      const SDValue &ToOp = To[Use.getResNo()];
      ++UI;
      Use.set(ToOp);
      To_IsDivergent |= ToOp->isDivergent();
    } while (UI != UE && *UI == User);

    if (To_IsDivergent != From->isDivergent())
      updateDivergence(User);

    // Now that we have modified User, add it back to the CSE maps. If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot().getNode())
    setRoot(SDValue(To[getRoot().getResNo()]));
}

/// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone. The Deleted
/// vector is handled the same way as for ReplaceAllUsesWith.
void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To) {
  // Handle the really simple, really trivial case efficiently.
  if (From == To) return;

  // Handle the simple, trivial, case efficiently.
  if (From.getNode()->getNumValues() == 1) {
    ReplaceAllUsesWith(From, To);
    return;
  }

  // Preserve Debug Info.
  transferDbgValues(From, To);

  // Iterate over just the existing users of From. See the comments in
  // the ReplaceAllUsesWith above.
  SDNode::use_iterator UI = From.getNode()->use_begin(),
                       UE = From.getNode()->use_end();
  RAUWUpdateListener Listener(*this, UI, UE);
  while (UI != UE) {
    SDNode *User = *UI;
    bool UserRemovedFromCSEMaps = false;

    // A user can appear in a use list multiple times, and when this
    // happens the uses are usually next to each other in the list.
    // To help reduce the number of CSE recomputations, process all
    // the uses of this user that we can find this way.
    do {
      SDUse &Use = UI.getUse();

      // Skip uses of different values from the same node.
      if (Use.getResNo() != From.getResNo()) {
        ++UI;
        continue;
      }

      // If this node hasn't been modified yet, it's still in the CSE maps,
      // so remove its old self from the CSE maps.
      if (!UserRemovedFromCSEMaps) {
        RemoveNodeFromCSEMaps(User);
        UserRemovedFromCSEMaps = true;
      }

      ++UI;
      Use.set(To);
      if (To->isDivergent() != From->isDivergent())
        updateDivergence(User);
    } while (UI != UE && *UI == User);
    // We are iterating over all uses of the From node, so if a use
    // doesn't use the specific value, no changes are made.
    if (!UserRemovedFromCSEMaps)
      continue;

    // Now that we have modified User, add it back to the CSE maps. If it
    // already exists there, recursively merge the results together.
    AddModifiedNodeToCSEMaps(User);
  }

  // If we just RAUW'd the root, take note.
  if (From == getRoot())
    setRoot(To);
}

namespace {

/// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
/// to record information about a use.
struct UseMemo {
  SDNode *User;
  unsigned Index;
  SDUse *Use;
};

/// operator< - Sort Memos by User.
bool operator<(const UseMemo &L, const UseMemo &R) {
  return (intptr_t)L.User < (intptr_t)R.User;
}

} // end anonymous namespace

void SelectionDAG::updateDivergence(SDNode *N) {
  if (TLI->isSDNodeAlwaysUniform(N))
    return;
  bool IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA);
  for (auto &Op : N->ops()) {
    if (Op.Val.getValueType() != MVT::Other)
      IsDivergent |= Op.getNode()->isDivergent();
  }
  if (N->SDNodeBits.IsDivergent != IsDivergent) {
    N->SDNodeBits.IsDivergent = IsDivergent;
    for (auto U : N->uses()) {
      updateDivergence(U);
    }
  }
}

void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) {
  DenseMap<SDNode *, unsigned> Degree;
  Order.reserve(AllNodes.size());
  for (auto &N : allnodes()) {
    unsigned NOps = N.getNumOperands();
    Degree[&N] = NOps;
    if (0 == NOps)
      Order.push_back(&N);
  }
  for (size_t I = 0; I != Order.size(); ++I) {
    SDNode *N = Order[I];
    for (auto U : N->uses()) {
      unsigned &UnsortedOps = Degree[U];
      if (0 == --UnsortedOps)
        Order.push_back(U);
    }
  }
}
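
// Sketch of the algorithm above (Kahn's topological sort, operands before
// users): zero-operand nodes seed the order, and a user is appended once its
// degree counter has been decremented for each of its operand edges. For
// example, given t3 = add t1, t2, the order lists t1 and t2 (degree 0) before
// t3 (initial degree 2, decremented once when t1 is emitted and once for t2).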

#ifndef NDEBUG
void SelectionDAG::VerifyDAGDiverence() {
  std::vector<SDNode *> TopoOrder;
  CreateTopologicalOrder(TopoOrder);
  const TargetLowering &TLI = getTargetLoweringInfo();
  DenseMap<const SDNode *, bool> DivergenceMap;
  for (auto &N : allnodes()) {
    DivergenceMap[&N] = false;
  }
  for (auto N : TopoOrder) {
    bool IsDivergent = DivergenceMap[N];
    bool IsSDNodeDivergent = TLI.isSDNodeSourceOfDivergence(N, FLI, DA);
    for (auto &Op : N->ops()) {
      if (Op.Val.getValueType() != MVT::Other)
        IsSDNodeDivergent |= DivergenceMap[Op.getNode()];
    }
    if (!IsDivergent && IsSDNodeDivergent && !TLI.isSDNodeAlwaysUniform(N)) {
      DivergenceMap[N] = true;
    }
  }
  for (auto &N : allnodes()) {
    (void)N;
    assert(DivergenceMap[&N] == N.isDivergent() &&
           "Divergence bit inconsistency detected\n");
  }
}
#endif

/// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.getNode() alone. The same value
/// may appear in both the From and To list. The Deleted vector is
/// handled the same way as for ReplaceAllUsesWith.
void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
                                              const SDValue *To,
                                              unsigned Num) {
  // Handle the simple, trivial case efficiently.
  if (Num == 1)
    return ReplaceAllUsesOfValueWith(*From, *To);

  transferDbgValues(*From, *To);

  // Collect all the uses and make records of them. This helps to process
  // new uses that are introduced during the replacement process.
8614 SmallVector<UseMemo, 4> Uses;
8615 for (unsigned i = 0; i != Num; ++i) {
8616 unsigned FromResNo = From[i].getResNo();
8617 SDNode *FromNode = From[i].getNode();
8618 for (SDNode::use_iterator UI = FromNode->use_begin(),
8619 E = FromNode->use_end(); UI != E; ++UI) {
8620 SDUse &Use = UI.getUse();
8621 if (Use.getResNo() == FromResNo) {
8622 UseMemo Memo = { *UI, i, &Use };
8623 Uses.push_back(Memo);
8624 }
8625 }
8626 }
8627
8628 // Sort the uses, so that all the uses from a given User are together.
8629 llvm::sort(Uses);
8630
8631 for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
8632 UseIndex != UseIndexEnd; ) {
8633 // We know that this user uses some value of From. If it is the right
8634 // value, update it.
8635 SDNode *User = Uses[UseIndex].User;
8636
8637 // This node is about to morph, remove its old self from the CSE maps.
8638 RemoveNodeFromCSEMaps(User);
8639
8640 // The Uses array is sorted, so all the uses for a given User
8641 // are next to each other in the list.
8642 // To help reduce the number of CSE recomputations, process all
8643 // the uses of this user that we can find this way.
8644 do {
8645 unsigned i = Uses[UseIndex].Index;
8646 SDUse &Use = *Uses[UseIndex].Use;
8647 ++UseIndex;
8648
8649 Use.set(To[i]);
8650 } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
8651
8652 // Now that we have modified User, add it back to the CSE maps. If it
8653 // already exists there, recursively merge the results together.
8654 AddModifiedNodeToCSEMaps(User);
8655 }
8656 }
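
// Usage sketch (illustrative only): replacing both results of a hypothetical
// two-result node 'Old' with the results of 'New' in one pass, so the CSE
// maps are recomputed only once per user:
//   SDValue From[] = {SDValue(Old, 0), SDValue(Old, 1)};
//   SDValue To[]   = {SDValue(New, 0), SDValue(New, 1)};
//   DAG.ReplaceAllUsesOfValuesWith(From, To, 2);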

/// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
/// based on their topological order. It returns the maximum id and a vector
/// of the SDNodes* in assigned order by reference.
unsigned SelectionDAG::AssignTopologicalOrder() {
  unsigned DAGSize = 0;

  // SortedPos tracks the progress of the algorithm. Nodes before it are
  // sorted, nodes after it are unsorted. When the algorithm completes
  // it is at the end of the list.
  allnodes_iterator SortedPos = allnodes_begin();

  // Visit all the nodes. Move nodes with no operands to the front of
  // the list immediately. Annotate nodes that do have operands with their
  // operand count. Before we do this, the Node Id fields of the nodes
  // may contain arbitrary values. After, the Node Id fields for nodes
  // before SortedPos will contain the topological sort index, and the
  // Node Id fields for nodes at SortedPos and after will contain the
  // count of outstanding operands.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ) {
    SDNode *N = &*I++;
    checkForCycles(N, this);
    unsigned Degree = N->getNumOperands();
    if (Degree == 0) {
      // A node with no operands, add it to the result array immediately.
      N->setNodeId(DAGSize++);
      allnodes_iterator Q(N);
      if (Q != SortedPos)
        SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
      assert(SortedPos != AllNodes.end() && "Overran node list");
      ++SortedPos;
    } else {
      // Temporarily use the Node Id as scratch space for the degree count.
      N->setNodeId(Degree);
    }
  }

  // Visit all the nodes. As we iterate, move nodes into sorted order,
  // such that by the time the end is reached all nodes will be sorted.
  for (SDNode &Node : allnodes()) {
    SDNode *N = &Node;
    checkForCycles(N, this);
    // N is in sorted position, so all its uses have one less operand
    // that needs to be sorted.
    for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
         UI != UE; ++UI) {
      SDNode *P = *UI;
      unsigned Degree = P->getNodeId();
      assert(Degree != 0 && "Invalid node degree");
      --Degree;
      if (Degree == 0) {
        // All of P's operands are sorted, so P may be sorted now.
        P->setNodeId(DAGSize++);
        if (P->getIterator() != SortedPos)
          SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
        assert(SortedPos != AllNodes.end() && "Overran node list");
        ++SortedPos;
      } else {
        // Update P's outstanding operand count.
        P->setNodeId(Degree);
      }
    }
    if (Node.getIterator() == SortedPos) {
#ifndef NDEBUG
      allnodes_iterator I(N);
      SDNode *S = &*++I;
      dbgs() << "Overran sorted position:\n";
      S->dumprFull(this); dbgs() << "\n";
      dbgs() << "Checking if this is due to cycles\n";
      checkForCycles(this, true);
#endif
      llvm_unreachable(nullptr);
    }
  }

  assert(SortedPos == AllNodes.end() &&
         "Topological sort incomplete!");
  assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
         "First node in topological sort is not the entry token!");
  assert(AllNodes.front().getNodeId() == 0 &&
         "First node in topological sort has non-zero id!");
  assert(AllNodes.front().getNumOperands() == 0 &&
         "First node in topological sort has operands!");
  assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
         "Last node in topological sort has unexpected id!");
  assert(AllNodes.back().use_empty() &&
         "Last node in topological sort has users!");
  assert(DAGSize == allnodes_size() && "Node count mismatch!");
  return DAGSize;
}
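
// Usage sketch (illustrative only): after assigning the order, iterating
// allnodes() visits operands strictly before their users, so a forward
// dataflow pass needs no extra worklist:
//   unsigned MaxId = DAG.AssignTopologicalOrder();
//   (void)MaxId;
//   for (SDNode &N : DAG.allnodes())
//     process(&N); // 'process' is a hypothetical visitor.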

/// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
/// value is produced by SD.
void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
  if (SD) {
    assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
    SD->setHasDebugValue(true);
  }
  DbgInfo->add(DB, SD, isParameter);
}

void SelectionDAG::AddDbgLabel(SDDbgLabel *DB) {
  DbgInfo->add(DB);
}

SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad,
                                                   SDValue NewMemOp) {
  assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");
  // The new memory operation must have the same position as the old load in
  // terms of memory dependency. Create a TokenFactor for the old load and new
  // memory operation and update uses of the old load's output chain to use
  // that TokenFactor.
  SDValue OldChain = SDValue(OldLoad, 1);
  SDValue NewChain = SDValue(NewMemOp.getNode(), 1);
  if (OldChain == NewChain || !OldLoad->hasAnyUseOfValue(1))
    return NewChain;

  SDValue TokenFactor =
      getNode(ISD::TokenFactor, SDLoc(OldLoad), MVT::Other, OldChain, NewChain);
  ReplaceAllUsesOfValueWith(OldChain, TokenFactor);
  UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewChain);
  return TokenFactor;
}
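
// Note on the last two calls above: ReplaceAllUsesOfValueWith rewrites every
// use of OldChain, including the TokenFactor's own operand, so
// UpdateNodeOperands restores the TokenFactor to (OldChain, NewChain)
// afterwards. Illustrative use when a combine replaces a load with a wider
// hypothetical 'NewLoad':
//   SDValue Chain = DAG.makeEquivalentMemoryOrdering(OldLoad, NewLoad);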

SDValue SelectionDAG::getSymbolFunctionGlobalAddress(SDValue Op,
                                                     Function **OutFunction) {
  assert(isa<ExternalSymbolSDNode>(Op) && "Node should be an ExternalSymbol");

  auto *Symbol = cast<ExternalSymbolSDNode>(Op)->getSymbol();
  auto *Module = MF->getFunction().getParent();
  auto *Function = Module->getFunction(Symbol);

  if (OutFunction != nullptr)
    *OutFunction = Function;

  if (Function != nullptr) {
    auto PtrTy = TLI->getPointerTy(getDataLayout(), Function->getAddressSpace());
    return getGlobalAddress(Function, SDLoc(Op), PtrTy);
  }

  std::string ErrorStr;
  raw_string_ostream ErrorFormatter(ErrorStr);

  ErrorFormatter << "Undefined external symbol ";
  ErrorFormatter << '"' << Symbol << '"';
  ErrorFormatter.flush();

  report_fatal_error(ErrorStr);
}

//===----------------------------------------------------------------------===//
//                              SDNode Class
//===----------------------------------------------------------------------===//

bool llvm::isNullConstant(SDValue V) {
  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
  return Const != nullptr && Const->isNullValue();
}

bool llvm::isNullFPConstant(SDValue V) {
  ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V);
  return Const != nullptr && Const->isZero() && !Const->isNegative();
}

bool llvm::isAllOnesConstant(SDValue V) {
  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
  return Const != nullptr && Const->isAllOnesValue();
}

bool llvm::isOneConstant(SDValue V) {
  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
  return Const != nullptr && Const->isOne();
}

SDValue llvm::peekThroughBitcasts(SDValue V) {
  while (V.getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);
  return V;
}

SDValue llvm::peekThroughOneUseBitcasts(SDValue V) {
  while (V.getOpcode() == ISD::BITCAST && V.getOperand(0).hasOneUse())
    V = V.getOperand(0);
  return V;
}

SDValue llvm::peekThroughExtractSubvectors(SDValue V) {
  while (V.getOpcode() == ISD::EXTRACT_SUBVECTOR)
    V = V.getOperand(0);
  return V;
}

bool llvm::isBitwiseNot(SDValue V, bool AllowUndefs) {
  if (V.getOpcode() != ISD::XOR)
    return false;
  V = peekThroughBitcasts(V.getOperand(1));
  unsigned NumBits = V.getScalarValueSizeInBits();
  ConstantSDNode *C =
      isConstOrConstSplat(V, AllowUndefs, /*AllowTruncation*/ true);
  return C && (C->getAPIntValue().countTrailingOnes() >= NumBits);
}
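
// Illustrative sketch: isBitwiseNot matches (xor X, -1) in both scalar and
// splat-vector form, so a combine folding NOT(NOT(X)) -> X could be written
// as (with 'V' a hypothetical local):
//   if (isBitwiseNot(V) && isBitwiseNot(V.getOperand(0)))
//     return V.getOperand(0).getOperand(0);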

ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, bool AllowUndefs,
                                          bool AllowTruncation) {
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
    return CN;

  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
    BitVector UndefElements;
    ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements);

    // BuildVectors can truncate their operands. Ignore that case here unless
    // AllowTruncation is set.
    if (CN && (UndefElements.none() || AllowUndefs)) {
      EVT CVT = CN->getValueType(0);
      EVT NSVT = N.getValueType().getScalarType();
      assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension");
      if (AllowTruncation || (CVT == NSVT))
        return CN;
    }
  }

  return nullptr;
}
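
// Usage sketch (illustrative only): this helper lets a single predicate
// cover scalars and splat build_vectors alike. For example, matching a shift
// by a uniform amount, with 'N' a hypothetical shift node:
//   if (ConstantSDNode *Amt = isConstOrConstSplat(N->getOperand(1)))
//     if (Amt->getAPIntValue() == 1)
//       ; // shift-by-one, same path for scalar and vector forms.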

ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, const APInt &DemandedElts,
                                          bool AllowUndefs,
                                          bool AllowTruncation) {
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
    return CN;

  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
    BitVector UndefElements;
    ConstantSDNode *CN = BV->getConstantSplatNode(DemandedElts, &UndefElements);

    // BuildVectors can truncate their operands. Ignore that case here unless
    // AllowTruncation is set.
    if (CN && (UndefElements.none() || AllowUndefs)) {
      EVT CVT = CN->getValueType(0);
      EVT NSVT = N.getValueType().getScalarType();
      assert(CVT.bitsGE(NSVT) && "Illegal build vector element extension");
      if (AllowTruncation || (CVT == NSVT))
        return CN;
    }
  }

  return nullptr;
}

ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, bool AllowUndefs) {
  if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
    return CN;

  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
    BitVector UndefElements;
    ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements);
    if (CN && (UndefElements.none() || AllowUndefs))
      return CN;
  }

  return nullptr;
}

ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N,
                                              const APInt &DemandedElts,
                                              bool AllowUndefs) {
  if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
    return CN;

  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
    BitVector UndefElements;
    ConstantFPSDNode *CN =
        BV->getConstantFPSplatNode(DemandedElts, &UndefElements);
    if (CN && (UndefElements.none() || AllowUndefs))
      return CN;
  }

  return nullptr;
}

bool llvm::isNullOrNullSplat(SDValue N, bool AllowUndefs) {
  // TODO: may want to use peekThroughBitcasts() here.
  ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs);
  return C && C->isNullValue();
}

bool llvm::isOneOrOneSplat(SDValue N) {
  // TODO: may want to use peekThroughBitcasts() here.
  unsigned BitWidth = N.getScalarValueSizeInBits();
  ConstantSDNode *C = isConstOrConstSplat(N);
  return C && C->isOne() && C->getValueSizeInBits(0) == BitWidth;
}

bool llvm::isAllOnesOrAllOnesSplat(SDValue N) {
  N = peekThroughBitcasts(N);
  unsigned BitWidth = N.getScalarValueSizeInBits();
  ConstantSDNode *C = isConstOrConstSplat(N);
  return C && C->isAllOnesValue() && C->getValueSizeInBits(0) == BitWidth;
}

HandleSDNode::~HandleSDNode() {
  DropOperands();
}

GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
                                         const DebugLoc &DL,
                                         const GlobalValue *GA, EVT VT,
                                         int64_t o, unsigned TF)
    : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
  TheGlobal = GA;
}

AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl,
                                         EVT VT, unsigned SrcAS,
                                         unsigned DestAS)
    : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)),
      SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}

MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
                     SDVTList VTs, EVT memvt, MachineMemOperand *mmo)
    : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
  MemSDNodeBits.IsVolatile = MMO->isVolatile();
  MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal();
  MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable();
  MemSDNodeBits.IsInvariant = MMO->isInvariant();

  // We check here that the size of the memory operand fits within the size of
  // the MMO. This is because the MMO might indicate only a possible address
  // range instead of specifying the affected memory addresses precisely.
  // TODO: Make MachineMemOperands aware of scalable vectors.
  assert(memvt.getStoreSize().getKnownMinSize() <= MMO->getSize() &&
         "Size mismatch!");
}

/// Profile - Gather unique data for the node.
///
void SDNode::Profile(FoldingSetNodeID &ID) const {
  AddNodeIDNode(ID, this);
}
namespace {

struct EVTArray {
  std::vector<EVT> VTs;

  EVTArray() {
    VTs.reserve(MVT::LAST_VALUETYPE);
    for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
      VTs.push_back(MVT((MVT::SimpleValueType)i));
  }
};

} // end anonymous namespace

static ManagedStatic<std::set<EVT, EVT::compareRawBits>> EVTs;
static ManagedStatic<EVTArray> SimpleVTArray;
static ManagedStatic<sys::SmartMutex<true>> VTMutex;

/// getValueTypeList - Return a pointer to the specified value type.
///
const EVT *SDNode::getValueTypeList(EVT VT) {
  if (VT.isExtended()) {
    sys::SmartScopedLock<true> Lock(*VTMutex);
    return &(*EVTs->insert(VT).first);
  } else {
    assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
           "Value type out of range!");
    return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
  }
}

/// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
/// indicated value. This method ignores uses of other values defined by this
/// operation.
bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  // TODO: Only iterate over uses of a given value of the node
  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
    if (UI.getUse().getResNo() == Value) {
      if (NUses == 0)
        return false;
      --NUses;
    }
  }

  // Found exactly the right number of uses?
  return NUses == 0;
}

/// hasAnyUseOfValue - Return true if there is any use of the indicated
/// value. This method ignores uses of other values defined by this operation.
bool SDNode::hasAnyUseOfValue(unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
    if (UI.getUse().getResNo() == Value)
      return true;

  return false;
}

/// isOnlyUserOf - Return true if this node is the only use of N.
bool SDNode::isOnlyUserOf(const SDNode *N) const {
  bool Seen = false;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDNode *User = *I;
    if (User == this)
      Seen = true;
    else
      return false;
  }

  return Seen;
}

/// Return true if the only users of N are contained in Nodes.
bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) {
  bool Seen = false;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDNode *User = *I;
    if (llvm::any_of(Nodes,
                     [&User](const SDNode *Node) { return User == Node; }))
      Seen = true;
    else
      return false;
  }

  return Seen;
}

/// isOperandOf - Return true if this node is an operand of N.
bool SDValue::isOperandOf(const SDNode *N) const {
  return any_of(N->op_values(), [this](SDValue Op) { return *this == Op; });
}

bool SDNode::isOperandOf(const SDNode *N) const {
  return any_of(N->op_values(),
                [this](SDValue Op) { return this == Op.getNode(); });
}

/// reachesChainWithoutSideEffects - Return true if this operand (which must
/// be a chain) reaches the specified operand without crossing any
/// side-effecting instructions on any chain path. In practice, this looks
/// through token factors and non-volatile loads. In order to remain efficient,
/// this only looks a couple of nodes in; it does not do an exhaustive search.
///
/// Note that we only need to examine chains when we're searching for
/// side-effects; SelectionDAG requires that all side-effects are represented
/// by chains, even if another operand would force a specific ordering. This
/// constraint is necessary to allow transformations like splitting loads.
bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
                                             unsigned Depth) const {
  if (*this == Dest) return true;

  // Don't search too deeply, we just want to be able to see through
  // TokenFactors etc.
  if (Depth == 0) return false;

  // If this is a token factor, all inputs to the TF happen in parallel.
  if (getOpcode() == ISD::TokenFactor) {
    // First, try a shallow search.
    if (is_contained((*this)->ops(), Dest)) {
      // We found the chain we want as an operand of this TokenFactor.
      // Essentially, we reach the chain without side-effects if we could
      // serialize the TokenFactor into a simple chain of operations with
      // Dest as the last operation. This is automatically true if the
      // chain has one use: there are no other ordering constraints.
      // If the chain has more than one use, we give up: some other
      // use of Dest might force a side-effect between Dest and the current
      // node.
      if (Dest.hasOneUse())
        return true;
    }
    // Next, try a deep search: check whether every operand of the TokenFactor
    // reaches Dest.
    return llvm::all_of((*this)->ops(), [=](SDValue Op) {
      return Op.reachesChainWithoutSideEffects(Dest, Depth - 1);
    });
  }

  // Loads don't have side effects, look through them.
  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
    if (Ld->isUnordered())
      return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth - 1);
  }
  return false;
}
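
// Usage sketch (illustrative only): a combine that wants to reorder a memory
// operation relative to another chain can first check, for hypothetical
// chain values 'Ch' and 'LdChain':
//   if (Ch.reachesChainWithoutSideEffects(LdChain))
//     ; // no store/volatile op can sit between them on any chain path.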

bool SDNode::hasPredecessor(const SDNode *N) const {
  SmallPtrSet<const SDNode *, 32> Visited;
  SmallVector<const SDNode *, 16> Worklist;
  Worklist.push_back(this);
  return hasPredecessorHelper(N, Visited, Worklist);
}

void SDNode::intersectFlagsWith(const SDNodeFlags Flags) {
  this->Flags.intersectWith(Flags);
}

SDValue
SelectionDAG::matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp,
                                  ArrayRef<ISD::NodeType> CandidateBinOps,
                                  bool AllowPartials) {
  // The pattern must end in an extract from index 0.
  if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      !isNullConstant(Extract->getOperand(1)))
    return SDValue();

  // Match against one of the candidate binary ops.
  SDValue Op = Extract->getOperand(0);
  if (llvm::none_of(CandidateBinOps, [Op](ISD::NodeType BinOp) {
        return Op.getOpcode() == unsigned(BinOp);
      }))
    return SDValue();

  // Floating-point reductions may require relaxed constraints on the final
  // step of the reduction because they may reorder intermediate operations.
  unsigned CandidateBinOp = Op.getOpcode();
  if (Op.getValueType().isFloatingPoint()) {
    SDNodeFlags Flags = Op->getFlags();
    switch (CandidateBinOp) {
    case ISD::FADD:
      if (!Flags.hasNoSignedZeros() || !Flags.hasAllowReassociation())
        return SDValue();
      break;
    default:
      llvm_unreachable("Unhandled FP opcode for binop reduction");
    }
  }

  // Matching failed - attempt to see if we did enough stages that a partial
  // reduction from a subvector is possible.
  auto PartialReduction = [&](SDValue Op, unsigned NumSubElts) {
    if (!AllowPartials || !Op)
      return SDValue();
    EVT OpVT = Op.getValueType();
    EVT OpSVT = OpVT.getScalarType();
    EVT SubVT = EVT::getVectorVT(*getContext(), OpSVT, NumSubElts);
    if (!TLI->isExtractSubvectorCheap(SubVT, OpVT, 0))
      return SDValue();
    BinOp = (ISD::NodeType)CandidateBinOp;
    return getNode(
        ISD::EXTRACT_SUBVECTOR, SDLoc(Op), SubVT, Op,
        getConstant(0, SDLoc(Op), TLI->getVectorIdxTy(getDataLayout())));
  };

  // At each stage, we're looking for something that looks like:
  // %s = shufflevector <8 x i32> %op, <8 x i32> undef,
  //                    <8 x i32> <i32 2, i32 3, i32 undef, i32 undef,
  //                               i32 undef, i32 undef, i32 undef, i32 undef>
  // %a = binop <8 x i32> %op, %s
  // Where the mask changes according to the stage. E.g. for a 3-stage pyramid,
  // we expect something like:
  // <4,5,6,7,u,u,u,u>
  // <2,3,u,u,u,u,u,u>
  // <1,u,u,u,u,u,u,u>
  // While a partial reduction match would be:
  // <2,3,u,u,u,u,u,u>
  // <1,u,u,u,u,u,u,u>
  unsigned Stages = Log2_32(Op.getValueType().getVectorNumElements());
  SDValue PrevOp;
  for (unsigned i = 0; i < Stages; ++i) {
    unsigned MaskEnd = (1 << i);

    if (Op.getOpcode() != CandidateBinOp)
      return PartialReduction(PrevOp, MaskEnd);

    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(Op0);
    if (Shuffle) {
      Op = Op1;
    } else {
      Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1);
      Op = Op0;
    }

    // The first operand of the shuffle should be the same as the other operand
    // of the binop.
    if (!Shuffle || Shuffle->getOperand(0) != Op)
      return PartialReduction(PrevOp, MaskEnd);

    // Verify the shuffle has the expected (at this stage of the pyramid) mask.
    for (int Index = 0; Index < (int)MaskEnd; ++Index)
      if (Shuffle->getMaskElt(Index) != (int)(MaskEnd + Index))
        return PartialReduction(PrevOp, MaskEnd);

    PrevOp = Op;
  }

  BinOp = (ISD::NodeType)CandidateBinOp;
  return Op;
}
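
// Usage sketch (illustrative only): matching an integer add-reduction that
// ends in an extract of lane 0, with 'Extract' a hypothetical
// EXTRACT_VECTOR_ELT node:
//   ISD::NodeType BinOp;
//   if (SDValue Rdx = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD}))
//     ; // Rdx is the vector whose elements sum to the extracted scalar.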

SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
  assert(N->getNumValues() == 1 &&
         "Can't unroll a vector with multiple results!");

  EVT VT = N->getValueType(0);
  unsigned NE = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  SDLoc dl(N);

  SmallVector<SDValue, 8> Scalars;
  SmallVector<SDValue, 4> Operands(N->getNumOperands());

  // If ResNE is 0, fully unroll the vector op.
  if (ResNE == 0)
    ResNE = NE;
  else if (NE > ResNE)
    NE = ResNE;

  unsigned i;
  for (i = 0; i != NE; ++i) {
    for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
      SDValue Operand = N->getOperand(j);
      EVT OperandVT = Operand.getValueType();
      if (OperandVT.isVector()) {
        // A vector operand; extract a single element.
        EVT OperandEltVT = OperandVT.getVectorElementType();
        Operands[j] =
            getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, Operand,
                    getConstant(i, dl, TLI->getVectorIdxTy(getDataLayout())));
      } else {
        // A scalar operand; just use it as is.
        Operands[j] = Operand;
      }
    }

    switch (N->getOpcode()) {
    default: {
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands,
                                N->getFlags()));
      break;
    }
    case ISD::VSELECT:
      Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
      break;
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
    case ISD::ROTL:
    case ISD::ROTR:
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
                                getShiftAmountOperand(Operands[0].getValueType(),
                                                      Operands[1])));
      break;
    case ISD::SIGN_EXTEND_INREG: {
      EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
                                Operands[0],
                                getValueType(ExtVT)));
    }
    }
  }

  for (; i < ResNE; ++i)
    Scalars.push_back(getUNDEF(EltVT));

  EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE);
  return getBuildVector(VecVT, dl, Scalars);
}
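
// Illustrative sketch: unrolling scalarizes a vector op into per-lane scalar
// ops and rebuilds a BUILD_VECTOR. For a hypothetical v4i32 ADD node 'N':
//   SDValue Unrolled = DAG.UnrollVectorOp(N);
// yields BUILD_VECTOR(add e0, add e1, add e2, add e3); passing ResNE > 4
// would pad the extra lanes with UNDEF.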

std::pair<SDValue, SDValue> SelectionDAG::UnrollVectorOverflowOp(
    SDNode *N, unsigned ResNE) {
  unsigned Opcode = N->getOpcode();
  assert((Opcode == ISD::UADDO || Opcode == ISD::SADDO ||
          Opcode == ISD::USUBO || Opcode == ISD::SSUBO ||
          Opcode == ISD::UMULO || Opcode == ISD::SMULO) &&
         "Expected an overflow opcode");

  EVT ResVT = N->getValueType(0);
  EVT OvVT = N->getValueType(1);
  EVT ResEltVT = ResVT.getVectorElementType();
  EVT OvEltVT = OvVT.getVectorElementType();
  SDLoc dl(N);

  // If ResNE is 0, fully unroll the vector op.
  unsigned NE = ResVT.getVectorNumElements();
  if (ResNE == 0)
    ResNE = NE;
  else if (NE > ResNE)
    NE = ResNE;

  SmallVector<SDValue, 8> LHSScalars;
  SmallVector<SDValue, 8> RHSScalars;
  ExtractVectorElements(N->getOperand(0), LHSScalars, 0, NE);
  ExtractVectorElements(N->getOperand(1), RHSScalars, 0, NE);

  EVT SVT = TLI->getSetCCResultType(getDataLayout(), *getContext(), ResEltVT);
  SDVTList VTs = getVTList(ResEltVT, SVT);
  SmallVector<SDValue, 8> ResScalars;
  SmallVector<SDValue, 8> OvScalars;
  for (unsigned i = 0; i < NE; ++i) {
    SDValue Res = getNode(Opcode, dl, VTs, LHSScalars[i], RHSScalars[i]);
    SDValue Ov =
        getSelect(dl, OvEltVT, Res.getValue(1),
                  getBoolConstant(true, dl, OvEltVT, ResVT),
                  getConstant(0, dl, OvEltVT));

    ResScalars.push_back(Res);
    OvScalars.push_back(Ov);
  }

  ResScalars.append(ResNE - NE, getUNDEF(ResEltVT));
  OvScalars.append(ResNE - NE, getUNDEF(OvEltVT));

  EVT NewResVT = EVT::getVectorVT(*getContext(), ResEltVT, ResNE);
  EVT NewOvVT = EVT::getVectorVT(*getContext(), OvEltVT, ResNE);
  return std::make_pair(getBuildVector(NewResVT, dl, ResScalars),
                        getBuildVector(NewOvVT, dl, OvScalars));
}

bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD,
                                                  LoadSDNode *Base,
                                                  unsigned Bytes,
                                                  int Dist) const {
  if (LD->isVolatile() || Base->isVolatile())
    return false;
  // TODO: probably too restrictive for atomics, revisit
  if (!LD->isSimple())
    return false;
  if (LD->isIndexed() || Base->isIndexed())
    return false;
  if (LD->getChain() != Base->getChain())
    return false;
  EVT VT = LD->getValueType(0);
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  auto BaseLocDecomp = BaseIndexOffset::match(Base, *this);
  auto LocDecomp = BaseIndexOffset::match(LD, *this);

  int64_t Offset = 0;
  if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset))
    return (Dist * Bytes == Offset);
  return false;
}

/// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
/// it cannot be inferred.
unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
  // If this is a GlobalAddress + cst, return the alignment.
  const GlobalValue *GV = nullptr;
  int64_t GVOffset = 0;
  if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
    unsigned PtrWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
    KnownBits Known(PtrWidth);
    llvm::computeKnownBits(GV, Known, getDataLayout());
    unsigned AlignBits = Known.countMinTrailingZeros();
    unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
    if (Align)
      return MinAlign(Align, GVOffset);
  }

  // If this is a direct reference to a stack slot, use information about the
  // stack slot's alignment.
  int FrameIdx = INT_MIN;
  int64_t FrameOffset = 0;
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
    FrameIdx = FI->getIndex();
  } else if (isBaseWithConstantOffset(Ptr) &&
             isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
    // Handle FI+Cst
    FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
    FrameOffset = Ptr.getConstantOperandVal(1);
  }

  if (FrameIdx != INT_MIN) {
    const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
    unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
                                    FrameOffset);
    return FIInfoAlign;
  }

  return 0;
}

/// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
/// which is split (or expanded) into two not necessarily identical pieces.
std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
  // Currently all types are split in half.
  EVT LoVT, HiVT;
  if (!VT.isVector())
    LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
  else
    LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext());

  return std::make_pair(LoVT, HiVT);
}

/// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
/// low/high part.
std::pair<SDValue, SDValue>
SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
                          const EVT &HiVT) {
  assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <=
         N.getValueType().getVectorNumElements() &&
         "More vector elements requested than available!");
  SDValue Lo, Hi;
  Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
               getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout())));
  Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
               getConstant(LoVT.getVectorNumElements(), DL,
                           TLI->getVectorIdxTy(getDataLayout())));
  return std::make_pair(Lo, Hi);
}
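
// Usage sketch (illustrative only): splitting a vector value 'N' in half
// during type legalization, with 'DL' a hypothetical debug location:
//   EVT LoVT, HiVT;
//   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N.getValueType());
//   SDValue Lo, Hi;
//   std::tie(Lo, Hi) = DAG.SplitVector(N, DL, LoVT, HiVT);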

/// Widen the vector up to the next power of two using INSERT_SUBVECTOR.
SDValue SelectionDAG::WidenVector(const SDValue &N, const SDLoc &DL) {
  EVT VT = N.getValueType();
  EVT WideVT = EVT::getVectorVT(*getContext(), VT.getVectorElementType(),
                                NextPowerOf2(VT.getVectorNumElements()));
  return getNode(ISD::INSERT_SUBVECTOR, DL, WideVT, getUNDEF(WideVT), N,
                 getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout())));
}

void SelectionDAG::ExtractVectorElements(SDValue Op,
                                         SmallVectorImpl<SDValue> &Args,
                                         unsigned Start, unsigned Count) {
  EVT VT = Op.getValueType();
  if (Count == 0)
    Count = VT.getVectorNumElements();

  EVT EltVT = VT.getVectorElementType();
  EVT IdxTy = TLI->getVectorIdxTy(getDataLayout());
  SDLoc SL(Op);
  for (unsigned i = Start, e = Start + Count; i != e; ++i) {
    Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                           Op, getConstant(i, SL, IdxTy)));
  }
}

// getAddressSpace - Return the address space this GlobalAddress belongs to.
unsigned GlobalAddressSDNode::getAddressSpace() const {
  return getGlobal()->getType()->getAddressSpace();
}

Type *ConstantPoolSDNode::getType() const {
  if (isMachineConstantPoolEntry())
    return Val.MachineCPVal->getType();
  return Val.ConstVal->getType();
}

bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
                                        unsigned &SplatBitSize,
                                        bool &HasAnyUndefs,
                                        unsigned MinSplatBits,
                                        bool IsBigEndian) const {
  EVT VT = getValueType(0);
  assert(VT.isVector() && "Expected a vector type");
  unsigned VecWidth = VT.getSizeInBits();
  if (MinSplatBits > VecWidth)
    return false;

  // FIXME: The widths are based on this node's type, but build vectors can
  // truncate their operands.
  SplatValue = APInt(VecWidth, 0);
  SplatUndef = APInt(VecWidth, 0);

  // Get the bits. Bits with undefined values (when the corresponding element
  // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
  // in SplatValue. If any of the values are not constant, give up and return
  // false.
  unsigned NumOps = getNumOperands();
  assert(NumOps > 0 && "isConstantSplat has 0-size build vector");
  unsigned EltWidth = VT.getScalarSizeInBits();

  for (unsigned j = 0; j < NumOps; ++j) {
    unsigned i = IsBigEndian ? NumOps - 1 - j : j;
    SDValue OpVal = getOperand(i);
    unsigned BitPos = j * EltWidth;

    if (OpVal.isUndef())
      SplatUndef.setBits(BitPos, BitPos + EltWidth);
    else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal))
      SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
    else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
      SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
    else
      return false;
  }

  // The build_vector is all constants or undefs. Find the smallest element
  // size that splats the vector.
  HasAnyUndefs = (SplatUndef != 0);

  // FIXME: This does not work for vectors with elements less than 8 bits.
  while (VecWidth > 8) {
    unsigned HalfSize = VecWidth / 2;
    APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue = SplatValue.trunc(HalfSize);
    APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
    APInt LowUndef = SplatUndef.trunc(HalfSize);

    // If the two halves do not match (ignoring undef bits), stop here.
    if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
        MinSplatBits > HalfSize)
      break;

    SplatValue = HighValue | LowValue;
    SplatUndef = HighUndef & LowUndef;

    VecWidth = HalfSize;
  }

  SplatBitSize = VecWidth;
  return true;
}
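
// Worked example (illustrative only): for a v4i16 build_vector
// <0x0101, 0x0101, 0x0101, 0x0101>, the 64 collected bits halve-match down
// to 8 bits, so isConstantSplat reports SplatBitSize == 8 with
// SplatValue == 0x01; with MinSplatBits == 16 the halving stops early and
// reports SplatBitSize == 16 with SplatValue == 0x0101.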

SDValue BuildVectorSDNode::getSplatValue(const APInt &DemandedElts,
                                         BitVector *UndefElements) const {
  if (UndefElements) {
    UndefElements->clear();
    UndefElements->resize(getNumOperands());
  }
  assert(getNumOperands() == DemandedElts.getBitWidth() &&
         "Unexpected vector size");
  if (!DemandedElts)
    return SDValue();
  SDValue Splatted;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!DemandedElts[i])
      continue;
    SDValue Op = getOperand(i);
    if (Op.isUndef()) {
      if (UndefElements)
        (*UndefElements)[i] = true;
    } else if (!Splatted) {
      Splatted = Op;
    } else if (Splatted != Op) {
      return SDValue();
    }
  }

  if (!Splatted) {
    unsigned FirstDemandedIdx = DemandedElts.countTrailingZeros();
    assert(getOperand(FirstDemandedIdx).isUndef() &&
           "Can only have a splat without a constant for all undefs.");
    return getOperand(FirstDemandedIdx);
  }

  return Splatted;
}

SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
  APInt DemandedElts = APInt::getAllOnesValue(getNumOperands());
  return getSplatValue(DemandedElts, UndefElements);
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(const APInt &DemandedElts,
                                        BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(
      getSplatValue(DemandedElts, UndefElements));
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(const APInt &DemandedElts,
                                          BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(
      getSplatValue(DemandedElts, UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
}

int32_t
BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
                                                   uint32_t BitWidth) const {
  if (ConstantFPSDNode *CN =
          dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
    bool IsExact;
    APSInt IntVal(BitWidth);
    const APFloat &APF = CN->getValueAPF();
    if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) !=
            APFloat::opOK ||
        !IsExact)
      return -1;

    return IntVal.exactLogBase2();
  }
  return -1;
}

bool BuildVectorSDNode::isConstant() const {
  for (const SDValue &Op : op_values()) {
    unsigned Opc = Op.getOpcode();
    if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
      return false;
  }
  return true;
}

bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
  // Find the first non-undef value in the shuffle mask.
  unsigned i, e;
  for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
    /* search */;

  // If all elements are undefined, this shuffle can be considered a splat
  // (although it should eventually get simplified away completely).
  if (i == e)
    return true;

  // Make sure all remaining elements are either undef or the same as the first
  // non-undef value.
  for (int Idx = Mask[i]; i != e; ++i)
    if (Mask[i] >= 0 && Mask[i] != Idx)
      return false;
  return true;
}

// Returns the SDNode if it is a constant integer BuildVector
// or constant integer.
SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) {
  if (isa<ConstantSDNode>(N))
    return N.getNode();
  if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
    return N.getNode();
  // Treat a GlobalAddress supporting constant offset folding as a
  // constant integer.
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N))
    if (GA->getOpcode() == ISD::GlobalAddress &&
        TLI->isOffsetFoldingLegal(GA))
      return GA;
  return nullptr;
}

SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) {
  if (isa<ConstantFPSDNode>(N))
    return N.getNode();

  if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
    return N.getNode();

  return nullptr;
}

void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
  assert(!Node->OperandList && "Node already has operands");
  assert(SDNode::getMaxNumOperands() >= Vals.size() &&
         "too many operands to fit into SDNode");
  SDUse *Ops = OperandRecycler.allocate(
      ArrayRecycler<SDUse>::Capacity::get(Vals.size()), OperandAllocator);

  bool IsDivergent = false;
  for (unsigned I = 0; I != Vals.size(); ++I) {
    Ops[I].setUser(Node);
    Ops[I].setInitial(Vals[I]);
    // Skip Chain operands; they do not carry divergence.
    if (Ops[I].Val.getValueType() != MVT::Other)
      IsDivergent = IsDivergent || Ops[I].getNode()->isDivergent();
  }
  Node->NumOperands = Vals.size();
  Node->OperandList = Ops;
  IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, DA);
  if (!TLI->isSDNodeAlwaysUniform(Node))
    Node->SDNodeBits.IsDivergent = IsDivergent;
  checkForCycles(Node);
}

SDValue SelectionDAG::getTokenFactor(const SDLoc &DL,
                                     SmallVectorImpl<SDValue> &Vals) {
  size_t Limit = SDNode::getMaxNumOperands();
  while (Vals.size() > Limit) {
    unsigned SliceIdx = Vals.size() - Limit;
    auto ExtractedTFs = ArrayRef<SDValue>(Vals).slice(SliceIdx, Limit);
    SDValue NewTF = getNode(ISD::TokenFactor, DL, MVT::Other, ExtractedTFs);
    Vals.erase(Vals.begin() + SliceIdx, Vals.end());
    Vals.emplace_back(NewTF);
  }
  return getNode(ISD::TokenFactor, DL, MVT::Other, Vals);
}
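
// Illustrative note: getTokenFactor folds the chain list from the back so
// that no single TokenFactor exceeds SDNode::getMaxNumOperands(). Hedged
// usage sketch with hypothetical chain values:
//   SmallVector<SDValue, 8> Chains;
//   Chains.push_back(LoadChain0); // hypothetical chains being merged
//   Chains.push_back(LoadChain1);
//   SDValue Chain = DAG.getTokenFactor(DL, Chains);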

#ifndef NDEBUG
static void checkForCyclesHelper(const SDNode *N,
                                 SmallPtrSetImpl<const SDNode*> &Visited,
                                 SmallPtrSetImpl<const SDNode*> &Checked,
                                 const llvm::SelectionDAG *DAG) {
  // If this node has already been checked, don't check it again.
  if (Checked.count(N))
    return;

  // If a node has already been visited on this depth-first walk, reject it as
  // a cycle.
  if (!Visited.insert(N).second) {
    errs() << "Detected cycle in SelectionDAG\n";
    dbgs() << "Offending node:\n";
    N->dumprFull(DAG); dbgs() << "\n";
    abort();
  }

  for (const SDValue &Op : N->op_values())
    checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);

  Checked.insert(N);
  Visited.erase(N);
}
#endif

void llvm::checkForCycles(const llvm::SDNode *N,
                          const llvm::SelectionDAG *DAG,
                          bool force) {
#ifndef NDEBUG
  bool check = force;
#ifdef EXPENSIVE_CHECKS
  check = true;
#endif // EXPENSIVE_CHECKS
  if (check) {
    assert(N && "Checking nonexistent SDNode");
    SmallPtrSet<const SDNode*, 32> visited;
    SmallPtrSet<const SDNode*, 32> checked;
    checkForCyclesHelper(N, visited, checked, DAG);
  }
#endif // !NDEBUG
}

void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
  checkForCycles(DAG->getRoot().getNode(), DAG, force);
}
