1 //===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This implements the SelectionDAG class.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "llvm/CodeGen/SelectionDAG.h"
15 #include "SDNodeDbgValue.h"
16 #include "llvm/ADT/APFloat.h"
17 #include "llvm/ADT/APInt.h"
18 #include "llvm/ADT/APSInt.h"
19 #include "llvm/ADT/ArrayRef.h"
20 #include "llvm/ADT/BitVector.h"
21 #include "llvm/ADT/FoldingSet.h"
22 #include "llvm/ADT/None.h"
23 #include "llvm/ADT/STLExtras.h"
24 #include "llvm/ADT/SmallPtrSet.h"
25 #include "llvm/ADT/SmallVector.h"
26 #include "llvm/ADT/Triple.h"
27 #include "llvm/ADT/Twine.h"
28 #include "llvm/Analysis/ValueTracking.h"
29 #include "llvm/CodeGen/ISDOpcodes.h"
30 #include "llvm/CodeGen/MachineBasicBlock.h"
31 #include "llvm/CodeGen/MachineConstantPool.h"
32 #include "llvm/CodeGen/MachineFrameInfo.h"
33 #include "llvm/CodeGen/MachineFunction.h"
34 #include "llvm/CodeGen/MachineMemOperand.h"
35 #include "llvm/CodeGen/RuntimeLibcalls.h"
36 #include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
37 #include "llvm/CodeGen/SelectionDAGNodes.h"
38 #include "llvm/CodeGen/SelectionDAGTargetInfo.h"
39 #include "llvm/CodeGen/TargetLowering.h"
40 #include "llvm/CodeGen/TargetRegisterInfo.h"
41 #include "llvm/CodeGen/TargetSubtargetInfo.h"
42 #include "llvm/CodeGen/ValueTypes.h"
43 #include "llvm/IR/Constant.h"
44 #include "llvm/IR/Constants.h"
45 #include "llvm/IR/DataLayout.h"
46 #include "llvm/IR/DebugInfoMetadata.h"
47 #include "llvm/IR/DebugLoc.h"
48 #include "llvm/IR/DerivedTypes.h"
49 #include "llvm/IR/Function.h"
50 #include "llvm/IR/GlobalValue.h"
51 #include "llvm/IR/Metadata.h"
52 #include "llvm/IR/Type.h"
53 #include "llvm/IR/Value.h"
54 #include "llvm/Support/Casting.h"
55 #include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
56 #include "llvm/Support/Compiler.h"
57 #include "llvm/Support/Debug.h"
58 #include "llvm/Support/ErrorHandling.h"
59 #include "llvm/Support/KnownBits.h"
60 #include "llvm/Support/MachineValueType.h"
61 #include "llvm/Support/ManagedStatic.h"
62 #include "llvm/Support/MathExtras.h"
63 #include "llvm/Support/Mutex.h"
64 #include "llvm/Support/raw_ostream.h"
65 #include "llvm/Target/TargetMachine.h"
66 #include "llvm/Target/TargetOptions.h"
67 #include <algorithm>
68 #include <cassert>
69 #include <cstdint>
70 #include <cstdlib>
71 #include <limits>
72 #include <set>
73 #include <string>
74 #include <utility>
75 #include <vector>
76
77 using namespace llvm;
78
79 /// makeVTList - Return an instance of the SDVTList struct initialized with the
80 /// specified members.
81 static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
82 SDVTList Res = {VTs, NumVTs};
83 return Res;
84 }
85
86 // Default null implementations of the callbacks.
87 void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
88 void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}
89
90 void SelectionDAG::DAGNodeDeletedListener::anchor() {}
91
92 #define DEBUG_TYPE "selectiondag"
93
94 static cl::opt<bool> EnableMemCpyDAGOpt("enable-memcpy-dag-opt",
95 cl::Hidden, cl::init(true),
96 cl::desc("Gang up loads and stores generated by inlining of memcpy"));
97
98 static cl::opt<int> MaxLdStGlue("ldstmemcpy-glue-max",
99 cl::desc("Number limit for gluing ld/st of memcpy."),
100 cl::Hidden, cl::init(0));
101
102 static void NewSDValueDbgMsg(SDValue V, StringRef Msg, SelectionDAG *G) {
103 LLVM_DEBUG(dbgs() << Msg; V.getNode()->dump(G););
104 }
105
106 //===----------------------------------------------------------------------===//
107 // ConstantFPSDNode Class
108 //===----------------------------------------------------------------------===//
109
110 /// isExactlyValue - We don't rely on operator== working on double values, as
111 /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
112 /// As such, this method can be used to do an exact bit-for-bit comparison of
113 /// two floating point values.
114 bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
115 return getValueAPF().bitwiseIsEqual(V);
116 }
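// Illustration (not part of the original source): a minimal sketch of why a
// bit-for-bit comparison is used here rather than the ordinary floating-point
// equality that the doc comment warns about.
//
//   double PosZero = 0.0, NegZero = -0.0;
//   bool ValueEq = (PosZero == NegZero);                     // true: IEEE-754 treats them as equal
//   bool BitEq = APFloat(PosZero).bitwiseIsEqual(APFloat(NegZero)); // false: the sign bits differ
//
// isExactlyValue(V) behaves like BitEq, so -0.0 and +0.0 are distinguished.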
117
118 bool ConstantFPSDNode::isValueValidForType(EVT VT,
119 const APFloat& Val) {
120 assert(VT.isFloatingPoint() && "Can only convert between FP types");
121
122 // convert modifies in place, so make a copy.
123 APFloat Val2 = APFloat(Val);
124 bool losesInfo;
125 (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
126 APFloat::rmNearestTiesToEven,
127 &losesInfo);
128 return !losesInfo;
129 }
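// Illustration (not part of the original source), assuming APFloats built from
// host doubles: a value is "valid" for a type exactly when converting it to
// that type's semantics is lossless.
//
//   ConstantFPSDNode::isValueValidForType(MVT::f32, APFloat(0.5)); // true: 0.5 is exact in single precision
//   ConstantFPSDNode::isValueValidForType(MVT::f32, APFloat(0.1)); // false: the double nearest 0.1 is not
//                                                                  //        exactly representable as a float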
130
131 //===----------------------------------------------------------------------===//
132 // ISD Namespace
133 //===----------------------------------------------------------------------===//
134
135 bool ISD::isConstantSplatVector(const SDNode *N, APInt &SplatVal) {
136 auto *BV = dyn_cast<BuildVectorSDNode>(N);
137 if (!BV)
138 return false;
139
140 APInt SplatUndef;
141 unsigned SplatBitSize;
142 bool HasUndefs;
143 unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
144 return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasUndefs,
145 EltSize) &&
146 EltSize == SplatBitSize;
147 }
148
149 // FIXME: AllOnes and AllZeros duplicate a lot of code. Could these be
150 // specializations of the more general isConstantSplatVector()?
151
152 bool ISD::isBuildVectorAllOnes(const SDNode *N) {
153 // Look through a bit convert.
154 while (N->getOpcode() == ISD::BITCAST)
155 N = N->getOperand(0).getNode();
156
157 if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
158
159 unsigned i = 0, e = N->getNumOperands();
160
161 // Skip over all of the undef values.
162 while (i != e && N->getOperand(i).isUndef())
163 ++i;
164
165 // Do not accept an all-undef vector.
166 if (i == e) return false;
167
168 // Do not accept build_vectors that aren't all constants or which have non-~0
169 // elements. We have to be a bit careful here, as the type of the constant
170 // may not be the same as the type of the vector elements due to type
171 // legalization (the elements are promoted to a legal type for the target and
172 // a vector of a type may be legal when the base element type is not).
173 // We only want to check enough bits to cover the vector elements, because
174 // we care if the resultant vector is all ones, not whether the individual
175 // constants are.
176 SDValue NotZero = N->getOperand(i);
177 unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
178 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
179 if (CN->getAPIntValue().countTrailingOnes() < EltSize)
180 return false;
181 } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
182 if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
183 return false;
184 } else
185 return false;
186
187 // Okay, we have at least one ~0 value, check to see if the rest match or are
188 // undefs. Even with the above element type twiddling, this should be OK, as
189 // the same type legalization should have applied to all the elements.
190 for (++i; i != e; ++i)
191 if (N->getOperand(i) != NotZero && !N->getOperand(i).isUndef())
192 return false;
193 return true;
194 }
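// Illustration (not part of the original source): why only the low EltSize
// bits are checked. After type legalization a v4i8 all-ones vector may carry
// promoted i32 operands, e.g. (hypothetical dump)
//
//   t5: v4i8 = BUILD_VECTOR Constant:i32<255>, Constant:i32<255>,
//                           Constant:i32<255>, Constant:i32<255>
//
// Each operand's APInt has only 8 trailing ones, but EltSize is also 8, so
// countTrailingOnes() >= EltSize holds and the node is treated as all-ones.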
195
196 bool ISD::isBuildVectorAllZeros(const SDNode *N) {
197 // Look through a bit convert.
198 while (N->getOpcode() == ISD::BITCAST)
199 N = N->getOperand(0).getNode();
200
201 if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
202
203 bool IsAllUndef = true;
204 for (const SDValue &Op : N->op_values()) {
205 if (Op.isUndef())
206 continue;
207 IsAllUndef = false;
208 // Do not accept build_vectors that aren't all constants or which have non-0
209 // elements. We have to be a bit careful here, as the type of the constant
210 // may not be the same as the type of the vector elements due to type
211 // legalization (the elements are promoted to a legal type for the target
212 // and a vector of a type may be legal when the base element type is not).
213 // We only want to check enough bits to cover the vector elements, because
214 // we care if the resultant vector is all zeros, not whether the individual
215 // constants are.
216 unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
217 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
218 if (CN->getAPIntValue().countTrailingZeros() < EltSize)
219 return false;
220 } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
221 if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
222 return false;
223 } else
224 return false;
225 }
226
227 // Do not accept an all-undef vector.
228 if (IsAllUndef)
229 return false;
230 return true;
231 }
232
233 bool ISD::isBuildVectorOfConstantSDNodes(const SDNode *N) {
234 if (N->getOpcode() != ISD::BUILD_VECTOR)
235 return false;
236
237 for (const SDValue &Op : N->op_values()) {
238 if (Op.isUndef())
239 continue;
240 if (!isa<ConstantSDNode>(Op))
241 return false;
242 }
243 return true;
244 }
245
246 bool ISD::isBuildVectorOfConstantFPSDNodes(const SDNode *N) {
247 if (N->getOpcode() != ISD::BUILD_VECTOR)
248 return false;
249
250 for (const SDValue &Op : N->op_values()) {
251 if (Op.isUndef())
252 continue;
253 if (!isa<ConstantFPSDNode>(Op))
254 return false;
255 }
256 return true;
257 }
258
259 bool ISD::allOperandsUndef(const SDNode *N) {
260 // Return false if the node has no operands.
261 // This is "logically inconsistent" with the definition of "all" but
262 // is probably the desired behavior.
263 if (N->getNumOperands() == 0)
264 return false;
265
266 for (const SDValue &Op : N->op_values())
267 if (!Op.isUndef())
268 return false;
269
270 return true;
271 }
272
273 bool ISD::matchUnaryPredicate(SDValue Op,
274 std::function<bool(ConstantSDNode *)> Match,
275 bool AllowUndefs) {
276 // FIXME: Add support for scalar UNDEF cases?
277 if (auto *Cst = dyn_cast<ConstantSDNode>(Op))
278 return Match(Cst);
279
280 // FIXME: Add support for vector UNDEF cases?
281 if (ISD::BUILD_VECTOR != Op.getOpcode())
282 return false;
283
284 EVT SVT = Op.getValueType().getScalarType();
285 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
286 if (AllowUndefs && Op.getOperand(i).isUndef()) {
287 if (!Match(nullptr))
288 return false;
289 continue;
290 }
291
292 auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(i));
293 if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst))
294 return false;
295 }
296 return true;
297 }
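// Illustration (not part of the original source): a sketch of how a caller
// might use matchUnaryPredicate to test that a constant, or every element of
// a constant build vector, is a power of two (Op is assumed to be an SDValue
// already in scope).
//
//   bool AllPow2 = ISD::matchUnaryPredicate(Op, [](ConstantSDNode *C) {
//     return C->getAPIntValue().isPowerOf2();
//   });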
298
299 bool ISD::matchBinaryPredicate(
300 SDValue LHS, SDValue RHS,
301 std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
302 bool AllowUndefs) {
303 if (LHS.getValueType() != RHS.getValueType())
304 return false;
305
306 // TODO: Add support for scalar UNDEF cases?
307 if (auto *LHSCst = dyn_cast<ConstantSDNode>(LHS))
308 if (auto *RHSCst = dyn_cast<ConstantSDNode>(RHS))
309 return Match(LHSCst, RHSCst);
310
311 // TODO: Add support for vector UNDEF cases?
312 if (ISD::BUILD_VECTOR != LHS.getOpcode() ||
313 ISD::BUILD_VECTOR != RHS.getOpcode())
314 return false;
315
316 EVT SVT = LHS.getValueType().getScalarType();
317 for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) {
318 SDValue LHSOp = LHS.getOperand(i);
319 SDValue RHSOp = RHS.getOperand(i);
320 bool LHSUndef = AllowUndefs && LHSOp.isUndef();
321 bool RHSUndef = AllowUndefs && RHSOp.isUndef();
322 auto *LHSCst = dyn_cast<ConstantSDNode>(LHSOp);
323 auto *RHSCst = dyn_cast<ConstantSDNode>(RHSOp);
324 if ((!LHSCst && !LHSUndef) || (!RHSCst && !RHSUndef))
325 return false;
326 if (LHSOp.getValueType() != SVT ||
327 LHSOp.getValueType() != RHSOp.getValueType())
328 return false;
329 if (!Match(LHSCst, RHSCst))
330 return false;
331 }
332 return true;
333 }
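// Illustration (not part of the original source): a sketch of an element-wise
// check over two constant operands, e.g. verifying that every shift amount in
// Amt is smaller than the bit width of the corresponding element of Val (Amt
// and Val are assumed SDValues of the same type in scope).
//
//   bool InRange = ISD::matchBinaryPredicate(
//       Val, Amt, [](ConstantSDNode *V, ConstantSDNode *A) {
//         return A->getAPIntValue().ult(V->getAPIntValue().getBitWidth());
//       });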
334
335 ISD::NodeType ISD::getExtForLoadExtType(bool IsFP, ISD::LoadExtType ExtType) {
336 switch (ExtType) {
337 case ISD::EXTLOAD:
338 return IsFP ? ISD::FP_EXTEND : ISD::ANY_EXTEND;
339 case ISD::SEXTLOAD:
340 return ISD::SIGN_EXTEND;
341 case ISD::ZEXTLOAD:
342 return ISD::ZERO_EXTEND;
343 default:
344 break;
345 }
346
347 llvm_unreachable("Invalid LoadExtType");
348 }
349
350 ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
351 // To perform this operation, we just need to swap the L and G bits of the
352 // operation.
353 unsigned OldL = (Operation >> 2) & 1;
354 unsigned OldG = (Operation >> 1) & 1;
355 return ISD::CondCode((Operation & ~6) | // Keep the N, U, E bits
356 (OldL << 1) | // New G bit
357 (OldG << 2)); // New L bit.
358 }
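// Illustration (not part of the original source), derived from the bit
// twiddling above: condition codes encode E in bit 0, G in bit 1, L in bit 2
// and U in bit 3, with a higher bit marking the integer-only codes. Swapping
// the L and G bits is exactly what commuting the operands requires, e.g.
//
//   getSetCCSwappedOperands(ISD::SETLT)  == ISD::SETGT   // a < b  <=>  b > a
//   getSetCCSwappedOperands(ISD::SETULE) == ISD::SETUGE  // a <= b <=>  b >= a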
359
360 ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
361 unsigned Operation = Op;
362 if (isInteger)
363 Operation ^= 7; // Flip L, G, E bits, but not U.
364 else
365 Operation ^= 15; // Flip all of the condition bits.
366
367 if (Operation > ISD::SETTRUE2)
368 Operation &= ~8; // Don't let N and U bits get set.
369
370 return ISD::CondCode(Operation);
371 }
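// Illustration (not part of the original source), using the same bit layout:
//
//   getSetCCInverse(ISD::SETLT,  /*isInteger=*/true)  == ISD::SETGE
//   getSetCCInverse(ISD::SETOLT, /*isInteger=*/false) == ISD::SETUGE
//
// For integers only the L, G and E bits are flipped; for floating point the U
// (unordered) bit is flipped as well, so the inverse of "ordered less than"
// must also accept unordered operands.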
372
373 /// For an integer comparison, return 1 if the comparison is a signed operation
374 /// and 2 if the result is an unsigned comparison. Return zero if the operation
375 /// does not depend on the sign of the input (setne and seteq).
376 static int isSignedOp(ISD::CondCode Opcode) {
377 switch (Opcode) {
378 default: llvm_unreachable("Illegal integer setcc operation!");
379 case ISD::SETEQ:
380 case ISD::SETNE: return 0;
381 case ISD::SETLT:
382 case ISD::SETLE:
383 case ISD::SETGT:
384 case ISD::SETGE: return 1;
385 case ISD::SETULT:
386 case ISD::SETULE:
387 case ISD::SETUGT:
388 case ISD::SETUGE: return 2;
389 }
390 }
391
392 ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
393 bool IsInteger) {
394 if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
395 // Cannot fold a signed integer setcc with an unsigned integer setcc.
396 return ISD::SETCC_INVALID;
397
398 unsigned Op = Op1 | Op2; // Combine all of the condition bits.
399
400 // If the N and U bits get set, then the resultant comparison DOES suddenly
401 // care about orderedness, and it is true when ordered.
402 if (Op > ISD::SETTRUE2)
403 Op &= ~16; // Clear the U bit if the N bit is set.
404
405 // Canonicalize illegal integer setcc's.
406 if (IsInteger && Op == ISD::SETUNE) // e.g. SETUGT | SETULT
407 Op = ISD::SETNE;
408
409 return ISD::CondCode(Op);
410 }
411
412 ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
413 bool IsInteger) {
414 if (IsInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
415 // Cannot fold a signed setcc with an unsigned setcc.
416 return ISD::SETCC_INVALID;
417
418 // Combine all of the condition bits.
419 ISD::CondCode Result = ISD::CondCode(Op1 & Op2);
420
421 // Canonicalize illegal integer setcc's.
422 if (IsInteger) {
423 switch (Result) {
424 default: break;
425 case ISD::SETUO : Result = ISD::SETFALSE; break; // SETUGT & SETULT
426 case ISD::SETOEQ: // SETEQ & SETU[LG]E
427 case ISD::SETUEQ: Result = ISD::SETEQ ; break; // SETUGE & SETULE
428 case ISD::SETOLT: Result = ISD::SETULT ; break; // SETULT & SETNE
429 case ISD::SETOGT: Result = ISD::SETUGT ; break; // SETUGT & SETNE
430 }
431 }
432
433 return Result;
434 }
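// Illustration (not part of the original source): ORing or ANDing the
// condition bits folds two setcc predicates into one, e.g.
//
//   getSetCCOrOperation (ISD::SETLT,  ISD::SETGT,  /*IsInteger=*/true) == ISD::SETNE
//   getSetCCAndOperation(ISD::SETUGE, ISD::SETULE, /*IsInteger=*/true) == ISD::SETEQ
//
// The second case goes through the SETUEQ -> SETEQ canonicalization above,
// since an unordered-or-equal test degenerates to plain equality for integers.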
435
436 //===----------------------------------------------------------------------===//
437 // SDNode Profile Support
438 //===----------------------------------------------------------------------===//
439
440 /// AddNodeIDOpcode - Add the node opcode to the NodeID data.
441 static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
442 ID.AddInteger(OpC);
443 }
444
445 /// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
446 /// solely with their pointer.
447 static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
448 ID.AddPointer(VTList.VTs);
449 }
450
451 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
452 static void AddNodeIDOperands(FoldingSetNodeID &ID,
453 ArrayRef<SDValue> Ops) {
454 for (auto& Op : Ops) {
455 ID.AddPointer(Op.getNode());
456 ID.AddInteger(Op.getResNo());
457 }
458 }
459
460 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
461 static void AddNodeIDOperands(FoldingSetNodeID &ID,
462 ArrayRef<SDUse> Ops) {
463 for (auto& Op : Ops) {
464 ID.AddPointer(Op.getNode());
465 ID.AddInteger(Op.getResNo());
466 }
467 }
468
469 static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC,
470 SDVTList VTList, ArrayRef<SDValue> OpList) {
471 AddNodeIDOpcode(ID, OpC);
472 AddNodeIDValueTypes(ID, VTList);
473 AddNodeIDOperands(ID, OpList);
474 }
475
476 /// If this is an SDNode with special info, add this info to the NodeID data.
477 static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
478 switch (N->getOpcode()) {
479 case ISD::TargetExternalSymbol:
480 case ISD::ExternalSymbol:
481 case ISD::MCSymbol:
482 llvm_unreachable("Should only be used on nodes with operands");
483 default: break; // Normal nodes don't need extra info.
484 case ISD::TargetConstant:
485 case ISD::Constant: {
486 const ConstantSDNode *C = cast<ConstantSDNode>(N);
487 ID.AddPointer(C->getConstantIntValue());
488 ID.AddBoolean(C->isOpaque());
489 break;
490 }
491 case ISD::TargetConstantFP:
492 case ISD::ConstantFP:
493 ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
494 break;
495 case ISD::TargetGlobalAddress:
496 case ISD::GlobalAddress:
497 case ISD::TargetGlobalTLSAddress:
498 case ISD::GlobalTLSAddress: {
499 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
500 ID.AddPointer(GA->getGlobal());
501 ID.AddInteger(GA->getOffset());
502 ID.AddInteger(GA->getTargetFlags());
503 break;
504 }
505 case ISD::BasicBlock:
506 ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
507 break;
508 case ISD::Register:
509 ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
510 break;
511 case ISD::RegisterMask:
512 ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
513 break;
514 case ISD::SRCVALUE:
515 ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
516 break;
517 case ISD::FrameIndex:
518 case ISD::TargetFrameIndex:
519 ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
520 break;
521 case ISD::JumpTable:
522 case ISD::TargetJumpTable:
523 ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
524 ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
525 break;
526 case ISD::ConstantPool:
527 case ISD::TargetConstantPool: {
528 const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
529 ID.AddInteger(CP->getAlignment());
530 ID.AddInteger(CP->getOffset());
531 if (CP->isMachineConstantPoolEntry())
532 CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
533 else
534 ID.AddPointer(CP->getConstVal());
535 ID.AddInteger(CP->getTargetFlags());
536 break;
537 }
538 case ISD::TargetIndex: {
539 const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
540 ID.AddInteger(TI->getIndex());
541 ID.AddInteger(TI->getOffset());
542 ID.AddInteger(TI->getTargetFlags());
543 break;
544 }
545 case ISD::LOAD: {
546 const LoadSDNode *LD = cast<LoadSDNode>(N);
547 ID.AddInteger(LD->getMemoryVT().getRawBits());
548 ID.AddInteger(LD->getRawSubclassData());
549 ID.AddInteger(LD->getPointerInfo().getAddrSpace());
550 break;
551 }
552 case ISD::STORE: {
553 const StoreSDNode *ST = cast<StoreSDNode>(N);
554 ID.AddInteger(ST->getMemoryVT().getRawBits());
555 ID.AddInteger(ST->getRawSubclassData());
556 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
557 break;
558 }
559 case ISD::MLOAD: {
560 const MaskedLoadSDNode *MLD = cast<MaskedLoadSDNode>(N);
561 ID.AddInteger(MLD->getMemoryVT().getRawBits());
562 ID.AddInteger(MLD->getRawSubclassData());
563 ID.AddInteger(MLD->getPointerInfo().getAddrSpace());
564 break;
565 }
566 case ISD::MSTORE: {
567 const MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
568 ID.AddInteger(MST->getMemoryVT().getRawBits());
569 ID.AddInteger(MST->getRawSubclassData());
570 ID.AddInteger(MST->getPointerInfo().getAddrSpace());
571 break;
572 }
573 case ISD::MGATHER: {
574 const MaskedGatherSDNode *MG = cast<MaskedGatherSDNode>(N);
575 ID.AddInteger(MG->getMemoryVT().getRawBits());
576 ID.AddInteger(MG->getRawSubclassData());
577 ID.AddInteger(MG->getPointerInfo().getAddrSpace());
578 break;
579 }
580 case ISD::MSCATTER: {
581 const MaskedScatterSDNode *MS = cast<MaskedScatterSDNode>(N);
582 ID.AddInteger(MS->getMemoryVT().getRawBits());
583 ID.AddInteger(MS->getRawSubclassData());
584 ID.AddInteger(MS->getPointerInfo().getAddrSpace());
585 break;
586 }
587 case ISD::ATOMIC_CMP_SWAP:
588 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
589 case ISD::ATOMIC_SWAP:
590 case ISD::ATOMIC_LOAD_ADD:
591 case ISD::ATOMIC_LOAD_SUB:
592 case ISD::ATOMIC_LOAD_AND:
593 case ISD::ATOMIC_LOAD_CLR:
594 case ISD::ATOMIC_LOAD_OR:
595 case ISD::ATOMIC_LOAD_XOR:
596 case ISD::ATOMIC_LOAD_NAND:
597 case ISD::ATOMIC_LOAD_MIN:
598 case ISD::ATOMIC_LOAD_MAX:
599 case ISD::ATOMIC_LOAD_UMIN:
600 case ISD::ATOMIC_LOAD_UMAX:
601 case ISD::ATOMIC_LOAD:
602 case ISD::ATOMIC_STORE: {
603 const AtomicSDNode *AT = cast<AtomicSDNode>(N);
604 ID.AddInteger(AT->getMemoryVT().getRawBits());
605 ID.AddInteger(AT->getRawSubclassData());
606 ID.AddInteger(AT->getPointerInfo().getAddrSpace());
607 break;
608 }
609 case ISD::PREFETCH: {
610 const MemSDNode *PF = cast<MemSDNode>(N);
611 ID.AddInteger(PF->getPointerInfo().getAddrSpace());
612 break;
613 }
614 case ISD::VECTOR_SHUFFLE: {
615 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
616 for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
617 i != e; ++i)
618 ID.AddInteger(SVN->getMaskElt(i));
619 break;
620 }
621 case ISD::TargetBlockAddress:
622 case ISD::BlockAddress: {
623 const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
624 ID.AddPointer(BA->getBlockAddress());
625 ID.AddInteger(BA->getOffset());
626 ID.AddInteger(BA->getTargetFlags());
627 break;
628 }
629 } // end switch (N->getOpcode())
630
631 // Target specific memory nodes could also have address spaces to check.
632 if (N->isTargetMemoryOpcode())
633 ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
634 }
635
636 /// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
637 /// data.
638 static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
639 AddNodeIDOpcode(ID, N->getOpcode());
640 // Add the return value info.
641 AddNodeIDValueTypes(ID, N->getVTList());
642 // Add the operand info.
643 AddNodeIDOperands(ID, N->ops());
644
645 // Handle SDNode leaves with special info.
646 AddNodeIDCustom(ID, N);
647 }
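// Illustration (not part of the original source): these profiling helpers are
// what CSE in the node factory functions is built on. A sketch of the pattern
// used throughout this file (names as in the getters below):
//
//   FoldingSetNodeID ID;
//   AddNodeIDNode(ID, Opc, getVTList(VT), Ops);   // hash opcode, value types, operands
//   void *IP = nullptr;
//   if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
//     return SDValue(E, 0);                        // reuse the structurally identical node
//   // ...otherwise create a new node and remember it via CSEMap.InsertNode(N, IP).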
648
649 //===----------------------------------------------------------------------===//
650 // SelectionDAG Class
651 //===----------------------------------------------------------------------===//
652
653 /// doNotCSE - Return true if CSE should not be performed for this node.
654 static bool doNotCSE(SDNode *N) {
655 if (N->getValueType(0) == MVT::Glue)
656 return true; // Never CSE anything that produces a flag.
657
658 switch (N->getOpcode()) {
659 default: break;
660 case ISD::HANDLENODE:
661 case ISD::EH_LABEL:
662 return true; // Never CSE these nodes.
663 }
664
665 // Check that remaining values produced are not flags.
666 for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
667 if (N->getValueType(i) == MVT::Glue)
668 return true; // Never CSE anything that produces a flag.
669
670 return false;
671 }
672
673 /// RemoveDeadNodes - This method deletes all unreachable nodes in the
674 /// SelectionDAG.
675 void SelectionDAG::RemoveDeadNodes() {
676 // Create a dummy node (which is not added to allnodes), that adds a reference
677 // to the root node, preventing it from being deleted.
678 HandleSDNode Dummy(getRoot());
679
680 SmallVector<SDNode*, 128> DeadNodes;
681
682 // Add all obviously-dead nodes to the DeadNodes worklist.
683 for (SDNode &Node : allnodes())
684 if (Node.use_empty())
685 DeadNodes.push_back(&Node);
686
687 RemoveDeadNodes(DeadNodes);
688
689 // If the root changed (e.g. it was a dead load), update the root.
690 setRoot(Dummy.getValue());
691 }
692
693 /// RemoveDeadNodes - This method deletes the unreachable nodes in the
694 /// given list, and any nodes that become unreachable as a result.
695 void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {
696
697 // Process the worklist, deleting the nodes and adding their uses to the
698 // worklist.
699 while (!DeadNodes.empty()) {
700 SDNode *N = DeadNodes.pop_back_val();
701 // Skip to next node if we've already managed to delete the node. This could
702 // happen if replacing a node causes a node previously added to the worklist to
703 // be deleted.
704 if (N->getOpcode() == ISD::DELETED_NODE)
705 continue;
706
707 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
708 DUL->NodeDeleted(N, nullptr);
709
710 // Take the node out of the appropriate CSE map.
711 RemoveNodeFromCSEMaps(N);
712
713 // Next, brutally remove the operand list. This is safe to do, as there are
714 // no cycles in the graph.
715 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
716 SDUse &Use = *I++;
717 SDNode *Operand = Use.getNode();
718 Use.set(SDValue());
719
720 // Now that we removed this operand, see if there are no uses of it left.
721 if (Operand->use_empty())
722 DeadNodes.push_back(Operand);
723 }
724
725 DeallocateNode(N);
726 }
727 }
728
729 void SelectionDAG::RemoveDeadNode(SDNode *N){
730 SmallVector<SDNode*, 16> DeadNodes(1, N);
731
732 // Create a dummy node that adds a reference to the root node, preventing
733 // it from being deleted. (This matters if the root is an operand of the
734 // dead node.)
735 HandleSDNode Dummy(getRoot());
736
737 RemoveDeadNodes(DeadNodes);
738 }
739
740 void SelectionDAG::DeleteNode(SDNode *N) {
741 // First take this out of the appropriate CSE map.
742 RemoveNodeFromCSEMaps(N);
743
744 // Finally, remove uses due to operands of this node, remove from the
745 // AllNodes list, and delete the node.
746 DeleteNodeNotInCSEMaps(N);
747 }
748
749 void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
750 assert(N->getIterator() != AllNodes.begin() &&
751 "Cannot delete the entry node!");
752 assert(N->use_empty() && "Cannot delete a node that is not dead!");
753
754 // Drop all of the operands and decrement used node's use counts.
755 N->DropOperands();
756
757 DeallocateNode(N);
758 }
759
760 void SDDbgInfo::erase(const SDNode *Node) {
761 DbgValMapType::iterator I = DbgValMap.find(Node);
762 if (I == DbgValMap.end())
763 return;
764 for (auto &Val: I->second)
765 Val->setIsInvalidated();
766 DbgValMap.erase(I);
767 }
768
769 void SelectionDAG::DeallocateNode(SDNode *N) {
770 // If we have operands, deallocate them.
771 removeOperands(N);
772
773 NodeAllocator.Deallocate(AllNodes.remove(N));
774
775 // Set the opcode to DELETED_NODE to help catch bugs when node
776 // memory is reallocated.
777 // FIXME: There are places in SDag that have grown a dependency on the opcode
778 // value in the released node.
779 __asan_unpoison_memory_region(&N->NodeType, sizeof(N->NodeType));
780 N->NodeType = ISD::DELETED_NODE;
781
782 // If any of the SDDbgValue nodes refer to this SDNode, invalidate
783 // them and forget about that node.
784 DbgInfo->erase(N);
785 }
786
787 #ifndef NDEBUG
788 /// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
789 static void VerifySDNode(SDNode *N) {
790 switch (N->getOpcode()) {
791 default:
792 break;
793 case ISD::BUILD_PAIR: {
794 EVT VT = N->getValueType(0);
795 assert(N->getNumValues() == 1 && "Too many results!");
796 assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
797 "Wrong return type!");
798 assert(N->getNumOperands() == 2 && "Wrong number of operands!");
799 assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
800 "Mismatched operand types!");
801 assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
802 "Wrong operand type!");
803 assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
804 "Wrong return type size");
805 break;
806 }
807 case ISD::BUILD_VECTOR: {
808 assert(N->getNumValues() == 1 && "Too many results!");
809 assert(N->getValueType(0).isVector() && "Wrong return type!");
810 assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
811 "Wrong number of operands!");
812 EVT EltVT = N->getValueType(0).getVectorElementType();
813 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
814 assert((I->getValueType() == EltVT ||
815 (EltVT.isInteger() && I->getValueType().isInteger() &&
816 EltVT.bitsLE(I->getValueType()))) &&
817 "Wrong operand type!");
818 assert(I->getValueType() == N->getOperand(0).getValueType() &&
819 "Operands must all have the same type");
820 }
821 break;
822 }
823 }
824 }
825 #endif // NDEBUG
826
827 /// Insert a newly allocated node into the DAG.
828 ///
829 /// Handles insertion into the all nodes list and CSE map, as well as
830 /// verification and other common operations when a new node is allocated.
831 void SelectionDAG::InsertNode(SDNode *N) {
832 AllNodes.push_back(N);
833 #ifndef NDEBUG
834 N->PersistentId = NextPersistentId++;
835 VerifySDNode(N);
836 #endif
837 }
838
839 /// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
840 /// correspond to it. This is useful when we're about to delete or repurpose
841 // the node. We don't want future requests for structurally identical nodes
842 /// to return N anymore.
843 bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
844 bool Erased = false;
845 switch (N->getOpcode()) {
846 case ISD::HANDLENODE: return false; // noop.
847 case ISD::CONDCODE:
848 assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
849 "Cond code doesn't exist!");
850 Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != nullptr;
851 CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = nullptr;
852 break;
853 case ISD::ExternalSymbol:
854 Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
855 break;
856 case ISD::TargetExternalSymbol: {
857 ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
858 Erased = TargetExternalSymbols.erase(
859 std::pair<std::string,unsigned char>(ESN->getSymbol(),
860 ESN->getTargetFlags()));
861 break;
862 }
863 case ISD::MCSymbol: {
864 auto *MCSN = cast<MCSymbolSDNode>(N);
865 Erased = MCSymbols.erase(MCSN->getMCSymbol());
866 break;
867 }
868 case ISD::VALUETYPE: {
869 EVT VT = cast<VTSDNode>(N)->getVT();
870 if (VT.isExtended()) {
871 Erased = ExtendedValueTypeNodes.erase(VT);
872 } else {
873 Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != nullptr;
874 ValueTypeNodes[VT.getSimpleVT().SimpleTy] = nullptr;
875 }
876 break;
877 }
878 default:
879 // Remove it from the CSE Map.
880 assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
881 assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
882 Erased = CSEMap.RemoveNode(N);
883 break;
884 }
885 #ifndef NDEBUG
886 // Verify that the node was actually in one of the CSE maps, unless it has a
887 // flag result (which cannot be CSE'd) or is one of the special cases that are
888 // not subject to CSE.
889 if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
890 !N->isMachineOpcode() && !doNotCSE(N)) {
891 N->dump(this);
892 dbgs() << "\n";
893 llvm_unreachable("Node is not in map!");
894 }
895 #endif
896 return Erased;
897 }
898
899 /// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
900 /// maps and modified in place. Add it back to the CSE maps, unless an identical
901 /// node already exists, in which case transfer all its users to the existing
902 /// node. This transfer can potentially trigger recursive merging.
903 void
904 SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
905 // For node types that aren't CSE'd, just act as if no identical node
906 // already exists.
907 if (!doNotCSE(N)) {
908 SDNode *Existing = CSEMap.GetOrInsertNode(N);
909 if (Existing != N) {
910 // If there was already an existing matching node, use ReplaceAllUsesWith
911 // to replace the dead one with the existing one. This can cause
912 // recursive merging of other unrelated nodes down the line.
913 ReplaceAllUsesWith(N, Existing);
914
915 // N is now dead. Inform the listeners and delete it.
916 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
917 DUL->NodeDeleted(N, Existing);
918 DeleteNodeNotInCSEMaps(N);
919 return;
920 }
921 }
922
923 // If the node doesn't already exist, we updated it. Inform listeners.
924 for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
925 DUL->NodeUpdated(N);
926 }
927
928 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
929 /// were replaced with those specified. If this node is never memoized,
930 /// return null, otherwise return a pointer to the slot it would take. If a
931 /// node already exists with these operands, the slot will be non-null.
932 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
933 void *&InsertPos) {
934 if (doNotCSE(N))
935 return nullptr;
936
937 SDValue Ops[] = { Op };
938 FoldingSetNodeID ID;
939 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
940 AddNodeIDCustom(ID, N);
941 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
942 if (Node)
943 Node->intersectFlagsWith(N->getFlags());
944 return Node;
945 }
946
947 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
948 /// were replaced with those specified. If this node is never memoized,
949 /// return null, otherwise return a pointer to the slot it would take. If a
950 /// node already exists with these operands, the slot will be non-null.
951 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
952 SDValue Op1, SDValue Op2,
953 void *&InsertPos) {
954 if (doNotCSE(N))
955 return nullptr;
956
957 SDValue Ops[] = { Op1, Op2 };
958 FoldingSetNodeID ID;
959 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
960 AddNodeIDCustom(ID, N);
961 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
962 if (Node)
963 Node->intersectFlagsWith(N->getFlags());
964 return Node;
965 }
966
967 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
968 /// were replaced with those specified. If this node is never memoized,
969 /// return null, otherwise return a pointer to the slot it would take. If a
970 /// node already exists with these operands, the slot will be non-null.
971 SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
972 void *&InsertPos) {
973 if (doNotCSE(N))
974 return nullptr;
975
976 FoldingSetNodeID ID;
977 AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops);
978 AddNodeIDCustom(ID, N);
979 SDNode *Node = FindNodeOrInsertPos(ID, SDLoc(N), InsertPos);
980 if (Node)
981 Node->intersectFlagsWith(N->getFlags());
982 return Node;
983 }
984
985 unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
986 Type *Ty = VT == MVT::iPTR ?
987 PointerType::get(Type::getInt8Ty(*getContext()), 0) :
988 VT.getTypeForEVT(*getContext());
989
990 return getDataLayout().getABITypeAlignment(Ty);
991 }
992
993 // EntryNode could meaningfully have debug info if we can find it...
994 SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
995 : TM(tm), OptLevel(OL),
996 EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
997 Root(getEntryNode()) {
998 InsertNode(&EntryNode);
999 DbgInfo = new SDDbgInfo();
1000 }
1001
1002 void SelectionDAG::init(MachineFunction &NewMF,
1003 OptimizationRemarkEmitter &NewORE,
1004 Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
1005 LegacyDivergenceAnalysis * Divergence) {
1006 MF = &NewMF;
1007 SDAGISelPass = PassPtr;
1008 ORE = &NewORE;
1009 TLI = getSubtarget().getTargetLowering();
1010 TSI = getSubtarget().getSelectionDAGInfo();
1011 LibInfo = LibraryInfo;
1012 Context = &MF->getFunction().getContext();
1013 DA = Divergence;
1014 }
1015
1016 SelectionDAG::~SelectionDAG() {
1017 assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
1018 allnodes_clear();
1019 OperandRecycler.clear(OperandAllocator);
1020 delete DbgInfo;
1021 }
1022
1023 void SelectionDAG::allnodes_clear() {
1024 assert(&*AllNodes.begin() == &EntryNode);
1025 AllNodes.remove(AllNodes.begin());
1026 while (!AllNodes.empty())
1027 DeallocateNode(&AllNodes.front());
1028 #ifndef NDEBUG
1029 NextPersistentId = 0;
1030 #endif
1031 }
1032
1033 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
1034 void *&InsertPos) {
1035 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
1036 if (N) {
1037 switch (N->getOpcode()) {
1038 default: break;
1039 case ISD::Constant:
1040 case ISD::ConstantFP:
1041 llvm_unreachable("Querying for Constant and ConstantFP nodes requires "
1042 "debug location. Use another overload.");
1043 }
1044 }
1045 return N;
1046 }
1047
1048 SDNode *SelectionDAG::FindNodeOrInsertPos(const FoldingSetNodeID &ID,
1049 const SDLoc &DL, void *&InsertPos) {
1050 SDNode *N = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
1051 if (N) {
1052 switch (N->getOpcode()) {
1053 case ISD::Constant:
1054 case ISD::ConstantFP:
1055 // Erase debug location from the node if the node is used at several
1056 // different places. Do not propagate one location to all uses as it
1057 // will cause a worse single stepping debugging experience.
1058 if (N->getDebugLoc() != DL.getDebugLoc())
1059 N->setDebugLoc(DebugLoc());
1060 break;
1061 default:
1062 // When the node's point of use is located earlier in the instruction
1063 // sequence than its prior point of use, update its debug info to the
1064 // earlier location.
1065 if (DL.getIROrder() && DL.getIROrder() < N->getIROrder())
1066 N->setDebugLoc(DL.getDebugLoc());
1067 break;
1068 }
1069 }
1070 return N;
1071 }
1072
1073 void SelectionDAG::clear() {
1074 allnodes_clear();
1075 OperandRecycler.clear(OperandAllocator);
1076 OperandAllocator.Reset();
1077 CSEMap.clear();
1078
1079 ExtendedValueTypeNodes.clear();
1080 ExternalSymbols.clear();
1081 TargetExternalSymbols.clear();
1082 MCSymbols.clear();
1083 std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
1084 static_cast<CondCodeSDNode*>(nullptr));
1085 std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
1086 static_cast<SDNode*>(nullptr));
1087
1088 EntryNode.UseList = nullptr;
1089 InsertNode(&EntryNode);
1090 Root = getEntryNode();
1091 DbgInfo->clear();
1092 }
1093
1094 SDValue SelectionDAG::getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT) {
1095 return VT.bitsGT(Op.getValueType())
1096 ? getNode(ISD::FP_EXTEND, DL, VT, Op)
1097 : getNode(ISD::FP_ROUND, DL, VT, Op, getIntPtrConstant(0, DL));
1098 }
1099
1100 SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1101 return VT.bitsGT(Op.getValueType()) ?
1102 getNode(ISD::ANY_EXTEND, DL, VT, Op) :
1103 getNode(ISD::TRUNCATE, DL, VT, Op);
1104 }
1105
1106 SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1107 return VT.bitsGT(Op.getValueType()) ?
1108 getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
1109 getNode(ISD::TRUNCATE, DL, VT, Op);
1110 }
1111
1112 SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
1113 return VT.bitsGT(Op.getValueType()) ?
1114 getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
1115 getNode(ISD::TRUNCATE, DL, VT, Op);
1116 }
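// Illustration (not part of the original source): the *OrTrunc helpers pick
// the extension or truncation purely from the relative bit widths, e.g.
//
//   DAG.getZExtOrTrunc(Val8,  DL, MVT::i32);  // i8  -> i32: ISD::ZERO_EXTEND
//   DAG.getZExtOrTrunc(Val64, DL, MVT::i32);  // i64 -> i32: ISD::TRUNCATE
//
// (DAG, Val8 and Val64 are assumed to be a SelectionDAG and SDValues in scope.)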
1117
1118 SDValue SelectionDAG::getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT,
1119 EVT OpVT) {
1120 if (VT.bitsLE(Op.getValueType()))
1121 return getNode(ISD::TRUNCATE, SL, VT, Op);
1122
1123 TargetLowering::BooleanContent BType = TLI->getBooleanContents(OpVT);
1124 return getNode(TLI->getExtendForContent(BType), SL, VT, Op);
1125 }
1126
1127 SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
1128 assert(!VT.isVector() &&
1129 "getZeroExtendInReg should use the vector element type instead of "
1130 "the vector type!");
1131 if (Op.getValueType().getScalarType() == VT) return Op;
1132 unsigned BitWidth = Op.getScalarValueSizeInBits();
1133 APInt Imm = APInt::getLowBitsSet(BitWidth,
1134 VT.getSizeInBits());
1135 return getNode(ISD::AND, DL, Op.getValueType(), Op,
1136 getConstant(Imm, DL, Op.getValueType()));
1137 }
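// Illustration (not part of the original source): zero-extending "in register"
// just masks away the bits above VT, so a sketch such as
//
//   DAG.getZeroExtendInReg(Val32, DL, MVT::i8)
//
// produces (and Val32, 0x000000FF) on an i32 value, assuming Val32 is an
// SDValue of type i32 already in scope.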
1138
1139 /// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
1140 SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
1141 EVT EltVT = VT.getScalarType();
1142 SDValue NegOne =
1143 getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), DL, VT);
1144 return getNode(ISD::XOR, DL, VT, Val, NegOne);
1145 }
1146
1147 SDValue SelectionDAG::getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT) {
1148 SDValue TrueValue = getBoolConstant(true, DL, VT, VT);
1149 return getNode(ISD::XOR, DL, VT, Val, TrueValue);
1150 }
1151
1152 SDValue SelectionDAG::getBoolConstant(bool V, const SDLoc &DL, EVT VT,
1153 EVT OpVT) {
1154 if (!V)
1155 return getConstant(0, DL, VT);
1156
1157 switch (TLI->getBooleanContents(OpVT)) {
1158 case TargetLowering::ZeroOrOneBooleanContent:
1159 case TargetLowering::UndefinedBooleanContent:
1160 return getConstant(1, DL, VT);
1161 case TargetLowering::ZeroOrNegativeOneBooleanContent:
1162 return getAllOnesConstant(DL, VT);
1163 }
1164 llvm_unreachable("Unexpected boolean content enum!");
1165 }
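// Illustration (not part of the original source): the "true" constant depends
// on how the target encodes booleans for OpVT. A sketch, assuming a target
// that reports ZeroOrNegativeOneBooleanContent for vector compares:
//
//   DAG.getBoolConstant(true, DL, MVT::v4i32, MVT::v4i32); // splat of -1 (all ones)
//
// whereas a ZeroOrOneBooleanContent target would get a splat of 1 instead.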
1166
1167 SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
1168 bool isT, bool isO) {
1169 EVT EltVT = VT.getScalarType();
1170 assert((EltVT.getSizeInBits() >= 64 ||
1171 (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
1172 "getConstant with a uint64_t value that doesn't fit in the type!");
1173 return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
1174 }
1175
1176 SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
1177 bool isT, bool isO) {
1178 return getConstant(*ConstantInt::get(*Context, Val), DL, VT, isT, isO);
1179 }
1180
1181 SDValue SelectionDAG::getConstant(const ConstantInt &Val, const SDLoc &DL,
1182 EVT VT, bool isT, bool isO) {
1183 assert(VT.isInteger() && "Cannot create FP integer constant!");
1184
1185 EVT EltVT = VT.getScalarType();
1186 const ConstantInt *Elt = &Val;
1187
1188 // In some cases the vector type is legal but the element type is illegal and
1189 // needs to be promoted, for example v8i8 on ARM. In this case, promote the
1190 // inserted value (the type does not need to match the vector element type).
1191 // Any extra bits introduced will be truncated away.
1192 if (VT.isVector() && TLI->getTypeAction(*getContext(), EltVT) ==
1193 TargetLowering::TypePromoteInteger) {
1194 EltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1195 APInt NewVal = Elt->getValue().zextOrTrunc(EltVT.getSizeInBits());
1196 Elt = ConstantInt::get(*getContext(), NewVal);
1197 }
1198 // In other cases the element type is illegal and needs to be expanded, for
1199 // example v2i64 on MIPS32. In this case, find the nearest legal type, split
1200 // the value into n parts and use a vector type with n-times the elements.
1201 // Then bitcast to the type requested.
1202 // Legalizing constants too early makes the DAGCombiner's job harder so we
1203 // only legalize if the DAG tells us we must produce legal types.
1204 else if (NewNodesMustHaveLegalTypes && VT.isVector() &&
1205 TLI->getTypeAction(*getContext(), EltVT) ==
1206 TargetLowering::TypeExpandInteger) {
1207 const APInt &NewVal = Elt->getValue();
1208 EVT ViaEltVT = TLI->getTypeToTransformTo(*getContext(), EltVT);
1209 unsigned ViaEltSizeInBits = ViaEltVT.getSizeInBits();
1210 unsigned ViaVecNumElts = VT.getSizeInBits() / ViaEltSizeInBits;
1211 EVT ViaVecVT = EVT::getVectorVT(*getContext(), ViaEltVT, ViaVecNumElts);
1212
1213 // Check the temporary vector is the correct size. If this fails then
1214 // getTypeToTransformTo() probably returned a type whose size (in bits)
1215 // isn't a power-of-2 factor of the requested type size.
1216 assert(ViaVecVT.getSizeInBits() == VT.getSizeInBits());
1217
1218 SmallVector<SDValue, 2> EltParts;
1219 for (unsigned i = 0; i < ViaVecNumElts / VT.getVectorNumElements(); ++i) {
1220 EltParts.push_back(getConstant(NewVal.lshr(i * ViaEltSizeInBits)
1221 .zextOrTrunc(ViaEltSizeInBits), DL,
1222 ViaEltVT, isT, isO));
1223 }
1224
1225 // EltParts is currently in little endian order. If we actually want
1226 // big-endian order then reverse it now.
1227 if (getDataLayout().isBigEndian())
1228 std::reverse(EltParts.begin(), EltParts.end());
1229
1230 // The elements must be reversed when the element order is different
1231 // to the endianness of the elements (because the BITCAST is itself a
1232 // vector shuffle in this situation). However, we do not need any code to
1233 // perform this reversal because getConstant() is producing a vector
1234 // splat.
1235 // This situation occurs in MIPS MSA.
1236
1237 SmallVector<SDValue, 8> Ops;
1238 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
1239 Ops.insert(Ops.end(), EltParts.begin(), EltParts.end());
1240
1241 SDValue V = getNode(ISD::BITCAST, DL, VT, getBuildVector(ViaVecVT, DL, Ops));
1242 return V;
1243 }
1244
1245 assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
1246 "APInt size does not match type size!");
1247 unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
1248 FoldingSetNodeID ID;
1249 AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1250 ID.AddPointer(Elt);
1251 ID.AddBoolean(isO);
1252 void *IP = nullptr;
1253 SDNode *N = nullptr;
1254 if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1255 if (!VT.isVector())
1256 return SDValue(N, 0);
1257
1258 if (!N) {
1259 N = newSDNode<ConstantSDNode>(isT, isO, Elt, EltVT);
1260 CSEMap.InsertNode(N, IP);
1261 InsertNode(N);
1262 NewSDValueDbgMsg(SDValue(N, 0), "Creating constant: ", this);
1263 }
1264
1265 SDValue Result(N, 0);
1266 if (VT.isVector())
1267 Result = getSplatBuildVector(VT, DL, Result);
1268
1269 return Result;
1270 }
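// Illustration (not part of the original source): when a vector type is
// requested, the scalar constant built above is turned into a splat
// BUILD_VECTOR, so a sketch such as
//
//   DAG.getConstant(1, DL, MVT::v4i32)
//
// yields (BUILD_VECTOR Constant:i32<1>, ..., Constant:i32<1>); on targets
// where the element type must be promoted or expanded the constant is
// rewritten first, as described in the comments above.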
1271
1272 SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, const SDLoc &DL,
1273 bool isTarget) {
1274 return getConstant(Val, DL, TLI->getPointerTy(getDataLayout()), isTarget);
1275 }
1276
1277 SDValue SelectionDAG::getConstantFP(const APFloat &V, const SDLoc &DL, EVT VT,
1278 bool isTarget) {
1279 return getConstantFP(*ConstantFP::get(*getContext(), V), DL, VT, isTarget);
1280 }
1281
1282 SDValue SelectionDAG::getConstantFP(const ConstantFP &V, const SDLoc &DL,
1283 EVT VT, bool isTarget) {
1284 assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");
1285
1286 EVT EltVT = VT.getScalarType();
1287
1288 // Do the map lookup using the actual bit pattern for the floating point
1289 // value, so that we don't have problems with 0.0 comparing equal to -0.0, and
1290 // we don't have issues with SNANs.
1291 unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
1292 FoldingSetNodeID ID;
1293 AddNodeIDNode(ID, Opc, getVTList(EltVT), None);
1294 ID.AddPointer(&V);
1295 void *IP = nullptr;
1296 SDNode *N = nullptr;
1297 if ((N = FindNodeOrInsertPos(ID, DL, IP)))
1298 if (!VT.isVector())
1299 return SDValue(N, 0);
1300
1301 if (!N) {
1302 N = newSDNode<ConstantFPSDNode>(isTarget, &V, EltVT);
1303 CSEMap.InsertNode(N, IP);
1304 InsertNode(N);
1305 }
1306
1307 SDValue Result(N, 0);
1308 if (VT.isVector())
1309 Result = getSplatBuildVector(VT, DL, Result);
1310 NewSDValueDbgMsg(Result, "Creating fp constant: ", this);
1311 return Result;
1312 }
1313
1314 SDValue SelectionDAG::getConstantFP(double Val, const SDLoc &DL, EVT VT,
1315 bool isTarget) {
1316 EVT EltVT = VT.getScalarType();
1317 if (EltVT == MVT::f32)
1318 return getConstantFP(APFloat((float)Val), DL, VT, isTarget);
1319 else if (EltVT == MVT::f64)
1320 return getConstantFP(APFloat(Val), DL, VT, isTarget);
1321 else if (EltVT == MVT::f80 || EltVT == MVT::f128 || EltVT == MVT::ppcf128 ||
1322 EltVT == MVT::f16) {
1323 bool Ignored;
1324 APFloat APF = APFloat(Val);
1325 APF.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
1326 &Ignored);
1327 return getConstantFP(APF, DL, VT, isTarget);
1328 } else
1329 llvm_unreachable("Unsupported type in getConstantFP");
1330 }
1331
1332 SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
1333 EVT VT, int64_t Offset, bool isTargetGA,
1334 unsigned char TargetFlags) {
1335 assert((TargetFlags == 0 || isTargetGA) &&
1336 "Cannot set target flags on target-independent globals");
1337
1338 // Truncate (with sign-extension) the offset value to the pointer size.
1339 unsigned BitWidth = getDataLayout().getPointerTypeSizeInBits(GV->getType());
1340 if (BitWidth < 64)
1341 Offset = SignExtend64(Offset, BitWidth);
1342
1343 unsigned Opc;
1344 if (GV->isThreadLocal())
1345 Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
1346 else
1347 Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;
1348
1349 FoldingSetNodeID ID;
1350 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1351 ID.AddPointer(GV);
1352 ID.AddInteger(Offset);
1353 ID.AddInteger(TargetFlags);
1354 void *IP = nullptr;
1355 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
1356 return SDValue(E, 0);
1357
1358 auto *N = newSDNode<GlobalAddressSDNode>(
1359 Opc, DL.getIROrder(), DL.getDebugLoc(), GV, VT, Offset, TargetFlags);
1360 CSEMap.InsertNode(N, IP);
1361 InsertNode(N);
1362 return SDValue(N, 0);
1363 }
1364
1365 SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
1366 unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
1367 FoldingSetNodeID ID;
1368 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1369 ID.AddInteger(FI);
1370 void *IP = nullptr;
1371 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1372 return SDValue(E, 0);
1373
1374 auto *N = newSDNode<FrameIndexSDNode>(FI, VT, isTarget);
1375 CSEMap.InsertNode(N, IP);
1376 InsertNode(N);
1377 return SDValue(N, 0);
1378 }
1379
1380 SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
1381 unsigned char TargetFlags) {
1382 assert((TargetFlags == 0 || isTarget) &&
1383 "Cannot set target flags on target-independent jump tables");
1384 unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
1385 FoldingSetNodeID ID;
1386 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1387 ID.AddInteger(JTI);
1388 ID.AddInteger(TargetFlags);
1389 void *IP = nullptr;
1390 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1391 return SDValue(E, 0);
1392
1393 auto *N = newSDNode<JumpTableSDNode>(JTI, VT, isTarget, TargetFlags);
1394 CSEMap.InsertNode(N, IP);
1395 InsertNode(N);
1396 return SDValue(N, 0);
1397 }
1398
1399 SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
1400 unsigned Alignment, int Offset,
1401 bool isTarget,
1402 unsigned char TargetFlags) {
1403 assert((TargetFlags == 0 || isTarget) &&
1404 "Cannot set target flags on target-independent globals");
1405 if (Alignment == 0)
1406 Alignment = MF->getFunction().optForSize()
1407 ? getDataLayout().getABITypeAlignment(C->getType())
1408 : getDataLayout().getPrefTypeAlignment(C->getType());
1409 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1410 FoldingSetNodeID ID;
1411 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1412 ID.AddInteger(Alignment);
1413 ID.AddInteger(Offset);
1414 ID.AddPointer(C);
1415 ID.AddInteger(TargetFlags);
1416 void *IP = nullptr;
1417 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1418 return SDValue(E, 0);
1419
1420 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
1421 TargetFlags);
1422 CSEMap.InsertNode(N, IP);
1423 InsertNode(N);
1424 return SDValue(N, 0);
1425 }
1426
1427 SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
1428 unsigned Alignment, int Offset,
1429 bool isTarget,
1430 unsigned char TargetFlags) {
1431 assert((TargetFlags == 0 || isTarget) &&
1432 "Cannot set target flags on target-independent globals");
1433 if (Alignment == 0)
1434 Alignment = getDataLayout().getPrefTypeAlignment(C->getType());
1435 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
1436 FoldingSetNodeID ID;
1437 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1438 ID.AddInteger(Alignment);
1439 ID.AddInteger(Offset);
1440 C->addSelectionDAGCSEId(ID);
1441 ID.AddInteger(TargetFlags);
1442 void *IP = nullptr;
1443 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1444 return SDValue(E, 0);
1445
1446 auto *N = newSDNode<ConstantPoolSDNode>(isTarget, C, VT, Offset, Alignment,
1447 TargetFlags);
1448 CSEMap.InsertNode(N, IP);
1449 InsertNode(N);
1450 return SDValue(N, 0);
1451 }
1452
1453 SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
1454 unsigned char TargetFlags) {
1455 FoldingSetNodeID ID;
1456 AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), None);
1457 ID.AddInteger(Index);
1458 ID.AddInteger(Offset);
1459 ID.AddInteger(TargetFlags);
1460 void *IP = nullptr;
1461 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1462 return SDValue(E, 0);
1463
1464 auto *N = newSDNode<TargetIndexSDNode>(Index, VT, Offset, TargetFlags);
1465 CSEMap.InsertNode(N, IP);
1466 InsertNode(N);
1467 return SDValue(N, 0);
1468 }
1469
1470 SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
1471 FoldingSetNodeID ID;
1472 AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), None);
1473 ID.AddPointer(MBB);
1474 void *IP = nullptr;
1475 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1476 return SDValue(E, 0);
1477
1478 auto *N = newSDNode<BasicBlockSDNode>(MBB);
1479 CSEMap.InsertNode(N, IP);
1480 InsertNode(N);
1481 return SDValue(N, 0);
1482 }
1483
1484 SDValue SelectionDAG::getValueType(EVT VT) {
1485 if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
1486 ValueTypeNodes.size())
1487 ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);
1488
1489 SDNode *&N = VT.isExtended() ?
1490 ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];
1491
1492 if (N) return SDValue(N, 0);
1493 N = newSDNode<VTSDNode>(VT);
1494 InsertNode(N);
1495 return SDValue(N, 0);
1496 }
1497
1498 SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
1499 SDNode *&N = ExternalSymbols[Sym];
1500 if (N) return SDValue(N, 0);
1501 N = newSDNode<ExternalSymbolSDNode>(false, Sym, 0, VT);
1502 InsertNode(N);
1503 return SDValue(N, 0);
1504 }
1505
1506 SDValue SelectionDAG::getMCSymbol(MCSymbol *Sym, EVT VT) {
1507 SDNode *&N = MCSymbols[Sym];
1508 if (N)
1509 return SDValue(N, 0);
1510 N = newSDNode<MCSymbolSDNode>(Sym, VT);
1511 InsertNode(N);
1512 return SDValue(N, 0);
1513 }
1514
1515 SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
1516 unsigned char TargetFlags) {
1517 SDNode *&N =
1518 TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
1519 TargetFlags)];
1520 if (N) return SDValue(N, 0);
1521 N = newSDNode<ExternalSymbolSDNode>(true, Sym, TargetFlags, VT);
1522 InsertNode(N);
1523 return SDValue(N, 0);
1524 }
1525
1526 SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
1527 if ((unsigned)Cond >= CondCodeNodes.size())
1528 CondCodeNodes.resize(Cond+1);
1529
1530 if (!CondCodeNodes[Cond]) {
1531 auto *N = newSDNode<CondCodeSDNode>(Cond);
1532 CondCodeNodes[Cond] = N;
1533 InsertNode(N);
1534 }
1535
1536 return SDValue(CondCodeNodes[Cond], 0);
1537 }
1538
1539 /// Swaps the values of N1 and N2. Swaps all indices in the shuffle mask M that
1540 /// point at N1 to point at N2 and indices that point at N2 to point at N1.
1541 static void commuteShuffle(SDValue &N1, SDValue &N2, MutableArrayRef<int> M) {
1542 std::swap(N1, N2);
1543 ShuffleVectorSDNode::commuteMask(M);
1544 }
1545
1546 SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
1547 SDValue N2, ArrayRef<int> Mask) {
1548 assert(VT.getVectorNumElements() == Mask.size() &&
1549 "Must have the same number of vector elements as mask elements!");
1550 assert(VT == N1.getValueType() && VT == N2.getValueType() &&
1551 "Invalid VECTOR_SHUFFLE");
1552
1553 // Canonicalize shuffle undef, undef -> undef
1554 if (N1.isUndef() && N2.isUndef())
1555 return getUNDEF(VT);
1556
1557 // Validate that all indices in Mask are within the range of the elements
1558 // input to the shuffle.
1559 int NElts = Mask.size();
1560 assert(llvm::all_of(Mask,
1561 [&](int M) { return M < (NElts * 2) && M >= -1; }) &&
1562 "Index out of range");
1563
1564 // Copy the mask so we can do any needed cleanup.
1565 SmallVector<int, 8> MaskVec(Mask.begin(), Mask.end());
1566
1567 // Canonicalize shuffle v, v -> v, undef
1568 if (N1 == N2) {
1569 N2 = getUNDEF(VT);
1570 for (int i = 0; i != NElts; ++i)
1571 if (MaskVec[i] >= NElts) MaskVec[i] -= NElts;
1572 }
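// For example, with NElts == 4, identical inputs and mask <4,1,6,3>, the mask
// is rewritten to <0,1,2,3> so that every index refers to N1 only.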
1573
1574 // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
1575 if (N1.isUndef())
1576 commuteShuffle(N1, N2, MaskVec);
1577
1578 if (TLI->hasVectorBlend()) {
1579 // If shuffling a splat, try to blend the splat instead. We do this here so
1580 // that even when this arises during lowering we don't have to re-handle it.
1581 auto BlendSplat = [&](BuildVectorSDNode *BV, int Offset) {
1582 BitVector UndefElements;
1583 SDValue Splat = BV->getSplatValue(&UndefElements);
1584 if (!Splat)
1585 return;
1586
1587 for (int i = 0; i < NElts; ++i) {
1588 if (MaskVec[i] < Offset || MaskVec[i] >= (Offset + NElts))
1589 continue;
1590
1591 // If this input comes from undef, mark it as such.
1592 if (UndefElements[MaskVec[i] - Offset]) {
1593 MaskVec[i] = -1;
1594 continue;
1595 }
1596
1597 // If we can blend a non-undef lane, use that instead.
1598 if (!UndefElements[i])
1599 MaskVec[i] = i + Offset;
1600 }
1601 };
1602 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
1603 BlendSplat(N1BV, 0);
1604 if (auto *N2BV = dyn_cast<BuildVectorSDNode>(N2))
1605 BlendSplat(N2BV, NElts);
1606 }
1607
1608 // Canonicalize: if all indices point into lhs -> shuffle lhs, undef.
1609 // Canonicalize: if all indices point into rhs -> shuffle rhs, undef.
1610 bool AllLHS = true, AllRHS = true;
1611 bool N2Undef = N2.isUndef();
1612 for (int i = 0; i != NElts; ++i) {
1613 if (MaskVec[i] >= NElts) {
1614 if (N2Undef)
1615 MaskVec[i] = -1;
1616 else
1617 AllLHS = false;
1618 } else if (MaskVec[i] >= 0) {
1619 AllRHS = false;
1620 }
1621 }
1622 if (AllLHS && AllRHS)
1623 return getUNDEF(VT);
1624 if (AllLHS && !N2Undef)
1625 N2 = getUNDEF(VT);
1626 if (AllRHS) {
1627 N1 = getUNDEF(VT);
1628 commuteShuffle(N1, N2, MaskVec);
1629 }
1630 // Reset our undef status after accounting for the mask.
1631 N2Undef = N2.isUndef();
1632 // Re-check whether both sides ended up undef.
1633 if (N1.isUndef() && N2Undef)
1634 return getUNDEF(VT);
1635
1636 // If this is an identity shuffle, return the first operand.
1637 bool Identity = true, AllSame = true;
1638 for (int i = 0; i != NElts; ++i) {
1639 if (MaskVec[i] >= 0 && MaskVec[i] != i) Identity = false;
1640 if (MaskVec[i] != MaskVec[0]) AllSame = false;
1641 }
1642 if (Identity && NElts)
1643 return N1;
1644
1645 // Shuffling a constant splat doesn't change the result.
1646 if (N2Undef) {
1647 SDValue V = N1;
1648
1649 // Look through any bitcasts. We check that these don't change the number
1650 // (and size) of elements and only change their types.
1651 while (V.getOpcode() == ISD::BITCAST)
1652 V = V->getOperand(0);
1653
1654 // A splat should always show up as a build vector node.
1655 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) {
1656 BitVector UndefElements;
1657 SDValue Splat = BV->getSplatValue(&UndefElements);
1658 // If this is a splat of an undef, shuffling it is also undef.
1659 if (Splat && Splat.isUndef())
1660 return getUNDEF(VT);
1661
1662 bool SameNumElts =
1663 V.getValueType().getVectorNumElements() == VT.getVectorNumElements();
1664
1665 // We only have a splat which can skip shuffles if there is a splatted
1666 // value and no undef lanes rearranged by the shuffle.
1667 if (Splat && UndefElements.none()) {
1668 // Splat of <x, x, ..., x>, return <x, x, ..., x>, provided that the
1669 // number of elements match or the value splatted is a zero constant.
1670 if (SameNumElts)
1671 return N1;
1672 if (auto *C = dyn_cast<ConstantSDNode>(Splat))
1673 if (C->isNullValue())
1674 return N1;
1675 }
1676
1677 // If the shuffle itself creates a splat, build the vector directly.
1678 if (AllSame && SameNumElts) {
1679 EVT BuildVT = BV->getValueType(0);
1680 const SDValue &Splatted = BV->getOperand(MaskVec[0]);
1681 SDValue NewBV = getSplatBuildVector(BuildVT, dl, Splatted);
1682
1683 // We may have jumped through bitcasts, so the type of the
1684 // BUILD_VECTOR may not match the type of the shuffle.
1685 if (BuildVT != VT)
1686 NewBV = getNode(ISD::BITCAST, dl, VT, NewBV);
1687 return NewBV;
1688 }
1689 }
1690 }
1691
1692 FoldingSetNodeID ID;
1693 SDValue Ops[2] = { N1, N2 };
1694 AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops);
1695 for (int i = 0; i != NElts; ++i)
1696 ID.AddInteger(MaskVec[i]);
1697
1698 void* IP = nullptr;
1699 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1700 return SDValue(E, 0);
1701
1702 // Allocate the mask array for the node out of the BumpPtrAllocator, since
1703 // SDNode doesn't have access to it. This memory will be "leaked" when
1704 // the node is deallocated, but recovered when the NodeAllocator is released.
1705 int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
1706 llvm::copy(MaskVec, MaskAlloc);
1707
1708 auto *N = newSDNode<ShuffleVectorSDNode>(VT, dl.getIROrder(),
1709 dl.getDebugLoc(), MaskAlloc);
1710 createOperands(N, Ops);
1711
1712 CSEMap.InsertNode(N, IP);
1713 InsertNode(N);
1714 SDValue V = SDValue(N, 0);
1715 NewSDValueDbgMsg(V, "Creating new node: ", this);
1716 return V;
1717 }
1718
1719 SDValue SelectionDAG::getCommutedVectorShuffle(const ShuffleVectorSDNode &SV) {
1720 EVT VT = SV.getValueType(0);
1721 SmallVector<int, 8> MaskVec(SV.getMask().begin(), SV.getMask().end());
1722 ShuffleVectorSDNode::commuteMask(MaskVec);
1723
1724 SDValue Op0 = SV.getOperand(0);
1725 SDValue Op1 = SV.getOperand(1);
1726 return getVectorShuffle(VT, SDLoc(&SV), Op1, Op0, MaskVec);
1727 }
1728
1729 SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
1730 FoldingSetNodeID ID;
1731 AddNodeIDNode(ID, ISD::Register, getVTList(VT), None);
1732 ID.AddInteger(RegNo);
1733 void *IP = nullptr;
1734 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1735 return SDValue(E, 0);
1736
1737 auto *N = newSDNode<RegisterSDNode>(RegNo, VT);
1738 N->SDNodeBits.IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA);
1739 CSEMap.InsertNode(N, IP);
1740 InsertNode(N);
1741 return SDValue(N, 0);
1742 }
1743
1744 SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
1745 FoldingSetNodeID ID;
1746 AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), None);
1747 ID.AddPointer(RegMask);
1748 void *IP = nullptr;
1749 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1750 return SDValue(E, 0);
1751
1752 auto *N = newSDNode<RegisterMaskSDNode>(RegMask);
1753 CSEMap.InsertNode(N, IP);
1754 InsertNode(N);
1755 return SDValue(N, 0);
1756 }
1757
1758 SDValue SelectionDAG::getEHLabel(const SDLoc &dl, SDValue Root,
1759 MCSymbol *Label) {
1760 return getLabelNode(ISD::EH_LABEL, dl, Root, Label);
1761 }
1762
1763 SDValue SelectionDAG::getLabelNode(unsigned Opcode, const SDLoc &dl,
1764 SDValue Root, MCSymbol *Label) {
1765 FoldingSetNodeID ID;
1766 SDValue Ops[] = { Root };
1767 AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), Ops);
1768 ID.AddPointer(Label);
1769 void *IP = nullptr;
1770 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1771 return SDValue(E, 0);
1772
1773 auto *N = newSDNode<LabelSDNode>(dl.getIROrder(), dl.getDebugLoc(), Label);
1774 createOperands(N, Ops);
1775
1776 CSEMap.InsertNode(N, IP);
1777 InsertNode(N);
1778 return SDValue(N, 0);
1779 }
1780
1781 SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
1782 int64_t Offset,
1783 bool isTarget,
1784 unsigned char TargetFlags) {
1785 unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;
1786
1787 FoldingSetNodeID ID;
1788 AddNodeIDNode(ID, Opc, getVTList(VT), None);
1789 ID.AddPointer(BA);
1790 ID.AddInteger(Offset);
1791 ID.AddInteger(TargetFlags);
1792 void *IP = nullptr;
1793 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1794 return SDValue(E, 0);
1795
1796 auto *N = newSDNode<BlockAddressSDNode>(Opc, VT, BA, Offset, TargetFlags);
1797 CSEMap.InsertNode(N, IP);
1798 InsertNode(N);
1799 return SDValue(N, 0);
1800 }
1801
1802 SDValue SelectionDAG::getSrcValue(const Value *V) {
1803 assert((!V || V->getType()->isPointerTy()) &&
1804 "SrcValue is not a pointer?");
1805
1806 FoldingSetNodeID ID;
1807 AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), None);
1808 ID.AddPointer(V);
1809
1810 void *IP = nullptr;
1811 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1812 return SDValue(E, 0);
1813
1814 auto *N = newSDNode<SrcValueSDNode>(V);
1815 CSEMap.InsertNode(N, IP);
1816 InsertNode(N);
1817 return SDValue(N, 0);
1818 }
1819
1820 SDValue SelectionDAG::getMDNode(const MDNode *MD) {
1821 FoldingSetNodeID ID;
1822 AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), None);
1823 ID.AddPointer(MD);
1824
1825 void *IP = nullptr;
1826 if (SDNode *E = FindNodeOrInsertPos(ID, IP))
1827 return SDValue(E, 0);
1828
1829 auto *N = newSDNode<MDNodeSDNode>(MD);
1830 CSEMap.InsertNode(N, IP);
1831 InsertNode(N);
1832 return SDValue(N, 0);
1833 }
1834
1835 SDValue SelectionDAG::getBitcast(EVT VT, SDValue V) {
1836 if (VT == V.getValueType())
1837 return V;
1838
1839 return getNode(ISD::BITCAST, SDLoc(V), VT, V);
1840 }
1841
1842 SDValue SelectionDAG::getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr,
1843 unsigned SrcAS, unsigned DestAS) {
1844 SDValue Ops[] = {Ptr};
1845 FoldingSetNodeID ID;
1846 AddNodeIDNode(ID, ISD::ADDRSPACECAST, getVTList(VT), Ops);
1847 ID.AddInteger(SrcAS);
1848 ID.AddInteger(DestAS);
1849
1850 void *IP = nullptr;
1851 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
1852 return SDValue(E, 0);
1853
1854 auto *N = newSDNode<AddrSpaceCastSDNode>(dl.getIROrder(), dl.getDebugLoc(),
1855 VT, SrcAS, DestAS);
1856 createOperands(N, Ops);
1857
1858 CSEMap.InsertNode(N, IP);
1859 InsertNode(N);
1860 return SDValue(N, 0);
1861 }
1862
1863 /// getShiftAmountOperand - Return the specified value casted to
1864 /// the target's desired shift amount type.
1865 SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
1866 EVT OpTy = Op.getValueType();
1867 EVT ShTy = TLI->getShiftAmountTy(LHSTy, getDataLayout());
1868 if (OpTy == ShTy || OpTy.isVector()) return Op;
1869
1870 return getZExtOrTrunc(Op, SDLoc(Op), ShTy);
1871 }
1872
1873 SDValue SelectionDAG::expandVAArg(SDNode *Node) {
1874 SDLoc dl(Node);
1875 const TargetLowering &TLI = getTargetLoweringInfo();
1876 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
1877 EVT VT = Node->getValueType(0);
1878 SDValue Tmp1 = Node->getOperand(0);
1879 SDValue Tmp2 = Node->getOperand(1);
1880 unsigned Align = Node->getConstantOperandVal(3);
1881
1882 SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1,
1883 Tmp2, MachinePointerInfo(V));
1884 SDValue VAList = VAListLoad;
1885
1886 if (Align > TLI.getMinStackArgumentAlignment()) {
1887 assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2");
1888
1889 VAList = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
1890 getConstant(Align - 1, dl, VAList.getValueType()));
1891
1892 VAList = getNode(ISD::AND, dl, VAList.getValueType(), VAList,
1893 getConstant(-(int64_t)Align, dl, VAList.getValueType()));
1894 }
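// As an illustration, with Align == 16 and a VAList value of 0x1003, the
// ADD/AND pair above computes (0x1003 + 15) & ~15 == 0x1010, i.e. the next
// 16-byte boundary.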
1895
1896 // Increment the pointer, VAList, to the next vaarg
1897 Tmp1 = getNode(ISD::ADD, dl, VAList.getValueType(), VAList,
1898 getConstant(getDataLayout().getTypeAllocSize(
1899 VT.getTypeForEVT(*getContext())),
1900 dl, VAList.getValueType()));
1901 // Store the incremented VAList to the legalized pointer
1902 Tmp1 =
1903 getStore(VAListLoad.getValue(1), dl, Tmp1, Tmp2, MachinePointerInfo(V));
1904 // Load the actual argument out of the pointer VAList
1905 return getLoad(VT, dl, Tmp1, VAList, MachinePointerInfo());
1906 }
1907
1908 SDValue SelectionDAG::expandVACopy(SDNode *Node) {
1909 SDLoc dl(Node);
1910 const TargetLowering &TLI = getTargetLoweringInfo();
1911 // This defaults to loading a pointer from the input and storing it to the
1912 // output, returning the chain.
1913 const Value *VD = cast<SrcValueSDNode>(Node->getOperand(3))->getValue();
1914 const Value *VS = cast<SrcValueSDNode>(Node->getOperand(4))->getValue();
1915 SDValue Tmp1 =
1916 getLoad(TLI.getPointerTy(getDataLayout()), dl, Node->getOperand(0),
1917 Node->getOperand(2), MachinePointerInfo(VS));
1918 return getStore(Tmp1.getValue(1), dl, Tmp1, Node->getOperand(1),
1919 MachinePointerInfo(VD));
1920 }
1921
1922 SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
1923 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
1924 unsigned ByteSize = VT.getStoreSize();
1925 Type *Ty = VT.getTypeForEVT(*getContext());
1926 unsigned StackAlign =
1927 std::max((unsigned)getDataLayout().getPrefTypeAlignment(Ty), minAlign);
1928
1929 int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false);
1930 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
1931 }
1932
1933 SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
1934 unsigned Bytes = std::max(VT1.getStoreSize(), VT2.getStoreSize());
1935 Type *Ty1 = VT1.getTypeForEVT(*getContext());
1936 Type *Ty2 = VT2.getTypeForEVT(*getContext());
1937 const DataLayout &DL = getDataLayout();
1938 unsigned Align =
1939 std::max(DL.getPrefTypeAlignment(Ty1), DL.getPrefTypeAlignment(Ty2));
1940
1941 MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
1942 int FrameIdx = MFI.CreateStackObject(Bytes, Align, false);
1943 return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
1944 }
1945
1946 SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, SDValue N2,
1947 ISD::CondCode Cond, const SDLoc &dl) {
1948 EVT OpVT = N1.getValueType();
1949
1950 // These setcc operations always fold.
1951 switch (Cond) {
1952 default: break;
1953 case ISD::SETFALSE:
1954 case ISD::SETFALSE2: return getBoolConstant(false, dl, VT, OpVT);
1955 case ISD::SETTRUE:
1956 case ISD::SETTRUE2: return getBoolConstant(true, dl, VT, OpVT);
1957
1958 case ISD::SETOEQ:
1959 case ISD::SETOGT:
1960 case ISD::SETOGE:
1961 case ISD::SETOLT:
1962 case ISD::SETOLE:
1963 case ISD::SETONE:
1964 case ISD::SETO:
1965 case ISD::SETUO:
1966 case ISD::SETUEQ:
1967 case ISD::SETUNE:
1968 assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!");
1969 break;
1970 }
1971
1972 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) {
1973 const APInt &C2 = N2C->getAPIntValue();
1974 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
1975 const APInt &C1 = N1C->getAPIntValue();
1976
1977 switch (Cond) {
1978 default: llvm_unreachable("Unknown integer setcc!");
1979 case ISD::SETEQ: return getBoolConstant(C1 == C2, dl, VT, OpVT);
1980 case ISD::SETNE: return getBoolConstant(C1 != C2, dl, VT, OpVT);
1981 case ISD::SETULT: return getBoolConstant(C1.ult(C2), dl, VT, OpVT);
1982 case ISD::SETUGT: return getBoolConstant(C1.ugt(C2), dl, VT, OpVT);
1983 case ISD::SETULE: return getBoolConstant(C1.ule(C2), dl, VT, OpVT);
1984 case ISD::SETUGE: return getBoolConstant(C1.uge(C2), dl, VT, OpVT);
1985 case ISD::SETLT: return getBoolConstant(C1.slt(C2), dl, VT, OpVT);
1986 case ISD::SETGT: return getBoolConstant(C1.sgt(C2), dl, VT, OpVT);
1987 case ISD::SETLE: return getBoolConstant(C1.sle(C2), dl, VT, OpVT);
1988 case ISD::SETGE: return getBoolConstant(C1.sge(C2), dl, VT, OpVT);
1989 }
1990 }
1991 }
1992 if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1)) {
1993 if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2)) {
1994 APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
1995 switch (Cond) {
1996 default: break;
1997 case ISD::SETEQ: if (R==APFloat::cmpUnordered)
1998 return getUNDEF(VT);
1999 LLVM_FALLTHROUGH;
2000 case ISD::SETOEQ: return getBoolConstant(R==APFloat::cmpEqual, dl, VT,
2001 OpVT);
2002 case ISD::SETNE: if (R==APFloat::cmpUnordered)
2003 return getUNDEF(VT);
2004 LLVM_FALLTHROUGH;
2005 case ISD::SETONE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2006 R==APFloat::cmpLessThan, dl, VT,
2007 OpVT);
2008 case ISD::SETLT: if (R==APFloat::cmpUnordered)
2009 return getUNDEF(VT);
2010 LLVM_FALLTHROUGH;
2011 case ISD::SETOLT: return getBoolConstant(R==APFloat::cmpLessThan, dl, VT,
2012 OpVT);
2013 case ISD::SETGT: if (R==APFloat::cmpUnordered)
2014 return getUNDEF(VT);
2015 LLVM_FALLTHROUGH;
2016 case ISD::SETOGT: return getBoolConstant(R==APFloat::cmpGreaterThan, dl,
2017 VT, OpVT);
2018 case ISD::SETLE: if (R==APFloat::cmpUnordered)
2019 return getUNDEF(VT);
2020 LLVM_FALLTHROUGH;
2021 case ISD::SETOLE: return getBoolConstant(R==APFloat::cmpLessThan ||
2022 R==APFloat::cmpEqual, dl, VT,
2023 OpVT);
2024 case ISD::SETGE: if (R==APFloat::cmpUnordered)
2025 return getUNDEF(VT);
2026 LLVM_FALLTHROUGH;
2027 case ISD::SETOGE: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2028 R==APFloat::cmpEqual, dl, VT, OpVT);
2029 case ISD::SETO: return getBoolConstant(R!=APFloat::cmpUnordered, dl, VT,
2030 OpVT);
2031 case ISD::SETUO: return getBoolConstant(R==APFloat::cmpUnordered, dl, VT,
2032 OpVT);
2033 case ISD::SETUEQ: return getBoolConstant(R==APFloat::cmpUnordered ||
2034 R==APFloat::cmpEqual, dl, VT,
2035 OpVT);
2036 case ISD::SETUNE: return getBoolConstant(R!=APFloat::cmpEqual, dl, VT,
2037 OpVT);
2038 case ISD::SETULT: return getBoolConstant(R==APFloat::cmpUnordered ||
2039 R==APFloat::cmpLessThan, dl, VT,
2040 OpVT);
2041 case ISD::SETUGT: return getBoolConstant(R==APFloat::cmpGreaterThan ||
2042 R==APFloat::cmpUnordered, dl, VT,
2043 OpVT);
2044 case ISD::SETULE: return getBoolConstant(R!=APFloat::cmpGreaterThan, dl,
2045 VT, OpVT);
2046 case ISD::SETUGE: return getBoolConstant(R!=APFloat::cmpLessThan, dl, VT,
2047 OpVT);
2048 }
2049 } else {
2050 // Ensure that the constant occurs on the RHS.
2051 ISD::CondCode SwappedCond = ISD::getSetCCSwappedOperands(Cond);
2052 MVT CompVT = N1.getValueType().getSimpleVT();
2053 if (!TLI->isCondCodeLegal(SwappedCond, CompVT))
2054 return SDValue();
2055
2056 return getSetCC(dl, VT, N2, N1, SwappedCond);
2057 }
2058 }
2059
2060 // Could not fold it.
2061 return SDValue();
2062 }
2063
2064 /// See if the specified operand can be simplified with the knowledge that only
2065 /// the bits specified by Mask are used.
2066 SDValue SelectionDAG::GetDemandedBits(SDValue V, const APInt &Mask) {
2067 switch (V.getOpcode()) {
2068 default:
2069 break;
2070 case ISD::Constant: {
2071 const ConstantSDNode *CV = cast<ConstantSDNode>(V.getNode());
2072 assert(CV && "Const value should be ConstSDNode.");
2073 const APInt &CVal = CV->getAPIntValue();
2074 APInt NewVal = CVal & Mask;
2075 if (NewVal != CVal)
2076 return getConstant(NewVal, SDLoc(V), V.getValueType());
2077 break;
2078 }
2079 case ISD::OR:
2080 case ISD::XOR:
2081 // If the LHS or RHS don't contribute bits to the or, drop them.
2082 if (MaskedValueIsZero(V.getOperand(0), Mask))
2083 return V.getOperand(1);
2084 if (MaskedValueIsZero(V.getOperand(1), Mask))
2085 return V.getOperand(0);
2086 break;
2087 case ISD::SRL:
2088 // Only look at single-use SRLs.
2089 if (!V.getNode()->hasOneUse())
2090 break;
2091 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
2092 // See if we can recursively simplify the LHS.
2093 unsigned Amt = RHSC->getZExtValue();
2094
2095 // Watch out for shift count overflow though.
2096 if (Amt >= Mask.getBitWidth())
2097 break;
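// The bits demanded of the SRL result live Amt positions higher in its
// input; e.g. demanding mask 0x0F of (X srl 4) demands 0xF0 of X.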
2098 APInt NewMask = Mask << Amt;
2099 if (SDValue SimplifyLHS = GetDemandedBits(V.getOperand(0), NewMask))
2100 return getNode(ISD::SRL, SDLoc(V), V.getValueType(), SimplifyLHS,
2101 V.getOperand(1));
2102 }
2103 break;
2104 case ISD::AND: {
2105 // X & -1 -> X (ignoring bits which aren't demanded).
2106 ConstantSDNode *AndVal = isConstOrConstSplat(V.getOperand(1));
2107 if (AndVal && Mask.isSubsetOf(AndVal->getAPIntValue()))
2108 return V.getOperand(0);
2109 break;
2110 }
2111 case ISD::ANY_EXTEND: {
2112 SDValue Src = V.getOperand(0);
2113 unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
2114 // Being conservative here - only peek through if we only demand bits in the
2115 // non-extended source (even though the extended bits are technically undef).
2116 if (Mask.getActiveBits() > SrcBitWidth)
2117 break;
2118 APInt SrcMask = Mask.trunc(SrcBitWidth);
2119 if (SDValue DemandedSrc = GetDemandedBits(Src, SrcMask))
2120 return getNode(ISD::ANY_EXTEND, SDLoc(V), V.getValueType(), DemandedSrc);
2121 break;
2122 }
2123 case ISD::SIGN_EXTEND_INREG:
2124 EVT ExVT = cast<VTSDNode>(V.getOperand(1))->getVT();
2125 unsigned ExVTBits = ExVT.getScalarSizeInBits();
2126
2127 // If none of the extended bits are demanded, eliminate the sextinreg.
2128 if (Mask.getActiveBits() <= ExVTBits)
2129 return V.getOperand(0);
2130
2131 break;
2132 }
2133 return SDValue();
2134 }
2135
2136 /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
2137 /// use this predicate to simplify operations downstream.
2138 bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
2139 unsigned BitWidth = Op.getScalarValueSizeInBits();
2140 return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth);
2141 }
2142
2143 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
2144 /// this predicate to simplify operations downstream. Mask is known to be zero
2145 /// for bits that V cannot have.
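/// For example, calling this with a Mask that has only the low two bits set
/// returns true when both of those bits of Op are known to be zero.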
2146 bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
2147 unsigned Depth) const {
2148 return Mask.isSubsetOf(computeKnownBits(Op, Depth).Zero);
2149 }
2150
2151 /// isSplatValue - Return true if the vector V has the same value
2152 /// across all DemandedElts.
2153 bool SelectionDAG::isSplatValue(SDValue V, const APInt &DemandedElts,
2154 APInt &UndefElts) {
2155 if (!DemandedElts)
2156 return false; // No demanded elts, better to assume we don't know anything.
2157
2158 EVT VT = V.getValueType();
2159 assert(VT.isVector() && "Vector type expected");
2160
2161 unsigned NumElts = VT.getVectorNumElements();
2162 assert(NumElts == DemandedElts.getBitWidth() && "Vector size mismatch");
2163 UndefElts = APInt::getNullValue(NumElts);
2164
2165 switch (V.getOpcode()) {
2166 case ISD::BUILD_VECTOR: {
2167 SDValue Scl;
2168 for (unsigned i = 0; i != NumElts; ++i) {
2169 SDValue Op = V.getOperand(i);
2170 if (Op.isUndef()) {
2171 UndefElts.setBit(i);
2172 continue;
2173 }
2174 if (!DemandedElts[i])
2175 continue;
2176 if (Scl && Scl != Op)
2177 return false;
2178 Scl = Op;
2179 }
2180 return true;
2181 }
2182 case ISD::VECTOR_SHUFFLE: {
2183 // Check if this is a shuffle node doing a splat.
2184 // TODO: Do we need to handle shuffle(splat, undef, mask)?
2185 int SplatIndex = -1;
2186 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(V)->getMask();
2187 for (int i = 0; i != (int)NumElts; ++i) {
2188 int M = Mask[i];
2189 if (M < 0) {
2190 UndefElts.setBit(i);
2191 continue;
2192 }
2193 if (!DemandedElts[i])
2194 continue;
2195 if (0 <= SplatIndex && SplatIndex != M)
2196 return false;
2197 SplatIndex = M;
2198 }
2199 return true;
2200 }
2201 case ISD::EXTRACT_SUBVECTOR: {
2202 SDValue Src = V.getOperand(0);
2203 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(V.getOperand(1));
2204 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2205 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
2206 // Offset the demanded elts by the subvector index.
2207 uint64_t Idx = SubIdx->getZExtValue();
2208 APInt UndefSrcElts;
2209 APInt DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
2210 if (isSplatValue(Src, DemandedSrc, UndefSrcElts)) {
2211 UndefElts = UndefSrcElts.extractBits(NumElts, Idx);
2212 return true;
2213 }
2214 }
2215 break;
2216 }
2217 case ISD::ADD:
2218 case ISD::SUB:
2219 case ISD::AND: {
2220 APInt UndefLHS, UndefRHS;
2221 SDValue LHS = V.getOperand(0);
2222 SDValue RHS = V.getOperand(1);
2223 if (isSplatValue(LHS, DemandedElts, UndefLHS) &&
2224 isSplatValue(RHS, DemandedElts, UndefRHS)) {
2225 UndefElts = UndefLHS | UndefRHS;
2226 return true;
2227 }
2228 break;
2229 }
2230 }
2231
2232 return false;
2233 }
2234
2235 /// Helper wrapper to main isSplatValue function.
2236 bool SelectionDAG::isSplatValue(SDValue V, bool AllowUndefs) {
2237 EVT VT = V.getValueType();
2238 assert(VT.isVector() && "Vector type expected");
2239 unsigned NumElts = VT.getVectorNumElements();
2240
2241 APInt UndefElts;
2242 APInt DemandedElts = APInt::getAllOnesValue(NumElts);
2243 return isSplatValue(V, DemandedElts, UndefElts) &&
2244 (AllowUndefs || !UndefElts);
2245 }
2246
2247 /// Helper function that checks to see if a node is a constant or a
2248 /// build vector of splat constants at least within the demanded elts.
2249 static ConstantSDNode *isConstOrDemandedConstSplat(SDValue N,
2250 const APInt &DemandedElts) {
2251 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
2252 return CN;
2253 if (N.getOpcode() != ISD::BUILD_VECTOR)
2254 return nullptr;
2255 EVT VT = N.getValueType();
2256 ConstantSDNode *Cst = nullptr;
2257 unsigned NumElts = VT.getVectorNumElements();
2258 assert(DemandedElts.getBitWidth() == NumElts && "Unexpected vector size");
2259 for (unsigned i = 0; i != NumElts; ++i) {
2260 if (!DemandedElts[i])
2261 continue;
2262 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(i));
2263 if (!C || (Cst && Cst->getAPIntValue() != C->getAPIntValue()) ||
2264 C->getValueType(0) != VT.getScalarType())
2265 return nullptr;
2266 Cst = C;
2267 }
2268 return Cst;
2269 }
2270
2271 /// If a SHL/SRA/SRL node has a constant or splat constant shift amount that
2272 /// is less than the element bit-width of the shift node, return it.
2273 static const APInt *getValidShiftAmountConstant(SDValue V) {
2274 if (ConstantSDNode *SA = isConstOrConstSplat(V.getOperand(1))) {
2275 // Shifting more than the bitwidth is not valid.
2276 const APInt &ShAmt = SA->getAPIntValue();
2277 if (ShAmt.ult(V.getScalarValueSizeInBits()))
2278 return &ShAmt;
2279 }
2280 return nullptr;
2281 }
2282
2283 /// Determine which bits of Op are known to be either zero or one and return
2284 /// them in Known. For vectors, the known bits are those that are shared by
2285 /// every vector element.
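/// A minimal illustrative use (assuming a SelectionDAG &DAG and an SDValue Op
/// are in scope):
/// \code
///   KnownBits Known = DAG.computeKnownBits(Op);
///   if (Known.Zero.isSignBitSet()) {
///     // The sign bit of Op is known to be zero.
///   }
/// \endcode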
2286 KnownBits SelectionDAG::computeKnownBits(SDValue Op, unsigned Depth) const {
2287 EVT VT = Op.getValueType();
2288 APInt DemandedElts = VT.isVector()
2289 ? APInt::getAllOnesValue(VT.getVectorNumElements())
2290 : APInt(1, 1);
2291 return computeKnownBits(Op, DemandedElts, Depth);
2292 }
2293
2294 /// Determine which bits of Op are known to be either zero or one and return
2295 /// them in Known. The DemandedElts argument allows us to only collect the known
2296 /// bits that are shared by the requested vector elements.
2297 KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
2298 unsigned Depth) const {
2299 unsigned BitWidth = Op.getScalarValueSizeInBits();
2300
2301 KnownBits Known(BitWidth); // Don't know anything.
2302
2303 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
2304 // We know all of the bits for a constant!
2305 Known.One = C->getAPIntValue();
2306 Known.Zero = ~Known.One;
2307 return Known;
2308 }
2309 if (auto *C = dyn_cast<ConstantFPSDNode>(Op)) {
2310 // We know all of the bits for a constant fp!
2311 Known.One = C->getValueAPF().bitcastToAPInt();
2312 Known.Zero = ~Known.One;
2313 return Known;
2314 }
2315
2316 if (Depth == 6)
2317 return Known; // Limit search depth.
2318
2319 KnownBits Known2;
2320 unsigned NumElts = DemandedElts.getBitWidth();
2321 assert((!Op.getValueType().isVector() ||
2322 NumElts == Op.getValueType().getVectorNumElements()) &&
2323 "Unexpected vector size");
2324
2325 if (!DemandedElts)
2326 return Known; // No demanded elts, better to assume we don't know anything.
2327
2328 unsigned Opcode = Op.getOpcode();
2329 switch (Opcode) {
2330 case ISD::BUILD_VECTOR:
2331 // Collect the known bits that are shared by every demanded vector element.
2332 Known.Zero.setAllBits(); Known.One.setAllBits();
2333 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
2334 if (!DemandedElts[i])
2335 continue;
2336
2337 SDValue SrcOp = Op.getOperand(i);
2338 Known2 = computeKnownBits(SrcOp, Depth + 1);
2339
2340 // BUILD_VECTOR can implicitly truncate sources; we must handle this.
2341 if (SrcOp.getValueSizeInBits() != BitWidth) {
2342 assert(SrcOp.getValueSizeInBits() > BitWidth &&
2343 "Expected BUILD_VECTOR implicit truncation");
2344 Known2 = Known2.trunc(BitWidth);
2345 }
2346
2347 // Known bits are the values that are shared by every demanded element.
2348 Known.One &= Known2.One;
2349 Known.Zero &= Known2.Zero;
2350
2351 // If we don't know any bits, early out.
2352 if (Known.isUnknown())
2353 break;
2354 }
2355 break;
2356 case ISD::VECTOR_SHUFFLE: {
2357 // Collect the known bits that are shared by every vector element referenced
2358 // by the shuffle.
2359 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
2360 Known.Zero.setAllBits(); Known.One.setAllBits();
2361 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
2362 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
2363 for (unsigned i = 0; i != NumElts; ++i) {
2364 if (!DemandedElts[i])
2365 continue;
2366
2367 int M = SVN->getMaskElt(i);
2368 if (M < 0) {
2369 // For UNDEF elements, we don't know anything about the common state of
2370 // the shuffle result.
2371 Known.resetAll();
2372 DemandedLHS.clearAllBits();
2373 DemandedRHS.clearAllBits();
2374 break;
2375 }
2376
2377 if ((unsigned)M < NumElts)
2378 DemandedLHS.setBit((unsigned)M % NumElts);
2379 else
2380 DemandedRHS.setBit((unsigned)M % NumElts);
2381 }
2382 // Known bits are the values that are shared by every demanded element.
2383 if (!!DemandedLHS) {
2384 SDValue LHS = Op.getOperand(0);
2385 Known2 = computeKnownBits(LHS, DemandedLHS, Depth + 1);
2386 Known.One &= Known2.One;
2387 Known.Zero &= Known2.Zero;
2388 }
2389 // If we don't know any bits, early out.
2390 if (Known.isUnknown())
2391 break;
2392 if (!!DemandedRHS) {
2393 SDValue RHS = Op.getOperand(1);
2394 Known2 = computeKnownBits(RHS, DemandedRHS, Depth + 1);
2395 Known.One &= Known2.One;
2396 Known.Zero &= Known2.Zero;
2397 }
2398 break;
2399 }
2400 case ISD::CONCAT_VECTORS: {
2401 // Split DemandedElts and test each of the demanded subvectors.
2402 Known.Zero.setAllBits(); Known.One.setAllBits();
2403 EVT SubVectorVT = Op.getOperand(0).getValueType();
2404 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
2405 unsigned NumSubVectors = Op.getNumOperands();
2406 for (unsigned i = 0; i != NumSubVectors; ++i) {
2407 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
2408 DemandedSub = DemandedSub.trunc(NumSubVectorElts);
2409 if (!!DemandedSub) {
2410 SDValue Sub = Op.getOperand(i);
2411 Known2 = computeKnownBits(Sub, DemandedSub, Depth + 1);
2412 Known.One &= Known2.One;
2413 Known.Zero &= Known2.Zero;
2414 }
2415 // If we don't know any bits, early out.
2416 if (Known.isUnknown())
2417 break;
2418 }
2419 break;
2420 }
2421 case ISD::INSERT_SUBVECTOR: {
2422 // If we know the element index, demand any elements from the subvector and
2423 // the remainder from the src it's inserted into; otherwise demand them all.
2424 SDValue Src = Op.getOperand(0);
2425 SDValue Sub = Op.getOperand(1);
2426 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
2427 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
2428 if (SubIdx && SubIdx->getAPIntValue().ule(NumElts - NumSubElts)) {
2429 Known.One.setAllBits();
2430 Known.Zero.setAllBits();
2431 uint64_t Idx = SubIdx->getZExtValue();
2432 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
2433 if (!!DemandedSubElts) {
2434 Known = computeKnownBits(Sub, DemandedSubElts, Depth + 1);
2435 if (Known.isUnknown())
2436 break; // early-out.
2437 }
2438 APInt SubMask = APInt::getBitsSet(NumElts, Idx, Idx + NumSubElts);
2439 APInt DemandedSrcElts = DemandedElts & ~SubMask;
2440 if (!!DemandedSrcElts) {
2441 Known2 = computeKnownBits(Src, DemandedSrcElts, Depth + 1);
2442 Known.One &= Known2.One;
2443 Known.Zero &= Known2.Zero;
2444 }
2445 } else {
2446 Known = computeKnownBits(Sub, Depth + 1);
2447 if (Known.isUnknown())
2448 break; // early-out.
2449 Known2 = computeKnownBits(Src, Depth + 1);
2450 Known.One &= Known2.One;
2451 Known.Zero &= Known2.Zero;
2452 }
2453 break;
2454 }
2455 case ISD::EXTRACT_SUBVECTOR: {
2456 // If we know the element index, just demand that subvector elements,
2457 // otherwise demand them all.
2458 SDValue Src = Op.getOperand(0);
2459 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
2460 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
2461 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
2462 // Offset the demanded elts by the subvector index.
2463 uint64_t Idx = SubIdx->getZExtValue();
2464 APInt DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
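// E.g. extracting 4 elements starting at index 4 from a v8 source maps
// demanded element i of the result to element i + 4 of the source.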
2465 Known = computeKnownBits(Src, DemandedSrc, Depth + 1);
2466 } else {
2467 Known = computeKnownBits(Src, Depth + 1);
2468 }
2469 break;
2470 }
2471 case ISD::SCALAR_TO_VECTOR: {
2472 // We know as much about scalar_to_vector as we know about its source,
2473 // which becomes the first element of an otherwise unknown vector.
2474 if (DemandedElts != 1)
2475 break;
2476
2477 SDValue N0 = Op.getOperand(0);
2478 Known = computeKnownBits(N0, Depth + 1);
2479 if (N0.getValueSizeInBits() != BitWidth)
2480 Known = Known.trunc(BitWidth);
2481
2482 break;
2483 }
2484 case ISD::BITCAST: {
2485 SDValue N0 = Op.getOperand(0);
2486 EVT SubVT = N0.getValueType();
2487 unsigned SubBitWidth = SubVT.getScalarSizeInBits();
2488
2489 // Ignore bitcasts from unsupported types.
2490 if (!(SubVT.isInteger() || SubVT.isFloatingPoint()))
2491 break;
2492
2493 // Fast handling of 'identity' bitcasts.
2494 if (BitWidth == SubBitWidth) {
2495 Known = computeKnownBits(N0, DemandedElts, Depth + 1);
2496 break;
2497 }
2498
2499 bool IsLE = getDataLayout().isLittleEndian();
2500
2501 // Bitcast 'small element' vector to 'large element' scalar/vector.
2502 if ((BitWidth % SubBitWidth) == 0) {
2503 assert(N0.getValueType().isVector() && "Expected bitcast from vector");
2504
2505 // Collect known bits for the (larger) output by collecting the known
2506 // bits from each set of sub elements and shift these into place.
2507 // We need to separately call computeKnownBits for each set of
2508 // sub elements as the knownbits for each is likely to be different.
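// For example, bitcasting v8i8 to v2i32 combines input elements
// [4*i, 4*i+3] into output element i; on a little-endian target input
// element 4*i lands in the low 8 bits of that output element.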
2509 unsigned SubScale = BitWidth / SubBitWidth;
2510 APInt SubDemandedElts(NumElts * SubScale, 0);
2511 for (unsigned i = 0; i != NumElts; ++i)
2512 if (DemandedElts[i])
2513 SubDemandedElts.setBit(i * SubScale);
2514
2515 for (unsigned i = 0; i != SubScale; ++i) {
2516 Known2 = computeKnownBits(N0, SubDemandedElts.shl(i),
2517 Depth + 1);
2518 unsigned Shifts = IsLE ? i : SubScale - 1 - i;
2519 Known.One |= Known2.One.zext(BitWidth).shl(SubBitWidth * Shifts);
2520 Known.Zero |= Known2.Zero.zext(BitWidth).shl(SubBitWidth * Shifts);
2521 }
2522 }
2523
2524 // Bitcast 'large element' scalar/vector to 'small element' vector.
2525 if ((SubBitWidth % BitWidth) == 0) {
2526 assert(Op.getValueType().isVector() && "Expected bitcast to vector");
2527
2528 // Collect known bits for the (smaller) output by collecting the known
2529 // bits from the overlapping larger input elements and extracting the
2530 // sub sections we actually care about.
2531 unsigned SubScale = SubBitWidth / BitWidth;
2532 APInt SubDemandedElts(NumElts / SubScale, 0);
2533 for (unsigned i = 0; i != NumElts; ++i)
2534 if (DemandedElts[i])
2535 SubDemandedElts.setBit(i / SubScale);
2536
2537 Known2 = computeKnownBits(N0, SubDemandedElts, Depth + 1);
2538
2539 Known.Zero.setAllBits(); Known.One.setAllBits();
2540 for (unsigned i = 0; i != NumElts; ++i)
2541 if (DemandedElts[i]) {
2542 unsigned Shifts = IsLE ? i : NumElts - 1 - i;
2543 unsigned Offset = (Shifts % SubScale) * BitWidth;
2544 Known.One &= Known2.One.lshr(Offset).trunc(BitWidth);
2545 Known.Zero &= Known2.Zero.lshr(Offset).trunc(BitWidth);
2546 // If we don't know any bits, early out.
2547 if (Known.isUnknown())
2548 break;
2549 }
2550 }
2551 break;
2552 }
2553 case ISD::AND:
2554 // If either the LHS or the RHS are Zero, the result is zero.
2555 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2556 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2557
2558 // Output known-1 bits are only known if set in both the LHS & RHS.
2559 Known.One &= Known2.One;
2560 // Output known-0 are known to be clear if zero in either the LHS | RHS.
2561 Known.Zero |= Known2.Zero;
2562 break;
2563 case ISD::OR:
2564 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2565 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2566
2567 // Output known-0 bits are only known if clear in both the LHS & RHS.
2568 Known.Zero &= Known2.Zero;
2569 // Output known-1 are known to be set if set in either the LHS | RHS.
2570 Known.One |= Known2.One;
2571 break;
2572 case ISD::XOR: {
2573 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2574 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2575
2576 // Output known-0 bits are known if clear or set in both the LHS & RHS.
2577 APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
2578 // Output known-1 are known to be set if set in only one of the LHS, RHS.
2579 Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
2580 Known.Zero = KnownZeroOut;
2581 break;
2582 }
2583 case ISD::MUL: {
2584 Known = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2585 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2586
2587 // If low bits are zero in either operand, output low known-0 bits.
2588 // Also compute a conservative estimate for high known-0 bits.
2589 // More trickiness is possible, but this is sufficient for the
2590 // interesting case of alignment computation.
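// For example, a multiple of 4 times a multiple of 8 is a multiple of 32
// (2 + 3 trailing zero bits), and if both 32-bit operands fit in 12 bits
// the product fits in 24 bits, leaving at least 8 leading zero bits.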
2591 unsigned TrailZ = Known.countMinTrailingZeros() +
2592 Known2.countMinTrailingZeros();
2593 unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
2594 Known2.countMinLeadingZeros(),
2595 BitWidth) - BitWidth;
2596
2597 Known.resetAll();
2598 Known.Zero.setLowBits(std::min(TrailZ, BitWidth));
2599 Known.Zero.setHighBits(std::min(LeadZ, BitWidth));
2600 break;
2601 }
2602 case ISD::UDIV: {
2603 // For the purposes of computing leading zeros we can conservatively
2604 // treat a udiv as a logical right shift by the power of 2 known to
2605 // be less than the denominator.
2606 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2607 unsigned LeadZ = Known2.countMinLeadingZeros();
2608
2609 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2610 unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
2611 if (RHSMaxLeadingZeros != BitWidth)
2612 LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);
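// E.g. if the numerator fits in 24 bits and the denominator is known to
// have bit 4 set (so at most 27 leading zeros), the quotient fits in 20
// bits and gets at least 12 leading zero bits.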
2613
2614 Known.Zero.setHighBits(LeadZ);
2615 break;
2616 }
2617 case ISD::SELECT:
2618 case ISD::VSELECT:
2619 Known = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
2620 // If we don't know any bits, early out.
2621 if (Known.isUnknown())
2622 break;
2623 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth+1);
2624
2625 // Only known if known in both the LHS and RHS.
2626 Known.One &= Known2.One;
2627 Known.Zero &= Known2.Zero;
2628 break;
2629 case ISD::SELECT_CC:
2630 Known = computeKnownBits(Op.getOperand(3), DemandedElts, Depth+1);
2631 // If we don't know any bits, early out.
2632 if (Known.isUnknown())
2633 break;
2634 Known2 = computeKnownBits(Op.getOperand(2), DemandedElts, Depth+1);
2635
2636 // Only known if known in both the LHS and RHS.
2637 Known.One &= Known2.One;
2638 Known.Zero &= Known2.Zero;
2639 break;
2640 case ISD::SMULO:
2641 case ISD::UMULO:
2642 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
2643 if (Op.getResNo() != 1)
2644 break;
2645 // The boolean result conforms to getBooleanContents.
2646 // If we know the result of a setcc has the top bits zero, use this info.
2647 // We know that we have an integer-based boolean since these operations
2648 // are only available for integer.
2649 if (TLI->getBooleanContents(Op.getValueType().isVector(), false) ==
2650 TargetLowering::ZeroOrOneBooleanContent &&
2651 BitWidth > 1)
2652 Known.Zero.setBitsFrom(1);
2653 break;
2654 case ISD::SETCC:
2655 // If we know the result of a setcc has the top bits zero, use this info.
2656 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
2657 TargetLowering::ZeroOrOneBooleanContent &&
2658 BitWidth > 1)
2659 Known.Zero.setBitsFrom(1);
2660 break;
2661 case ISD::SHL:
2662 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
2663 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2664 unsigned Shift = ShAmt->getZExtValue();
2665 Known.Zero <<= Shift;
2666 Known.One <<= Shift;
2667 // Low bits are known zero.
2668 Known.Zero.setLowBits(Shift);
2669 }
2670 break;
2671 case ISD::SRL:
2672 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
2673 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2674 unsigned Shift = ShAmt->getZExtValue();
2675 Known.Zero.lshrInPlace(Shift);
2676 Known.One.lshrInPlace(Shift);
2677 // High bits are known zero.
2678 Known.Zero.setHighBits(Shift);
2679 } else if (auto *BV = dyn_cast<BuildVectorSDNode>(Op.getOperand(1))) {
2680 // If the shift amount is a vector of constants see if we can bound
2681 // the number of upper zero bits.
2682 unsigned ShiftAmountMin = BitWidth;
2683 for (unsigned i = 0; i != BV->getNumOperands(); ++i) {
2684 if (auto *C = dyn_cast<ConstantSDNode>(BV->getOperand(i))) {
2685 const APInt &ShAmt = C->getAPIntValue();
2686 if (ShAmt.ult(BitWidth)) {
2687 ShiftAmountMin = std::min<unsigned>(ShiftAmountMin,
2688 ShAmt.getZExtValue());
2689 continue;
2690 }
2691 }
2692 // Don't know anything.
2693 ShiftAmountMin = 0;
2694 break;
2695 }
2696
2697 Known.Zero.setHighBits(ShiftAmountMin);
2698 }
2699 break;
2700 case ISD::SRA:
2701 if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
2702 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2703 unsigned Shift = ShAmt->getZExtValue();
2704 // Sign extend known zero/one bit (else is unknown).
2705 Known.Zero.ashrInPlace(Shift);
2706 Known.One.ashrInPlace(Shift);
2707 }
2708 break;
2709 case ISD::FSHL:
2710 case ISD::FSHR:
2711 if (ConstantSDNode *C =
2712 isConstOrDemandedConstSplat(Op.getOperand(2), DemandedElts)) {
2713 unsigned Amt = C->getAPIntValue().urem(BitWidth);
2714
2715 // For fshl, 0-shift returns the 1st arg.
2716 // For fshr, 0-shift returns the 2nd arg.
2717 if (Amt == 0) {
2718 Known = computeKnownBits(Op.getOperand(Opcode == ISD::FSHL ? 0 : 1),
2719 DemandedElts, Depth + 1);
2720 break;
2721 }
2722
2723 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
2724 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
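// For example, for i8 operands fshl(X, Y, 3) == (X << 3) | (Y lshr 5).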
2725 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2726 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2727 if (Opcode == ISD::FSHL) {
2728 Known.One <<= Amt;
2729 Known.Zero <<= Amt;
2730 Known2.One.lshrInPlace(BitWidth - Amt);
2731 Known2.Zero.lshrInPlace(BitWidth - Amt);
2732 } else {
2733 Known.One <<= BitWidth - Amt;
2734 Known.Zero <<= BitWidth - Amt;
2735 Known2.One.lshrInPlace(Amt);
2736 Known2.Zero.lshrInPlace(Amt);
2737 }
2738 Known.One |= Known2.One;
2739 Known.Zero |= Known2.Zero;
2740 }
2741 break;
2742 case ISD::SIGN_EXTEND_INREG: {
2743 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2744 unsigned EBits = EVT.getScalarSizeInBits();
2745
2746 // Sign extension. Compute the demanded bits in the result that are not
2747 // present in the input.
2748 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);
2749
2750 APInt InSignMask = APInt::getSignMask(EBits);
2751 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);
2752
2753 // If the sign extended bits are demanded, we know that the sign
2754 // bit is demanded.
2755 InSignMask = InSignMask.zext(BitWidth);
2756 if (NewBits.getBoolValue())
2757 InputDemandedBits |= InSignMask;
2758
2759 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2760 Known.One &= InputDemandedBits;
2761 Known.Zero &= InputDemandedBits;
2762
2763 // If the sign bit of the input is known set or clear, then we know the
2764 // top bits of the result.
2765 if (Known.Zero.intersects(InSignMask)) { // Input sign bit known clear
2766 Known.Zero |= NewBits;
2767 Known.One &= ~NewBits;
2768 } else if (Known.One.intersects(InSignMask)) { // Input sign bit known set
2769 Known.One |= NewBits;
2770 Known.Zero &= ~NewBits;
2771 } else { // Input sign bit unknown
2772 Known.Zero &= ~NewBits;
2773 Known.One &= ~NewBits;
2774 }
2775 break;
2776 }
2777 case ISD::CTTZ:
2778 case ISD::CTTZ_ZERO_UNDEF: {
2779 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2780 // If we have a known 1, its position is our upper bound.
2781 unsigned PossibleTZ = Known2.countMaxTrailingZeros();
2782 unsigned LowBits = Log2_32(PossibleTZ) + 1;
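// E.g. if the lowest bit known to be one is bit 7, the result is at most 7
// and fits in 3 bits, so bits 3 and above of the result are known zero.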
2783 Known.Zero.setBitsFrom(LowBits);
2784 break;
2785 }
2786 case ISD::CTLZ:
2787 case ISD::CTLZ_ZERO_UNDEF: {
2788 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2789 // If we have a known 1, its position is our upper bound.
2790 unsigned PossibleLZ = Known2.countMaxLeadingZeros();
2791 unsigned LowBits = Log2_32(PossibleLZ) + 1;
2792 Known.Zero.setBitsFrom(LowBits);
2793 break;
2794 }
2795 case ISD::CTPOP: {
2796 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2797 // If we know some of the bits are zero, they can't be one.
2798 unsigned PossibleOnes = Known2.countMaxPopulation();
2799 Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1);
2800 break;
2801 }
2802 case ISD::LOAD: {
2803 LoadSDNode *LD = cast<LoadSDNode>(Op);
2804 // If this is a ZEXTLoad and we are looking at the loaded value.
2805 if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
2806 EVT VT = LD->getMemoryVT();
2807 unsigned MemBits = VT.getScalarSizeInBits();
2808 Known.Zero.setBitsFrom(MemBits);
2809 } else if (const MDNode *Ranges = LD->getRanges()) {
2810 if (LD->getExtensionType() == ISD::NON_EXTLOAD)
2811 computeKnownBitsFromRangeMetadata(*Ranges, Known);
2812 }
2813 break;
2814 }
2815 case ISD::ZERO_EXTEND_VECTOR_INREG: {
2816 EVT InVT = Op.getOperand(0).getValueType();
2817 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
2818 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
2819 Known = Known.zext(BitWidth);
2820 Known.Zero.setBitsFrom(InVT.getScalarSizeInBits());
2821 break;
2822 }
2823 case ISD::ZERO_EXTEND: {
2824 EVT InVT = Op.getOperand(0).getValueType();
2825 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2826 Known = Known.zext(BitWidth);
2827 Known.Zero.setBitsFrom(InVT.getScalarSizeInBits());
2828 break;
2829 }
2830 case ISD::SIGN_EXTEND_VECTOR_INREG: {
2831 EVT InVT = Op.getOperand(0).getValueType();
2832 APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
2833 Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
2834 // If the sign bit is known to be zero or one, then sext will extend
2835 // it to the top bits, else it will just zext.
2836 Known = Known.sext(BitWidth);
2837 break;
2838 }
2839 case ISD::SIGN_EXTEND: {
2840 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2841 // If the sign bit is known to be zero or one, then sext will extend
2842 // it to the top bits, else it will just zext.
2843 Known = Known.sext(BitWidth);
2844 break;
2845 }
2846 case ISD::ANY_EXTEND: {
2847 Known = computeKnownBits(Op.getOperand(0), Depth+1);
2848 Known = Known.zext(BitWidth);
2849 break;
2850 }
2851 case ISD::TRUNCATE: {
2852 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2853 Known = Known.trunc(BitWidth);
2854 break;
2855 }
2856 case ISD::AssertZext: {
2857 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2858 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
2859 Known = computeKnownBits(Op.getOperand(0), Depth+1);
2860 Known.Zero |= (~InMask);
2861 Known.One &= (~Known.Zero);
2862 break;
2863 }
2864 case ISD::FGETSIGN:
2865 // All bits are zero except the low bit.
2866 Known.Zero.setBitsFrom(1);
2867 break;
2868 case ISD::USUBO:
2869 case ISD::SSUBO:
2870 if (Op.getResNo() == 1) {
2871 // If we know the result of a setcc has the top bits zero, use this info.
2872 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
2873 TargetLowering::ZeroOrOneBooleanContent &&
2874 BitWidth > 1)
2875 Known.Zero.setBitsFrom(1);
2876 break;
2877 }
2878 LLVM_FALLTHROUGH;
2879 case ISD::SUB:
2880 case ISD::SUBC: {
2881 if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0))) {
2882 // We know that the top bits of C-X are clear if X contains fewer bits
2883 // than C (i.e. no wrap-around can happen). For example, 20-X is
2884 // positive if we can prove that X is >= 0 and < 16.
2885 if (CLHS->getAPIntValue().isNonNegative()) {
2886 unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
2887 // NLZ can't be BitWidth with no sign bit
2888 APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
2889 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts,
2890 Depth + 1);
2891
2892 // If all of the MaskV bits are known to be zero, then we know the
2893 // output top bits are zero, because we now know that the output is
2894 // from [0-C].
2895 if ((Known2.Zero & MaskV) == MaskV) {
2896 unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
2897 // Top bits known zero.
2898 Known.Zero.setHighBits(NLZ2);
2899 }
2900 }
2901 }
2902
2903 // If low bits are known to be zero in both operands, then we know they are
2904 // going to be 0 in the result. Both addition and complement operations
2905 // preserve the low zero bits.
2906 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2907 unsigned KnownZeroLow = Known2.countMinTrailingZeros();
2908 if (KnownZeroLow == 0)
2909 break;
2910
2911 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2912 KnownZeroLow = std::min(KnownZeroLow, Known2.countMinTrailingZeros());
2913 Known.Zero.setLowBits(KnownZeroLow);
2914 break;
2915 }
2916 case ISD::UADDO:
2917 case ISD::SADDO:
2918 case ISD::ADDCARRY:
2919 if (Op.getResNo() == 1) {
2920 // If we know the result of a setcc has the top bits zero, use this info.
2921 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
2922 TargetLowering::ZeroOrOneBooleanContent &&
2923 BitWidth > 1)
2924 Known.Zero.setBitsFrom(1);
2925 break;
2926 }
2927 LLVM_FALLTHROUGH;
2928 case ISD::ADD:
2929 case ISD::ADDC:
2930 case ISD::ADDE: {
2931 // Output known-0 bits are known if clear or set in both the low clear bits
2932 // common to both LHS & RHS. For example, 8+(X<<3) is known to have the
2933 // low 3 bits clear.
2934 // Output known-0 bits are also known if the top bits of each input are
2935 // known to be clear. For example, if one input has the top 10 bits clear
2936 // and the other has the top 8 bits clear, we know the top 7 bits of the
2937 // output must be clear.
2938 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2939 unsigned KnownZeroHigh = Known2.countMinLeadingZeros();
2940 unsigned KnownZeroLow = Known2.countMinTrailingZeros();
2941
2942 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
2943 KnownZeroHigh = std::min(KnownZeroHigh, Known2.countMinLeadingZeros());
2944 KnownZeroLow = std::min(KnownZeroLow, Known2.countMinTrailingZeros());
2945
2946 if (Opcode == ISD::ADDE || Opcode == ISD::ADDCARRY) {
2947 // With ADDE and ADDCARRY, a carry bit may be added in, so we can only
2948 // use this information if we know (at least) that the low two bits are
2949 // clear. We then return to the caller that the low bit is unknown but
2950 // that other bits are known zero.
2951 if (KnownZeroLow >= 2)
2952 Known.Zero.setBits(1, KnownZeroLow);
2953 break;
2954 }
2955
2956 Known.Zero.setLowBits(KnownZeroLow);
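// The sum of two values that each have KnownZeroHigh leading zero bits can
// carry into the lowest of those zero positions, so only KnownZeroHigh - 1
// leading bits of the result are guaranteed zero.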
2957 if (KnownZeroHigh > 1)
2958 Known.Zero.setHighBits(KnownZeroHigh - 1);
2959 break;
2960 }
2961 case ISD::SREM:
2962 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
2963 const APInt &RA = Rem->getAPIntValue().abs();
2964 if (RA.isPowerOf2()) {
2965 APInt LowBits = RA - 1;
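// e.g. for X srem 8: LowBits = 0b111. The remainder keeps X's low 3 bits;
// the bits above them are all zero when the remainder is non-negative and
// all one when it is negative.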
2966 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2967
2968 // The low bits of the first operand are unchanged by the srem.
2969 Known.Zero = Known2.Zero & LowBits;
2970 Known.One = Known2.One & LowBits;
2971
2972 // If the first operand is non-negative or has all low bits zero, then
2973 // the upper bits are all zero.
2974 if (Known2.Zero[BitWidth-1] || ((Known2.Zero & LowBits) == LowBits))
2975 Known.Zero |= ~LowBits;
2976
2977 // If the first operand is negative and not all low bits are zero, then
2978 // the upper bits are all one.
2979 if (Known2.One[BitWidth-1] && ((Known2.One & LowBits) != 0))
2980 Known.One |= ~LowBits;
2981 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
2982 }
2983 }
2984 break;
2985 case ISD::UREM: {
2986 if (ConstantSDNode *Rem = isConstOrConstSplat(Op.getOperand(1))) {
2987 const APInt &RA = Rem->getAPIntValue();
2988 if (RA.isPowerOf2()) {
2989 APInt LowBits = (RA - 1);
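// e.g. X urem 8 == X & 7, so everything above the low 3 bits is known zero.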
2990 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
2991
2992 // The upper bits are all zero, the lower ones are unchanged.
2993 Known.Zero = Known2.Zero | ~LowBits;
2994 Known.One = Known2.One & LowBits;
2995 break;
2996 }
2997 }
2998
2999 // Since the result is less than or equal to either operand, any leading
3000 // zero bits in either operand must also exist in the result.
3001 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3002 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3003
3004 uint32_t Leaders =
3005 std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
3006 Known.resetAll();
3007 Known.Zero.setHighBits(Leaders);
3008 break;
3009 }
3010 case ISD::EXTRACT_ELEMENT: {
3011 Known = computeKnownBits(Op.getOperand(0), Depth+1);
3012 const unsigned Index = Op.getConstantOperandVal(1);
3013 const unsigned BitWidth = Op.getValueSizeInBits();
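// EXTRACT_ELEMENT returns the Index'th BitWidth-sized piece of the wider
// operand, so shift the known bits down by Index * BitWidth and truncate.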
3014
3015 // Remove low part of known bits mask
3016 Known.Zero = Known.Zero.getHiBits(Known.Zero.getBitWidth() - Index * BitWidth);
3017 Known.One = Known.One.getHiBits(Known.One.getBitWidth() - Index * BitWidth);
3018
3019 // Remove high part of known bit mask
3020 Known = Known.trunc(BitWidth);
3021 break;
3022 }
3023 case ISD::EXTRACT_VECTOR_ELT: {
3024 SDValue InVec = Op.getOperand(0);
3025 SDValue EltNo = Op.getOperand(1);
3026 EVT VecVT = InVec.getValueType();
3027 const unsigned BitWidth = Op.getValueSizeInBits();
3028 const unsigned EltBitWidth = VecVT.getScalarSizeInBits();
3029 const unsigned NumSrcElts = VecVT.getVectorNumElements();
3030 // If BitWidth > EltBitWidth the value is any-extended, so we do not know
3031 // anything about the extended bits.
3032 if (BitWidth > EltBitWidth)
3033 Known = Known.trunc(EltBitWidth);
3034 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3035 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) {
3036 // If we know the element index, just demand that vector element.
3037 unsigned Idx = ConstEltNo->getZExtValue();
3038 APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx);
3039 Known = computeKnownBits(InVec, DemandedElt, Depth + 1);
3040 } else {
3041 // Unknown element index, so ignore DemandedElts and demand them all.
3042 Known = computeKnownBits(InVec, Depth + 1);
3043 }
3044 if (BitWidth > EltBitWidth)
3045 Known = Known.zext(BitWidth);
3046 break;
3047 }
3048 case ISD::INSERT_VECTOR_ELT: {
3049 SDValue InVec = Op.getOperand(0);
3050 SDValue InVal = Op.getOperand(1);
3051 SDValue EltNo = Op.getOperand(2);
3052
3053 ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
3054 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
3055 // If we know the element index, split the demand between the
3056 // source vector and the inserted element.
3057 Known.Zero = Known.One = APInt::getAllOnesValue(BitWidth);
3058 unsigned EltIdx = CEltNo->getZExtValue();
3059
3060 // If we demand the inserted element then add its common known bits.
3061 if (DemandedElts[EltIdx]) {
3062 Known2 = computeKnownBits(InVal, Depth + 1);
3063 Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth());
3064 Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth());
3065 }
3066
3067 // If we demand the source vector then add its common known bits, ensuring
3068 // that we don't demand the inserted element.
3069 APInt VectorElts = DemandedElts & ~(APInt::getOneBitSet(NumElts, EltIdx));
3070 if (!!VectorElts) {
3071 Known2 = computeKnownBits(InVec, VectorElts, Depth + 1);
3072 Known.One &= Known2.One;
3073 Known.Zero &= Known2.Zero;
3074 }
3075 } else {
3076 // Unknown element index, so ignore DemandedElts and demand them all.
3077 Known = computeKnownBits(InVec, Depth + 1);
3078 Known2 = computeKnownBits(InVal, Depth + 1);
3079 Known.One &= Known2.One.zextOrTrunc(Known.One.getBitWidth());
3080 Known.Zero &= Known2.Zero.zextOrTrunc(Known.Zero.getBitWidth());
3081 }
3082 break;
3083 }
3084 case ISD::BITREVERSE: {
3085 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3086 Known.Zero = Known2.Zero.reverseBits();
3087 Known.One = Known2.One.reverseBits();
3088 break;
3089 }
3090 case ISD::BSWAP: {
3091 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3092 Known.Zero = Known2.Zero.byteSwap();
3093 Known.One = Known2.One.byteSwap();
3094 break;
3095 }
3096 case ISD::ABS: {
3097 Known2 = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3098
3099 // If the source's MSB is zero then we know the rest of the bits already.
3100 if (Known2.isNonNegative()) {
3101 Known.Zero = Known2.Zero;
3102 Known.One = Known2.One;
3103 break;
3104 }
3105
3106 // We only know that the absolute value's MSB will be zero if there is
3107 // a set bit other than the sign bit (otherwise the value could be INT_MIN).
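// (In two's complement, abs(INT_MIN) wraps back to INT_MIN, whose MSB is set.)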
3108 Known2.One.clearSignBit();
3109 if (Known2.One.getBoolValue()) {
3110 Known.Zero = APInt::getSignMask(BitWidth);
3111 break;
3112 }
3113 break;
3114 }
3115 case ISD::UMIN: {
3116 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3117 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3118
3119 // UMIN - we know that the result will have the maximum of the
3120 // known zero leading bits of the inputs.
3121 unsigned LeadZero = Known.countMinLeadingZeros();
3122 LeadZero = std::max(LeadZero, Known2.countMinLeadingZeros());
3123
3124 Known.Zero &= Known2.Zero;
3125 Known.One &= Known2.One;
3126 Known.Zero.setHighBits(LeadZero);
3127 break;
3128 }
3129 case ISD::UMAX: {
3130 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3131 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3132
3133 // UMAX - we know that the result will have the maximum of the
3134 // known one leading bits of the inputs.
3135 unsigned LeadOne = Known.countMinLeadingOnes();
3136 LeadOne = std::max(LeadOne, Known2.countMinLeadingOnes());
3137
3138 Known.Zero &= Known2.Zero;
3139 Known.One &= Known2.One;
3140 Known.One.setHighBits(LeadOne);
3141 break;
3142 }
3143 case ISD::SMIN:
3144 case ISD::SMAX: {
3145 // If we have a clamp pattern, we know that the number of sign bits will be
3146 // the minimum of the clamp min/max range.
3147 bool IsMax = (Opcode == ISD::SMAX);
3148 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
3149 if ((CstLow = isConstOrDemandedConstSplat(Op.getOperand(1), DemandedElts)))
3150 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
3151 CstHigh = isConstOrDemandedConstSplat(Op.getOperand(0).getOperand(1),
3152 DemandedElts);
3153 if (CstLow && CstHigh) {
3154 if (!IsMax)
3155 std::swap(CstLow, CstHigh);
3156
3157 const APInt &ValueLow = CstLow->getAPIntValue();
3158 const APInt &ValueHigh = CstHigh->getAPIntValue();
3159 if (ValueLow.sle(ValueHigh)) {
3160 unsigned LowSignBits = ValueLow.getNumSignBits();
3161 unsigned HighSignBits = ValueHigh.getNumSignBits();
3162 unsigned MinSignBits = std::min(LowSignBits, HighSignBits);
3163 if (ValueLow.isNegative() && ValueHigh.isNegative()) {
3164 Known.One.setHighBits(MinSignBits);
3165 break;
3166 }
3167 if (ValueLow.isNonNegative() && ValueHigh.isNonNegative()) {
3168 Known.Zero.setHighBits(MinSignBits);
3169 break;
3170 }
3171 }
3172 }
3173
3174 // Fallback - just get the shared known bits of the operands.
3175 Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
3176 if (Known.isUnknown()) break; // Early-out
3177 Known2 = computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
3178 Known.Zero &= Known2.Zero;
3179 Known.One &= Known2.One;
3180 break;
3181 }
3182 case ISD::FrameIndex:
3183 case ISD::TargetFrameIndex:
3184 TLI->computeKnownBitsForFrameIndex(Op, Known, DemandedElts, *this, Depth);
3185 break;
3186
3187 default:
3188 if (Opcode < ISD::BUILTIN_OP_END)
3189 break;
3190 LLVM_FALLTHROUGH;
3191 case ISD::INTRINSIC_WO_CHAIN:
3192 case ISD::INTRINSIC_W_CHAIN:
3193 case ISD::INTRINSIC_VOID:
3194 // Allow the target to implement this method for its nodes.
3195 TLI->computeKnownBitsForTargetNode(Op, Known, DemandedElts, *this, Depth);
3196 break;
3197 }
3198
3199 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
3200 return Known;
3201 }
3202
3203 SelectionDAG::OverflowKind SelectionDAG::computeOverflowKind(SDValue N0,
3204 SDValue N1) const {
3205 // X + 0 never overflows.
3206 if (isNullConstant(N1))
3207 return OFK_Never;
3208
3209 KnownBits N1Known = computeKnownBits(N1);
3210 if (N1Known.Zero.getBoolValue()) {
3211 KnownBits N0Known = computeKnownBits(N0);
3212
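// ~Known.Zero is an upper bound on each operand's value: if even those upper
// bounds add without unsigned overflow, the real addition can never overflow.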
3213 bool overflow;
3214 (void)(~N0Known.Zero).uadd_ov(~N1Known.Zero, overflow);
3215 if (!overflow)
3216 return OFK_Never;
3217 }
3218
3219 // mulhi + 1 never overflows: the high half of a full unsigned multiply is at
3219 // most 2^BitWidth - 2, and the check below proves N1 is known to be 0 or 1.
3220 if (N0.getOpcode() == ISD::UMUL_LOHI && N0.getResNo() == 1 &&
3221 (~N1Known.Zero & 0x01) == ~N1Known.Zero)
3222 return OFK_Never;
3223
3224 if (N1.getOpcode() == ISD::UMUL_LOHI && N1.getResNo() == 1) {
3225 KnownBits N0Known = computeKnownBits(N0);
3226
3227 if ((~N0Known.Zero & 0x01) == ~N0Known.Zero)
3228 return OFK_Never;
3229 }
3230
3231 return OFK_Sometime;
3232 }
3233
3234 bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
3235 EVT OpVT = Val.getValueType();
3236 unsigned BitWidth = OpVT.getScalarSizeInBits();
3237
3238 // Is the constant a known power of 2?
3239 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Val))
3240 return Const->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3241
3242 // A left-shift of a constant one will have exactly one bit set because
3243 // shifting the bit off the end is undefined.
3244 if (Val.getOpcode() == ISD::SHL) {
3245 auto *C = isConstOrConstSplat(Val.getOperand(0));
3246 if (C && C->getAPIntValue() == 1)
3247 return true;
3248 }
3249
3250 // Similarly, a logical right-shift of a constant sign-bit will have exactly
3251 // one bit set.
3252 if (Val.getOpcode() == ISD::SRL) {
3253 auto *C = isConstOrConstSplat(Val.getOperand(0));
3254 if (C && C->getAPIntValue().isSignMask())
3255 return true;
3256 }
3257
3258 // Are all operands of a build vector constant powers of two?
3259 if (Val.getOpcode() == ISD::BUILD_VECTOR)
3260 if (llvm::all_of(Val->ops(), [BitWidth](SDValue E) {
3261 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(E))
3262 return C->getAPIntValue().zextOrTrunc(BitWidth).isPowerOf2();
3263 return false;
3264 }))
3265 return true;
3266
3267 // More could be done here, though the above checks are enough
3268 // to handle some common cases.
3269
3270 // Fall back to computeKnownBits to catch other known cases.
3271 KnownBits Known = computeKnownBits(Val);
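// A value with at most one possibly-set bit and at least one definitely-set
// bit has exactly one bit set, i.e. it is a power of two.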
3272 return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
3273 }
3274
3275 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
3276 EVT VT = Op.getValueType();
3277 APInt DemandedElts = VT.isVector()
3278 ? APInt::getAllOnesValue(VT.getVectorNumElements())
3279 : APInt(1, 1);
3280 return ComputeNumSignBits(Op, DemandedElts, Depth);
3281 }
3282
3283 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
3284 unsigned Depth) const {
3285 EVT VT = Op.getValueType();
3286 assert((VT.isInteger() || VT.isFloatingPoint()) && "Invalid VT!");
3287 unsigned VTBits = VT.getScalarSizeInBits();
3288 unsigned NumElts = DemandedElts.getBitWidth();
3289 unsigned Tmp, Tmp2;
3290 unsigned FirstAnswer = 1;
3291
3292 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
3293 const APInt &Val = C->getAPIntValue();
3294 return Val.getNumSignBits();
3295 }
3296
3297 if (Depth == 6)
3298 return 1; // Limit search depth.
3299
3300 if (!DemandedElts)
3301 return 1; // No demanded elts, better to assume we don't know anything.
3302
3303 unsigned Opcode = Op.getOpcode();
3304 switch (Opcode) {
3305 default: break;
3306 case ISD::AssertSext:
3307 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
3308 return VTBits-Tmp+1;
3309 case ISD::AssertZext:
3310 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
3311 return VTBits-Tmp;
3312
3313 case ISD::BUILD_VECTOR:
3314 Tmp = VTBits;
3315 for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) {
3316 if (!DemandedElts[i])
3317 continue;
3318
3319 SDValue SrcOp = Op.getOperand(i);
3320 Tmp2 = ComputeNumSignBits(Op.getOperand(i), Depth + 1);
3321
3322 // BUILD_VECTOR can implicitly truncate sources, we must handle this.
3323 if (SrcOp.getValueSizeInBits() != VTBits) {
3324 assert(SrcOp.getValueSizeInBits() > VTBits &&
3325 "Expected BUILD_VECTOR implicit truncation");
3326 unsigned ExtraBits = SrcOp.getValueSizeInBits() - VTBits;
3327 Tmp2 = (Tmp2 > ExtraBits ? Tmp2 - ExtraBits : 1);
3328 }
3329 Tmp = std::min(Tmp, Tmp2);
3330 }
3331 return Tmp;
3332
3333 case ISD::VECTOR_SHUFFLE: {
3334 // Collect the minimum number of sign bits that are shared by every vector
3335 // element referenced by the shuffle.
3336 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
3337 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
3338 assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
3339 for (unsigned i = 0; i != NumElts; ++i) {
3340 int M = SVN->getMaskElt(i);
3341 if (!DemandedElts[i])
3342 continue;
3343 // For UNDEF elements, we don't know anything about the common state of
3344 // the shuffle result.
3345 if (M < 0)
3346 return 1;
3347 if ((unsigned)M < NumElts)
3348 DemandedLHS.setBit((unsigned)M % NumElts);
3349 else
3350 DemandedRHS.setBit((unsigned)M % NumElts);
3351 }
3352 Tmp = std::numeric_limits<unsigned>::max();
3353 if (!!DemandedLHS)
3354 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
3355 if (!!DemandedRHS) {
3356 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
3357 Tmp = std::min(Tmp, Tmp2);
3358 }
3359 // If we don't know anything, early out and try computeKnownBits fall-back.
3360 if (Tmp == 1)
3361 break;
3362 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3363 return Tmp;
3364 }
3365
3366 case ISD::BITCAST: {
3367 SDValue N0 = Op.getOperand(0);
3368 EVT SrcVT = N0.getValueType();
3369 unsigned SrcBits = SrcVT.getScalarSizeInBits();
3370
3371 // Ignore bitcasts from unsupported types.
3372 if (!(SrcVT.isInteger() || SrcVT.isFloatingPoint()))
3373 break;
3374
3375 // Fast handling of 'identity' bitcasts.
3376 if (VTBits == SrcBits)
3377 return ComputeNumSignBits(N0, DemandedElts, Depth + 1);
3378
3379 bool IsLE = getDataLayout().isLittleEndian();
3380
3381 // Bitcast 'large element' scalar/vector to 'small element' vector.
3382 if ((SrcBits % VTBits) == 0) {
3383 assert(VT.isVector() && "Expected bitcast to vector");
3384
3385 unsigned Scale = SrcBits / VTBits;
3386 APInt SrcDemandedElts(NumElts / Scale, 0);
3387 for (unsigned i = 0; i != NumElts; ++i)
3388 if (DemandedElts[i])
3389 SrcDemandedElts.setBit(i / Scale);
3390
3391 // Fast case - sign splat can be simply split across the small elements.
3392 Tmp = ComputeNumSignBits(N0, SrcDemandedElts, Depth + 1);
3393 if (Tmp == SrcBits)
3394 return VTBits;
3395
3396 // Slow case - determine how far the sign extends into each sub-element.
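// e.g. bitcasting an i32 with 20 sign bits to v4i8 (little endian): the top
// byte has all 8 bits as sign bits, the next byte still has 8, the one below
// has 20 - 16 = 4, and demanding the lowest byte forces us to return 1.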
3397 Tmp2 = VTBits;
3398 for (unsigned i = 0; i != NumElts; ++i)
3399 if (DemandedElts[i]) {
3400 unsigned SubOffset = i % Scale;
3401 SubOffset = (IsLE ? ((Scale - 1) - SubOffset) : SubOffset);
3402 SubOffset = SubOffset * VTBits;
3403 if (Tmp <= SubOffset)
3404 return 1;
3405 Tmp2 = std::min(Tmp2, Tmp - SubOffset);
3406 }
3407 return Tmp2;
3408 }
3409 break;
3410 }
3411
3412 case ISD::SIGN_EXTEND:
3413 Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits();
3414 return ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1) + Tmp;
3415 case ISD::SIGN_EXTEND_INREG:
3416 // Max of the input and what this extends.
3417 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
3418 Tmp = VTBits-Tmp+1;
3419 Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3420 return std::max(Tmp, Tmp2);
3421 case ISD::SIGN_EXTEND_VECTOR_INREG: {
3422 SDValue Src = Op.getOperand(0);
3423 EVT SrcVT = Src.getValueType();
3424 APInt DemandedSrcElts = DemandedElts.zextOrSelf(SrcVT.getVectorNumElements());
3425 Tmp = VTBits - SrcVT.getScalarSizeInBits();
3426 return ComputeNumSignBits(Src, DemandedSrcElts, Depth+1) + Tmp;
3427 }
3428
3429 case ISD::SRA:
3430 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3431 // SRA X, C -> adds C sign bits.
3432 if (ConstantSDNode *C =
3433 isConstOrDemandedConstSplat(Op.getOperand(1), DemandedElts)) {
3434 APInt ShiftVal = C->getAPIntValue();
3435 ShiftVal += Tmp;
3436 Tmp = ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
3437 }
3438 return Tmp;
3439 case ISD::SHL:
3440 if (ConstantSDNode *C =
3441 isConstOrDemandedConstSplat(Op.getOperand(1), DemandedElts)) {
3442 // shl destroys sign bits.
3443 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3444 if (C->getAPIntValue().uge(VTBits) || // Bad shift.
3445 C->getAPIntValue().uge(Tmp)) break; // Shifted all sign bits out.
3446 return Tmp - C->getZExtValue();
3447 }
3448 break;
3449 case ISD::AND:
3450 case ISD::OR:
3451 case ISD::XOR: // NOT is handled here.
3452 // Logical binary ops preserve the number of sign bits at the worst.
3453 Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
3454 if (Tmp != 1) {
3455 Tmp2 = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
3456 FirstAnswer = std::min(Tmp, Tmp2);
3457 // We computed what we know about the sign bits as our first
3458 // answer. Now proceed to the generic code that uses
3459 // computeKnownBits, and pick whichever answer is better.
3460 }
3461 break;
3462
3463 case ISD::SELECT:
3464 case ISD::VSELECT:
3465 Tmp = ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth+1);
3466 if (Tmp == 1) return 1; // Early out.
3467 Tmp2 = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
3468 return std::min(Tmp, Tmp2);
3469 case ISD::SELECT_CC:
3470 Tmp = ComputeNumSignBits(Op.getOperand(2), DemandedElts, Depth+1);
3471 if (Tmp == 1) return 1; // Early out.
3472 Tmp2 = ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth+1);
3473 return std::min(Tmp, Tmp2);
3474
3475 case ISD::SMIN:
3476 case ISD::SMAX: {
3477 // If we have a clamp pattern, we know that the number of sign bits will be
3478 // the minimum of the clamp min/max range.
3479 bool IsMax = (Opcode == ISD::SMAX);
3480 ConstantSDNode *CstLow = nullptr, *CstHigh = nullptr;
3481 if ((CstLow = isConstOrDemandedConstSplat(Op.getOperand(1), DemandedElts)))
3482 if (Op.getOperand(0).getOpcode() == (IsMax ? ISD::SMIN : ISD::SMAX))
3483 CstHigh = isConstOrDemandedConstSplat(Op.getOperand(0).getOperand(1),
3484 DemandedElts);
3485 if (CstLow && CstHigh) {
3486 if (!IsMax)
3487 std::swap(CstLow, CstHigh);
3488 if (CstLow->getAPIntValue().sle(CstHigh->getAPIntValue())) {
3489 Tmp = CstLow->getAPIntValue().getNumSignBits();
3490 Tmp2 = CstHigh->getAPIntValue().getNumSignBits();
3491 return std::min(Tmp, Tmp2);
3492 }
3493 }
3494
3495 // Fallback - just get the minimum number of sign bits of the operands.
3496 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3497 if (Tmp == 1)
3498 return 1; // Early out.
3499 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
3500 return std::min(Tmp, Tmp2);
3501 }
3502 case ISD::UMIN:
3503 case ISD::UMAX:
3504 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3505 if (Tmp == 1)
3506 return 1; // Early out.
3507 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth + 1);
3508 return std::min(Tmp, Tmp2);
3509 case ISD::SADDO:
3510 case ISD::UADDO:
3511 case ISD::SSUBO:
3512 case ISD::USUBO:
3513 case ISD::SMULO:
3514 case ISD::UMULO:
3515 if (Op.getResNo() != 1)
3516 break;
3517 // The boolean result conforms to getBooleanContents.
3518 // If the boolean is represented as 0/-1, all bits are sign bits.
3519 // We know that we have an integer-based boolean since these operations
3520 // are only available for integer types.
3521 if (TLI->getBooleanContents(VT.isVector(), false) ==
3522 TargetLowering::ZeroOrNegativeOneBooleanContent)
3523 return VTBits;
3524 break;
3525 case ISD::SETCC:
3526 // If setcc returns 0/-1, all bits are sign bits.
3527 if (TLI->getBooleanContents(Op.getOperand(0).getValueType()) ==
3528 TargetLowering::ZeroOrNegativeOneBooleanContent)
3529 return VTBits;
3530 break;
3531 case ISD::ROTL:
3532 case ISD::ROTR:
3533 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3534 unsigned RotAmt = C->getAPIntValue().urem(VTBits);
3535
3536 // Handle rotate right by N like a rotate left by VTBits-N.
3537 if (Opcode == ISD::ROTR)
3538 RotAmt = (VTBits - RotAmt) % VTBits;
3539
3540 // If we aren't rotating out all of the known-in sign bits, return the
3541 // number that are left. This handles rotl(sext(x), 1) for example.
3542 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3543 if (Tmp > (RotAmt + 1)) return (Tmp - RotAmt);
3544 }
3545 break;
3546 case ISD::ADD:
3547 case ISD::ADDC:
3548 // Add can have at most one carry bit. Thus we know that the output
3549 // is, at worst, one more bit than the inputs.
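// e.g. adding two i8 values that each have at least 3 sign bits (each in
// [-32, 31]) gives a sum in [-64, 62], which still has at least 2 sign bits.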
3550 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3551 if (Tmp == 1) return 1; // Early out.
3552
3553 // Special case decrementing a value (ADD X, -1):
3554 if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
3555 if (CRHS->isAllOnesValue()) {
3556 KnownBits Known = computeKnownBits(Op.getOperand(0), Depth+1);
3557
3558 // If the input is known to be 0 or 1, the output is 0/-1, which is all
3559 // sign bits set.
3560 if ((Known.Zero | 1).isAllOnesValue())
3561 return VTBits;
3562
3563 // If we are subtracting one from a positive number, there is no carry
3564 // out of the result.
3565 if (Known.isNonNegative())
3566 return Tmp;
3567 }
3568
3569 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
3570 if (Tmp2 == 1) return 1;
3571 return std::min(Tmp, Tmp2)-1;
3572
3573 case ISD::SUB:
3574 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
3575 if (Tmp2 == 1) return 1;
3576
3577 // Handle NEG.
3578 if (ConstantSDNode *CLHS = isConstOrConstSplat(Op.getOperand(0)))
3579 if (CLHS->isNullValue()) {
3580 KnownBits Known = computeKnownBits(Op.getOperand(1), Depth+1);
3581 // If the input is known to be 0 or 1, the output is 0/-1, which is all
3582 // sign bits set.
3583 if ((Known.Zero | 1).isAllOnesValue())
3584 return VTBits;
3585
3586 // If the input is known to be positive (the sign bit is known clear),
3587 // the output of the NEG has the same number of sign bits as the input.
3588 if (Known.isNonNegative())
3589 return Tmp2;
3590
3591 // Otherwise, we treat this like a SUB.
3592 }
3593
3594 // Sub can have at most one carry bit. Thus we know that the output
3595 // is, at worst, one more bit than the inputs.
3596 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3597 if (Tmp == 1) return 1; // Early out.
3598 return std::min(Tmp, Tmp2)-1;
3599 case ISD::TRUNCATE: {
3600 // Check if the sign bits of source go down as far as the truncated value.
3601 unsigned NumSrcBits = Op.getOperand(0).getScalarValueSizeInBits();
3602 unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
3603 if (NumSrcSignBits > (NumSrcBits - VTBits))
3604 return NumSrcSignBits - (NumSrcBits - VTBits);
3605 break;
3606 }
3607 case ISD::EXTRACT_ELEMENT: {
3608 const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
3609 const int BitWidth = Op.getValueSizeInBits();
3610 const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;
3611
3612 // Compute the index counted from the big end: operand 1 indexes elements
3613 // from the little end, but the sign bits live at the big end.
3614 const int rIndex = Items - 1 - Op.getConstantOperandVal(1);
3615
3616 // If the sign portion extends into our element, the subtraction gives the
3617 // correct result; otherwise it is negative or exceeds BitWidth, so clamp.
3618 return std::max(std::min(KnownSign - rIndex * BitWidth, BitWidth), 0);
3619 }
3620 case ISD::INSERT_VECTOR_ELT: {
3621 SDValue InVec = Op.getOperand(0);
3622 SDValue InVal = Op.getOperand(1);
3623 SDValue EltNo = Op.getOperand(2);
3624 unsigned NumElts = InVec.getValueType().getVectorNumElements();
3625
3626 ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
3627 if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
3628 // If we know the element index, split the demand between the
3629 // source vector and the inserted element.
3630 unsigned EltIdx = CEltNo->getZExtValue();
3631
3632 // If we demand the inserted element then get its sign bits.
3633 Tmp = std::numeric_limits<unsigned>::max();
3634 if (DemandedElts[EltIdx]) {
3635 // TODO - handle implicit truncation of inserted elements.
3636 if (InVal.getScalarValueSizeInBits() != VTBits)
3637 break;
3638 Tmp = ComputeNumSignBits(InVal, Depth + 1);
3639 }
3640
3641 // If we demand the source vector then get its sign bits, and determine
3642 // the minimum.
3643 APInt VectorElts = DemandedElts;
3644 VectorElts.clearBit(EltIdx);
3645 if (!!VectorElts) {
3646 Tmp2 = ComputeNumSignBits(InVec, VectorElts, Depth + 1);
3647 Tmp = std::min(Tmp, Tmp2);
3648 }
3649 } else {
3650 // Unknown element index, so ignore DemandedElts and demand them all.
3651 Tmp = ComputeNumSignBits(InVec, Depth + 1);
3652 Tmp2 = ComputeNumSignBits(InVal, Depth + 1);
3653 Tmp = std::min(Tmp, Tmp2);
3654 }
3655 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3656 return Tmp;
3657 }
3658 case ISD::EXTRACT_VECTOR_ELT: {
3659 SDValue InVec = Op.getOperand(0);
3660 SDValue EltNo = Op.getOperand(1);
3661 EVT VecVT = InVec.getValueType();
3662 const unsigned BitWidth = Op.getValueSizeInBits();
3663 const unsigned EltBitWidth = Op.getOperand(0).getScalarValueSizeInBits();
3664 const unsigned NumSrcElts = VecVT.getVectorNumElements();
3665
3666 // If BitWidth > EltBitWidth the value is anyext:ed, and we do not know
3667 // anything about sign bits. But if the sizes match we can derive knowledge
3668 // about sign bits from the vector operand.
3669 if (BitWidth != EltBitWidth)
3670 break;
3671
3672 // If we know the element index, just demand that vector element, else for
3673 // an unknown element index, ignore DemandedElts and demand them all.
3674 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts);
3675 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
3676 if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts))
3677 DemandedSrcElts =
3678 APInt::getOneBitSet(NumSrcElts, ConstEltNo->getZExtValue());
3679
3680 return ComputeNumSignBits(InVec, DemandedSrcElts, Depth + 1);
3681 }
3682 case ISD::EXTRACT_SUBVECTOR: {
3683 // If we know the element index, just demand that subvector elements,
3684 // otherwise demand them all.
3685 SDValue Src = Op.getOperand(0);
3686 ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
3687 unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
3688 if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
3689 // Offset the demanded elts by the subvector index.
3690 uint64_t Idx = SubIdx->getZExtValue();
3691 APInt DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
3692 return ComputeNumSignBits(Src, DemandedSrc, Depth + 1);
3693 }
3694 return ComputeNumSignBits(Src, Depth + 1);
3695 }
3696 case ISD::CONCAT_VECTORS: {
3697 // Determine the minimum number of sign bits across all demanded
3698 // elts of the input vectors. Early out if the result is already 1.
3699 Tmp = std::numeric_limits<unsigned>::max();
3700 EVT SubVectorVT = Op.getOperand(0).getValueType();
3701 unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
3702 unsigned NumSubVectors = Op.getNumOperands();
3703 for (unsigned i = 0; (i < NumSubVectors) && (Tmp > 1); ++i) {
3704 APInt DemandedSub = DemandedElts.lshr(i * NumSubVectorElts);
3705 DemandedSub = DemandedSub.trunc(NumSubVectorElts);
3706 if (!DemandedSub)
3707 continue;
3708 Tmp2 = ComputeNumSignBits(Op.getOperand(i), DemandedSub, Depth + 1);
3709 Tmp = std::min(Tmp, Tmp2);
3710 }
3711 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3712 return Tmp;
3713 }
3714 case ISD::INSERT_SUBVECTOR: {
3715 // If we know the element index, demand any elements from the subvector and
3716 // the remainder from the src its inserted into, otherwise demand them all.
3717 SDValue Src = Op.getOperand(0);
3718 SDValue Sub = Op.getOperand(1);
3719 auto *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
3720 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
3721 if (SubIdx && SubIdx->getAPIntValue().ule(NumElts - NumSubElts)) {
3722 Tmp = std::numeric_limits<unsigned>::max();
3723 uint64_t Idx = SubIdx->getZExtValue();
3724 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
3725 if (!!DemandedSubElts) {
3726 Tmp = ComputeNumSignBits(Sub, DemandedSubElts, Depth + 1);
3727 if (Tmp == 1) return 1; // early-out
3728 }
3729 APInt SubMask = APInt::getBitsSet(NumElts, Idx, Idx + NumSubElts);
3730 APInt DemandedSrcElts = DemandedElts & ~SubMask;
3731 if (!!DemandedSrcElts) {
3732 Tmp2 = ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
3733 Tmp = std::min(Tmp, Tmp2);
3734 }
3735 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3736 return Tmp;
3737 }
3738
3739 // Not able to determine the index so just assume worst case.
3740 Tmp = ComputeNumSignBits(Sub, Depth + 1);
3741 if (Tmp == 1) return 1; // early-out
3742 Tmp2 = ComputeNumSignBits(Src, Depth + 1);
3743 Tmp = std::min(Tmp, Tmp2);
3744 assert(Tmp <= VTBits && "Failed to determine minimum sign bits");
3745 return Tmp;
3746 }
3747 }
3748
3749 // If we are looking at the loaded value of the SDNode.
3750 if (Op.getResNo() == 0) {
3751 // Handle LOADX separately here. EXTLOAD case will fallthrough.
3752 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
3753 unsigned ExtType = LD->getExtensionType();
3754 switch (ExtType) {
3755 default: break;
3756 case ISD::SEXTLOAD: // e.g. a sextload of i16 into i32 has 32-16+1 = 17 sign bits.
3757 Tmp = LD->getMemoryVT().getScalarSizeInBits();
3758 return VTBits-Tmp+1;
3759 case ISD::ZEXTLOAD: // e.g. a zextload of i16 into i32 has 32-16 = 16 sign bits.
3760 Tmp = LD->getMemoryVT().getScalarSizeInBits();
3761 return VTBits-Tmp;
3762 }
3763 }
3764 }
3765
3766 // Allow the target to implement this method for its nodes.
3767 if (Opcode >= ISD::BUILTIN_OP_END ||
3768 Opcode == ISD::INTRINSIC_WO_CHAIN ||
3769 Opcode == ISD::INTRINSIC_W_CHAIN ||
3770 Opcode == ISD::INTRINSIC_VOID) {
3771 unsigned NumBits =
3772 TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth);
3773 if (NumBits > 1)
3774 FirstAnswer = std::max(FirstAnswer, NumBits);
3775 }
3776
3777 // Finally, if we can prove that the top bits of the result are 0's or 1's,
3778 // use this information.
3779 KnownBits Known = computeKnownBits(Op, DemandedElts, Depth);
3780
3781 APInt Mask;
3782 if (Known.isNonNegative()) { // sign bit is 0
3783 Mask = Known.Zero;
3784 } else if (Known.isNegative()) { // sign bit is 1;
3785 Mask = Known.One;
3786 } else {
3787 // Nothing known.
3788 return FirstAnswer;
3789 }
3790
3791 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
3792 // the number of identical bits in the top of the input value.
3793 Mask = ~Mask;
3794 Mask <<= Mask.getBitWidth()-VTBits;
3795 // Return # leading zeros. We use 'min' here in case Val was zero before
3796 // shifting. We don't want to return '64' as for an i32 "0".
3797 return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
3798 }
3799
3800 bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
3801 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
3802 !isa<ConstantSDNode>(Op.getOperand(1)))
3803 return false;
3804
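// An OR only behaves like an ADD (base + constant offset) when the constant's
// set bits cannot overlap any bits that may be set in the other operand.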
3805 if (Op.getOpcode() == ISD::OR &&
3806 !MaskedValueIsZero(Op.getOperand(0),
3807 cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
3808 return false;
3809
3810 return true;
3811 }
3812
3813 bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN, unsigned Depth) const {
3814 // If we're told that NaNs won't happen, assume they won't.
3815 if (getTarget().Options.NoNaNsFPMath || Op->getFlags().hasNoNaNs())
3816 return true;
3817
3818 if (Depth == 6)
3819 return false; // Limit search depth.
3820
3821 // TODO: Handle vectors.
3822 // If the value is a constant, we can obviously see if it is a NaN or not.
3823 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) {
3824 return !C->getValueAPF().isNaN() ||
3825 (SNaN && !C->getValueAPF().isSignaling());
3826 }
3827
3828 unsigned Opcode = Op.getOpcode();
3829 switch (Opcode) {
3830 case ISD::FADD:
3831 case ISD::FSUB:
3832 case ISD::FMUL:
3833 case ISD::FDIV:
3834 case ISD::FREM:
3835 case ISD::FSIN:
3836 case ISD::FCOS: {
3837 if (SNaN)
3838 return true;
3839 // TODO: Need isKnownNeverInfinity
3840 return false;
3841 }
3842 case ISD::FCANONICALIZE:
3843 case ISD::FEXP:
3844 case ISD::FEXP2:
3845 case ISD::FTRUNC:
3846 case ISD::FFLOOR:
3847 case ISD::FCEIL:
3848 case ISD::FROUND:
3849 case ISD::FRINT:
3850 case ISD::FNEARBYINT: {
3851 if (SNaN)
3852 return true;
3853 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
3854 }
3855 case ISD::FABS:
3856 case ISD::FNEG:
3857 case ISD::FCOPYSIGN: {
3858 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
3859 }
3860 case ISD::SELECT:
3861 return isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
3862 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
3863 case ISD::FP_EXTEND:
3864 case ISD::FP_ROUND: {
3865 if (SNaN)
3866 return true;
3867 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
3868 }
3869 case ISD::SINT_TO_FP:
3870 case ISD::UINT_TO_FP:
3871 return true;
3872 case ISD::FMA:
3873 case ISD::FMAD: {
3874 if (SNaN)
3875 return true;
3876 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
3877 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
3878 isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
3879 }
3880 case ISD::FSQRT: // Would need to know the operand is non-negative.
3881 case ISD::FLOG:
3882 case ISD::FLOG2:
3883 case ISD::FLOG10:
3884 case ISD::FPOWI:
3885 case ISD::FPOW: {
3886 if (SNaN)
3887 return true;
3888 // TODO: Refine on operand
3889 return false;
3890 }
3891 case ISD::FMINNUM:
3892 case ISD::FMAXNUM: {
3893 // Only one needs to be known not-nan, since it will be returned if the
3894 // other ends up being one.
3895 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) ||
3896 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
3897 }
3898 case ISD::FMINNUM_IEEE:
3899 case ISD::FMAXNUM_IEEE: {
3900 if (SNaN)
3901 return true;
3902 // This can return a NaN if either operand is an sNaN, or if both operands
3903 // are NaN.
3904 return (isKnownNeverNaN(Op.getOperand(0), false, Depth + 1) &&
3905 isKnownNeverSNaN(Op.getOperand(1), Depth + 1)) ||
3906 (isKnownNeverNaN(Op.getOperand(1), false, Depth + 1) &&
3907 isKnownNeverSNaN(Op.getOperand(0), Depth + 1));
3908 }
3909 case ISD::FMINIMUM:
3910 case ISD::FMAXIMUM: {
3911 // TODO: Does this quiet or return the original NaN as-is?
3912 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
3913 isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
3914 }
3915 case ISD::EXTRACT_VECTOR_ELT: {
3916 return isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
3917 }
3918 default:
3919 if (Opcode >= ISD::BUILTIN_OP_END ||
3920 Opcode == ISD::INTRINSIC_WO_CHAIN ||
3921 Opcode == ISD::INTRINSIC_W_CHAIN ||
3922 Opcode == ISD::INTRINSIC_VOID) {
3923 return TLI->isKnownNeverNaNForTargetNode(Op, *this, SNaN, Depth);
3924 }
3925
3926 return false;
3927 }
3928 }
3929
3930 bool SelectionDAG::isKnownNeverZeroFloat(SDValue Op) const {
3931 assert(Op.getValueType().isFloatingPoint() &&
3932 "Floating point type expected");
3933
3934 // If the value is a constant, we can obviously see if it is a zero or not.
3935 // TODO: Add BuildVector support.
3936 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
3937 return !C->isZero();
3938 return false;
3939 }
3940
3941 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
3942 assert(!Op.getValueType().isFloatingPoint() &&
3943 "Floating point types unsupported - use isKnownNeverZeroFloat");
3944
3945 // If the value is a constant, we can obviously see if it is a zero or not.
3946 if (ISD::matchUnaryPredicate(
3947 Op, [](ConstantSDNode *C) { return !C->isNullValue(); }))
3948 return true;
3949
3950 // TODO: Recognize more cases here.
3951 switch (Op.getOpcode()) {
3952 default: break;
3953 case ISD::OR:
3954 if (isKnownNeverZero(Op.getOperand(1)) ||
3955 isKnownNeverZero(Op.getOperand(0)))
3956 return true;
3957 break;
3958 }
3959
3960 return false;
3961 }
3962
3963 bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
3964 // Check the obvious case.
3965 if (A == B) return true;
3966
3967 // Check for negative and positive zero.
3968 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
3969 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
3970 if (CA->isZero() && CB->isZero()) return true;
3971
3972 // Otherwise they may not be equal.
3973 return false;
3974 }
3975
3976 // FIXME: unify with llvm::haveNoCommonBitsSet.
3977 // FIXME: could also handle masked merge pattern (X & ~M) op (Y & M)
3978 bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
3979 assert(A.getValueType() == B.getValueType() &&
3980 "Values must have the same type");
3981 return (computeKnownBits(A).Zero | computeKnownBits(B).Zero).isAllOnesValue();
3982 }
3983
3984 static SDValue FoldBUILD_VECTOR(const SDLoc &DL, EVT VT,
3985 ArrayRef<SDValue> Ops,
3986 SelectionDAG &DAG) {
3987 int NumOps = Ops.size();
3988 assert(NumOps != 0 && "Can't build an empty vector!");
3989 assert(VT.getVectorNumElements() == (unsigned)NumOps &&
3990 "Incorrect element count in BUILD_VECTOR!");
3991
3992 // BUILD_VECTOR of UNDEFs is UNDEF.
3993 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
3994 return DAG.getUNDEF(VT);
3995
3996 // A BUILD_VECTOR of sequential extracts from the same vector and type is the identity.
3997 SDValue IdentitySrc;
3998 bool IsIdentity = true;
3999 for (int i = 0; i != NumOps; ++i) {
4000 if (Ops[i].getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
4001 Ops[i].getOperand(0).getValueType() != VT ||
4002 (IdentitySrc && Ops[i].getOperand(0) != IdentitySrc) ||
4003 !isa<ConstantSDNode>(Ops[i].getOperand(1)) ||
4004 cast<ConstantSDNode>(Ops[i].getOperand(1))->getAPIntValue() != i) {
4005 IsIdentity = false;
4006 break;
4007 }
4008 IdentitySrc = Ops[i].getOperand(0);
4009 }
4010 if (IsIdentity)
4011 return IdentitySrc;
4012
4013 return SDValue();
4014 }
4015
4016 static SDValue FoldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
4017 ArrayRef<SDValue> Ops,
4018 SelectionDAG &DAG) {
4019 assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
4020 assert(llvm::all_of(Ops,
4021 [Ops](SDValue Op) {
4022 return Ops[0].getValueType() == Op.getValueType();
4023 }) &&
4024 "Concatenation of vectors with inconsistent value types!");
4025 assert((Ops.size() * Ops[0].getValueType().getVectorNumElements()) ==
4026 VT.getVectorNumElements() &&
4027 "Incorrect element count in vector concatenation!");
4028
4029 if (Ops.size() == 1)
4030 return Ops[0];
4031
4032 // Concat of UNDEFs is UNDEF.
4033 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
4034 return DAG.getUNDEF(VT);
4035
4036 // A CONCAT_VECTOR with all UNDEF/BUILD_VECTOR operands can be
4037 // simplified to one big BUILD_VECTOR.
4038 // FIXME: Add support for SCALAR_TO_VECTOR as well.
4039 EVT SVT = VT.getScalarType();
4040 SmallVector<SDValue, 16> Elts;
4041 for (SDValue Op : Ops) {
4042 EVT OpVT = Op.getValueType();
4043 if (Op.isUndef())
4044 Elts.append(OpVT.getVectorNumElements(), DAG.getUNDEF(SVT));
4045 else if (Op.getOpcode() == ISD::BUILD_VECTOR)
4046 Elts.append(Op->op_begin(), Op->op_end());
4047 else
4048 return SDValue();
4049 }
4050
4051 // BUILD_VECTOR requires all inputs to be of the same type, find the
4052 // maximum type and extend them all.
4053 for (SDValue Op : Elts)
4054 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT);
4055
4056 if (SVT.bitsGT(VT.getScalarType()))
4057 for (SDValue &Op : Elts)
4058 Op = DAG.getTargetLoweringInfo().isZExtFree(Op.getValueType(), SVT)
4059 ? DAG.getZExtOrTrunc(Op, DL, SVT)
4060 : DAG.getSExtOrTrunc(Op, DL, SVT);
4061
4062 SDValue V = DAG.getBuildVector(VT, DL, Elts);
4063 NewSDValueDbgMsg(V, "New node fold concat vectors: ", &DAG);
4064 return V;
4065 }
4066
4067 /// Gets or creates the specified node.
4068 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT) {
4069 FoldingSetNodeID ID;
4070 AddNodeIDNode(ID, Opcode, getVTList(VT), None);
4071 void *IP = nullptr;
4072 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
4073 return SDValue(E, 0);
4074
4075 auto *N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(),
4076 getVTList(VT));
4077 CSEMap.InsertNode(N, IP);
4078
4079 InsertNode(N);
4080 SDValue V = SDValue(N, 0);
4081 NewSDValueDbgMsg(V, "Creating new node: ", this);
4082 return V;
4083 }
4084
4085 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4086 SDValue Operand, const SDNodeFlags Flags) {
4087 // Constant fold unary operations with an integer constant operand. Even
4088 // opaque constants will be folded, because the folding of unary operations
4089 // doesn't create new constants with different values. Nevertheless, the
4090 // opaque flag is preserved during folding to prevent future folding with
4091 // other constants.
4092 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) {
4093 const APInt &Val = C->getAPIntValue();
4094 switch (Opcode) {
4095 default: break;
4096 case ISD::SIGN_EXTEND:
4097 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), DL, VT,
4098 C->isTargetOpcode(), C->isOpaque());
4099 case ISD::TRUNCATE:
4100 if (C->isOpaque())
4101 break;
4102 LLVM_FALLTHROUGH;
4103 case ISD::ANY_EXTEND:
4104 case ISD::ZERO_EXTEND:
4105 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), DL, VT,
4106 C->isTargetOpcode(), C->isOpaque());
4107 case ISD::UINT_TO_FP:
4108 case ISD::SINT_TO_FP: {
4109 APFloat apf(EVTToAPFloatSemantics(VT),
4110 APInt::getNullValue(VT.getSizeInBits()));
4111 (void)apf.convertFromAPInt(Val,
4112 Opcode==ISD::SINT_TO_FP,
4113 APFloat::rmNearestTiesToEven);
4114 return getConstantFP(apf, DL, VT);
4115 }
4116 case ISD::BITCAST:
4117 if (VT == MVT::f16 && C->getValueType(0) == MVT::i16)
4118 return getConstantFP(APFloat(APFloat::IEEEhalf(), Val), DL, VT);
4119 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
4120 return getConstantFP(APFloat(APFloat::IEEEsingle(), Val), DL, VT);
4121 if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
4122 return getConstantFP(APFloat(APFloat::IEEEdouble(), Val), DL, VT);
4123 if (VT == MVT::f128 && C->getValueType(0) == MVT::i128)
4124 return getConstantFP(APFloat(APFloat::IEEEquad(), Val), DL, VT);
4125 break;
4126 case ISD::ABS:
4127 return getConstant(Val.abs(), DL, VT, C->isTargetOpcode(),
4128 C->isOpaque());
4129 case ISD::BITREVERSE:
4130 return getConstant(Val.reverseBits(), DL, VT, C->isTargetOpcode(),
4131 C->isOpaque());
4132 case ISD::BSWAP:
4133 return getConstant(Val.byteSwap(), DL, VT, C->isTargetOpcode(),
4134 C->isOpaque());
4135 case ISD::CTPOP:
4136 return getConstant(Val.countPopulation(), DL, VT, C->isTargetOpcode(),
4137 C->isOpaque());
4138 case ISD::CTLZ:
4139 case ISD::CTLZ_ZERO_UNDEF:
4140 return getConstant(Val.countLeadingZeros(), DL, VT, C->isTargetOpcode(),
4141 C->isOpaque());
4142 case ISD::CTTZ:
4143 case ISD::CTTZ_ZERO_UNDEF:
4144 return getConstant(Val.countTrailingZeros(), DL, VT, C->isTargetOpcode(),
4145 C->isOpaque());
4146 case ISD::FP16_TO_FP: {
4147 bool Ignored;
4148 APFloat FPV(APFloat::IEEEhalf(),
4149 (Val.getBitWidth() == 16) ? Val : Val.trunc(16));
4150
4151 // This can return overflow, underflow, or inexact; we don't care.
4152 // FIXME need to be more flexible about rounding mode.
4153 (void)FPV.convert(EVTToAPFloatSemantics(VT),
4154 APFloat::rmNearestTiesToEven, &Ignored);
4155 return getConstantFP(FPV, DL, VT);
4156 }
4157 }
4158 }
4159
4160 // Constant fold unary operations with a floating point constant operand.
4161 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) {
4162 APFloat V = C->getValueAPF(); // make copy
4163 switch (Opcode) {
4164 case ISD::FNEG:
4165 V.changeSign();
4166 return getConstantFP(V, DL, VT);
4167 case ISD::FABS:
4168 V.clearSign();
4169 return getConstantFP(V, DL, VT);
4170 case ISD::FCEIL: {
4171 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
4172 if (fs == APFloat::opOK || fs == APFloat::opInexact)
4173 return getConstantFP(V, DL, VT);
4174 break;
4175 }
4176 case ISD::FTRUNC: {
4177 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
4178 if (fs == APFloat::opOK || fs == APFloat::opInexact)
4179 return getConstantFP(V, DL, VT);
4180 break;
4181 }
4182 case ISD::FFLOOR: {
4183 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
4184 if (fs == APFloat::opOK || fs == APFloat::opInexact)
4185 return getConstantFP(V, DL, VT);
4186 break;
4187 }
4188 case ISD::FP_EXTEND: {
4189 bool ignored;
4190 // This can return overflow, underflow, or inexact; we don't care.
4191 // FIXME need to be more flexible about rounding mode.
4192 (void)V.convert(EVTToAPFloatSemantics(VT),
4193 APFloat::rmNearestTiesToEven, &ignored);
4194 return getConstantFP(V, DL, VT);
4195 }
4196 case ISD::FP_TO_SINT:
4197 case ISD::FP_TO_UINT: {
4198 bool ignored;
4199 APSInt IntVal(VT.getSizeInBits(), Opcode == ISD::FP_TO_UINT);
4200 // FIXME need to be more flexible about rounding mode.
4201 APFloat::opStatus s =
4202 V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored);
4203 if (s == APFloat::opInvalidOp) // inexact is OK, in fact usual
4204 break;
4205 return getConstant(IntVal, DL, VT);
4206 }
4207 case ISD::BITCAST:
4208 if (VT == MVT::i16 && C->getValueType(0) == MVT::f16)
4209 return getConstant((uint16_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
4210 else if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
4211 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), DL, VT);
4212 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
4213 return getConstant(V.bitcastToAPInt().getZExtValue(), DL, VT);
4214 break;
4215 case ISD::FP_TO_FP16: {
4216 bool Ignored;
4217 // This can return overflow, underflow, or inexact; we don't care.
4218 // FIXME need to be more flexible about rounding mode.
4219 (void)V.convert(APFloat::IEEEhalf(),
4220 APFloat::rmNearestTiesToEven, &Ignored);
4221 return getConstant(V.bitcastToAPInt(), DL, VT);
4222 }
4223 }
4224 }
4225
4226 // Constant fold unary operations with a vector integer or float operand.
4227 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) {
4228 if (BV->isConstant()) {
4229 switch (Opcode) {
4230 default:
4231 // FIXME: Entirely reasonable to perform folding of other unary
4232 // operations here as the need arises.
4233 break;
4234 case ISD::FNEG:
4235 case ISD::FABS:
4236 case ISD::FCEIL:
4237 case ISD::FTRUNC:
4238 case ISD::FFLOOR:
4239 case ISD::FP_EXTEND:
4240 case ISD::FP_TO_SINT:
4241 case ISD::FP_TO_UINT:
4242 case ISD::TRUNCATE:
4243 case ISD::ANY_EXTEND:
4244 case ISD::ZERO_EXTEND:
4245 case ISD::SIGN_EXTEND:
4246 case ISD::UINT_TO_FP:
4247 case ISD::SINT_TO_FP:
4248 case ISD::ABS:
4249 case ISD::BITREVERSE:
4250 case ISD::BSWAP:
4251 case ISD::CTLZ:
4252 case ISD::CTLZ_ZERO_UNDEF:
4253 case ISD::CTTZ:
4254 case ISD::CTTZ_ZERO_UNDEF:
4255 case ISD::CTPOP: {
4256 SDValue Ops = { Operand };
4257 if (SDValue Fold = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops))
4258 return Fold;
4259 }
4260 }
4261 }
4262 }
4263
4264 unsigned OpOpcode = Operand.getNode()->getOpcode();
4265 switch (Opcode) {
4266 case ISD::TokenFactor:
4267 case ISD::MERGE_VALUES:
4268 case ISD::CONCAT_VECTORS:
4269 return Operand; // Factor, merge or concat of one node? No need.
4270 case ISD::BUILD_VECTOR: {
4271 // Attempt to simplify BUILD_VECTOR.
4272 SDValue Ops[] = {Operand};
4273 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
4274 return V;
4275 break;
4276 }
4277 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
4278 case ISD::FP_EXTEND:
4279 assert(VT.isFloatingPoint() &&
4280 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
4281 if (Operand.getValueType() == VT) return Operand; // noop conversion.
4282 assert((!VT.isVector() ||
4283 VT.getVectorNumElements() ==
4284 Operand.getValueType().getVectorNumElements()) &&
4285 "Vector element count mismatch!");
4286 assert(Operand.getValueType().bitsLT(VT) &&
4287 "Invalid fpext node, dst < src!");
4288 if (Operand.isUndef())
4289 return getUNDEF(VT);
4290 break;
4291 case ISD::SIGN_EXTEND:
4292 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4293 "Invalid SIGN_EXTEND!");
4294 if (Operand.getValueType() == VT) return Operand; // noop extension
4295 assert((!VT.isVector() ||
4296 VT.getVectorNumElements() ==
4297 Operand.getValueType().getVectorNumElements()) &&
4298 "Vector element count mismatch!");
4299 assert(Operand.getValueType().bitsLT(VT) &&
4300 "Invalid sext node, dst < src!");
4301 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
4302 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4303 else if (OpOpcode == ISD::UNDEF)
4304 // sext(undef) = 0, because the top bits will all be the same.
4305 return getConstant(0, DL, VT);
4306 break;
4307 case ISD::ZERO_EXTEND:
4308 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4309 "Invalid ZERO_EXTEND!");
4310 if (Operand.getValueType() == VT) return Operand; // noop extension
4311 assert((!VT.isVector() ||
4312 VT.getVectorNumElements() ==
4313 Operand.getValueType().getVectorNumElements()) &&
4314 "Vector element count mismatch!");
4315 assert(Operand.getValueType().bitsLT(VT) &&
4316 "Invalid zext node, dst < src!");
4317 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x)
4318 return getNode(ISD::ZERO_EXTEND, DL, VT, Operand.getOperand(0));
4319 else if (OpOpcode == ISD::UNDEF)
4320 // zext(undef) = 0, because the top bits will be zero.
4321 return getConstant(0, DL, VT);
4322 break;
4323 case ISD::ANY_EXTEND:
4324 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4325 "Invalid ANY_EXTEND!");
4326 if (Operand.getValueType() == VT) return Operand; // noop extension
4327 assert((!VT.isVector() ||
4328 VT.getVectorNumElements() ==
4329 Operand.getValueType().getVectorNumElements()) &&
4330 "Vector element count mismatch!");
4331 assert(Operand.getValueType().bitsLT(VT) &&
4332 "Invalid anyext node, dst < src!");
4333
4334 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4335 OpOpcode == ISD::ANY_EXTEND)
4336 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
4337 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4338 else if (OpOpcode == ISD::UNDEF)
4339 return getUNDEF(VT);
4340
4341 // (ext (trunc x)) -> x
4342 if (OpOpcode == ISD::TRUNCATE) {
4343 SDValue OpOp = Operand.getOperand(0);
4344 if (OpOp.getValueType() == VT) {
4345 transferDbgValues(Operand, OpOp);
4346 return OpOp;
4347 }
4348 }
4349 break;
4350 case ISD::TRUNCATE:
4351 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
4352 "Invalid TRUNCATE!");
4353 if (Operand.getValueType() == VT) return Operand; // noop truncate
4354 assert((!VT.isVector() ||
4355 VT.getVectorNumElements() ==
4356 Operand.getValueType().getVectorNumElements()) &&
4357 "Vector element count mismatch!");
4358 assert(Operand.getValueType().bitsGT(VT) &&
4359 "Invalid truncate node, src < dst!");
4360 if (OpOpcode == ISD::TRUNCATE)
4361 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
4362 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
4363 OpOpcode == ISD::ANY_EXTEND) {
4364 // If the source is smaller than the dest, we still need an extend.
4365 if (Operand.getOperand(0).getValueType().getScalarType()
4366 .bitsLT(VT.getScalarType()))
4367 return getNode(OpOpcode, DL, VT, Operand.getOperand(0));
4368 if (Operand.getOperand(0).getValueType().bitsGT(VT))
4369 return getNode(ISD::TRUNCATE, DL, VT, Operand.getOperand(0));
4370 return Operand.getOperand(0);
4371 }
4372 if (OpOpcode == ISD::UNDEF)
4373 return getUNDEF(VT);
4374 break;
4375 case ISD::ANY_EXTEND_VECTOR_INREG:
4376 case ISD::ZERO_EXTEND_VECTOR_INREG:
4377 case ISD::SIGN_EXTEND_VECTOR_INREG:
4378 assert(VT.isVector() && "This DAG node is restricted to vector types.");
4379 assert(Operand.getValueType().bitsLE(VT) &&
4380 "The input must be the same size or smaller than the result.");
4381 assert(VT.getVectorNumElements() <
4382 Operand.getValueType().getVectorNumElements() &&
4383 "The destination vector type must have fewer lanes than the input.");
4384 break;
4385 case ISD::ABS:
4386 assert(VT.isInteger() && VT == Operand.getValueType() &&
4387 "Invalid ABS!");
4388 if (OpOpcode == ISD::UNDEF)
4389 return getUNDEF(VT);
4390 break;
4391 case ISD::BSWAP:
4392 assert(VT.isInteger() && VT == Operand.getValueType() &&
4393 "Invalid BSWAP!");
4394 assert((VT.getScalarSizeInBits() % 16 == 0) &&
4395 "BSWAP types must be a multiple of 16 bits!");
4396 if (OpOpcode == ISD::UNDEF)
4397 return getUNDEF(VT);
4398 break;
4399 case ISD::BITREVERSE:
4400 assert(VT.isInteger() && VT == Operand.getValueType() &&
4401 "Invalid BITREVERSE!");
4402 if (OpOpcode == ISD::UNDEF)
4403 return getUNDEF(VT);
4404 break;
4405 case ISD::BITCAST:
4406 // Basic sanity checking.
4407 assert(VT.getSizeInBits() == Operand.getValueSizeInBits() &&
4408 "Cannot BITCAST between types of different sizes!");
4409 if (VT == Operand.getValueType()) return Operand; // noop conversion.
4410 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
4411 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
4412 if (OpOpcode == ISD::UNDEF)
4413 return getUNDEF(VT);
4414 break;
4415 case ISD::SCALAR_TO_VECTOR:
4416 assert(VT.isVector() && !Operand.getValueType().isVector() &&
4417 (VT.getVectorElementType() == Operand.getValueType() ||
4418 (VT.getVectorElementType().isInteger() &&
4419 Operand.getValueType().isInteger() &&
4420 VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
4421 "Illegal SCALAR_TO_VECTOR node!");
4422 if (OpOpcode == ISD::UNDEF)
4423 return getUNDEF(VT);
4424 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
4425 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
4426 isa<ConstantSDNode>(Operand.getOperand(1)) &&
4427 Operand.getConstantOperandVal(1) == 0 &&
4428 Operand.getOperand(0).getValueType() == VT)
4429 return Operand.getOperand(0);
4430 break;
4431 case ISD::FNEG:
4432 // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0
4433 if ((getTarget().Options.UnsafeFPMath || Flags.hasNoSignedZeros()) &&
4434 OpOpcode == ISD::FSUB)
4435 return getNode(ISD::FSUB, DL, VT, Operand.getOperand(1),
4436 Operand.getOperand(0), Flags);
4437 if (OpOpcode == ISD::FNEG) // --X -> X
4438 return Operand.getOperand(0);
4439 break;
4440 case ISD::FABS:
4441 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X)
4442 return getNode(ISD::FABS, DL, VT, Operand.getOperand(0));
4443 break;
4444 }
4445
4446 SDNode *N;
4447 SDVTList VTs = getVTList(VT);
4448 SDValue Ops[] = {Operand};
4449 if (VT != MVT::Glue) { // Don't CSE flag producing nodes
4450 FoldingSetNodeID ID;
4451 AddNodeIDNode(ID, Opcode, VTs, Ops);
4452 void *IP = nullptr;
4453 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
4454 E->intersectFlagsWith(Flags);
4455 return SDValue(E, 0);
4456 }
4457
4458 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4459 N->setFlags(Flags);
4460 createOperands(N, Ops);
4461 CSEMap.InsertNode(N, IP);
4462 } else {
4463 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
4464 createOperands(N, Ops);
4465 }
4466
4467 InsertNode(N);
4468 SDValue V = SDValue(N, 0);
4469 NewSDValueDbgMsg(V, "Creating new node: ", this);
4470 return V;
4471 }
4472
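// FoldValue - Attempt to constant fold a binary integer opcode on two APInt
// operands. The bool half of the result reports whether folding succeeded;
// for example, ADD on 3 and 5 yields {8, true}, while any division or
// remainder by zero falls through and reports failure so the caller keeps
// the original node.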
4473 static std::pair<APInt, bool> FoldValue(unsigned Opcode, const APInt &C1,
4474 const APInt &C2) {
4475 switch (Opcode) {
4476 case ISD::ADD: return std::make_pair(C1 + C2, true);
4477 case ISD::SUB: return std::make_pair(C1 - C2, true);
4478 case ISD::MUL: return std::make_pair(C1 * C2, true);
4479 case ISD::AND: return std::make_pair(C1 & C2, true);
4480 case ISD::OR: return std::make_pair(C1 | C2, true);
4481 case ISD::XOR: return std::make_pair(C1 ^ C2, true);
4482 case ISD::SHL: return std::make_pair(C1 << C2, true);
4483 case ISD::SRL: return std::make_pair(C1.lshr(C2), true);
4484 case ISD::SRA: return std::make_pair(C1.ashr(C2), true);
4485 case ISD::ROTL: return std::make_pair(C1.rotl(C2), true);
4486 case ISD::ROTR: return std::make_pair(C1.rotr(C2), true);
4487 case ISD::SMIN: return std::make_pair(C1.sle(C2) ? C1 : C2, true);
4488 case ISD::SMAX: return std::make_pair(C1.sge(C2) ? C1 : C2, true);
4489 case ISD::UMIN: return std::make_pair(C1.ule(C2) ? C1 : C2, true);
4490 case ISD::UMAX: return std::make_pair(C1.uge(C2) ? C1 : C2, true);
4491 case ISD::SADDSAT: return std::make_pair(C1.sadd_sat(C2), true);
4492 case ISD::UADDSAT: return std::make_pair(C1.uadd_sat(C2), true);
4493 case ISD::SSUBSAT: return std::make_pair(C1.ssub_sat(C2), true);
4494 case ISD::USUBSAT: return std::make_pair(C1.usub_sat(C2), true);
4495 case ISD::UDIV:
4496 if (!C2.getBoolValue())
4497 break;
4498 return std::make_pair(C1.udiv(C2), true);
4499 case ISD::UREM:
4500 if (!C2.getBoolValue())
4501 break;
4502 return std::make_pair(C1.urem(C2), true);
4503 case ISD::SDIV:
4504 if (!C2.getBoolValue())
4505 break;
4506 return std::make_pair(C1.sdiv(C2), true);
4507 case ISD::SREM:
4508 if (!C2.getBoolValue())
4509 break;
4510 return std::make_pair(C1.srem(C2), true);
4511 }
4512 return std::make_pair(APInt(1, 0), false);
4513 }
4514
4515 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
4516 EVT VT, const ConstantSDNode *Cst1,
4517 const ConstantSDNode *Cst2) {
4518 if (Cst1->isOpaque() || Cst2->isOpaque())
4519 return SDValue();
4520
4521 std::pair<APInt, bool> Folded = FoldValue(Opcode, Cst1->getAPIntValue(),
4522 Cst2->getAPIntValue());
4523 if (!Folded.second)
4524 return SDValue();
4525 return getConstant(Folded.first, DL, VT);
4526 }
4527
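// FoldSymbolOffset - Fold (add/sub GlobalAddress, Constant) into a single
// GlobalAddressSDNode with an adjusted offset, provided the target reports
// that offset folding is legal for this global.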
4528 SDValue SelectionDAG::FoldSymbolOffset(unsigned Opcode, EVT VT,
4529 const GlobalAddressSDNode *GA,
4530 const SDNode *N2) {
4531 if (GA->getOpcode() != ISD::GlobalAddress)
4532 return SDValue();
4533 if (!TLI->isOffsetFoldingLegal(GA))
4534 return SDValue();
4535 const ConstantSDNode *Cst2 = dyn_cast<ConstantSDNode>(N2);
4536 if (!Cst2)
4537 return SDValue();
4538 int64_t Offset = Cst2->getSExtValue();
4539 switch (Opcode) {
4540 case ISD::ADD: break;
4541 case ISD::SUB: Offset = -uint64_t(Offset); break;
4542 default: return SDValue();
4543 }
4544 return getGlobalAddress(GA->getGlobal(), SDLoc(Cst2), VT,
4545 GA->getOffset() + uint64_t(Offset));
4546 }
4547
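// isUndef - Return true if applying Opcode to these operands is guaranteed to
// produce undef, e.g. a div/rem whose divisor is zero or undef (including any
// zero/undef lane of a constant build-vector divisor).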
4548 bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) {
4549 switch (Opcode) {
4550 case ISD::SDIV:
4551 case ISD::UDIV:
4552 case ISD::SREM:
4553 case ISD::UREM: {
4554 // If a divisor is zero/undef or any element of a divisor vector is
4555 // zero/undef, the whole op is undef.
4556 assert(Ops.size() == 2 && "Div/rem should have 2 operands");
4557 SDValue Divisor = Ops[1];
4558 if (Divisor.isUndef() || isNullConstant(Divisor))
4559 return true;
4560
4561 return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) &&
4562 llvm::any_of(Divisor->op_values(),
4563 [](SDValue V) { return V.isUndef() ||
4564 isNullConstant(V); });
4565 // TODO: Handle signed overflow.
4566 }
4567 // TODO: Handle oversized shifts.
4568 default:
4569 return false;
4570 }
4571 }
4572
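// This overload handles the general case: it tries scalar constant folding,
// then global-address offset folding, and finally folds BUILD_VECTOR operands
// lane by lane, truncating over-wide scalar operands first and sign-extending
// the per-lane results to a legal scalar type if required.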
4573 SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL,
4574 EVT VT, SDNode *Cst1,
4575 SDNode *Cst2) {
4576 // If the opcode is a target-specific ISD node, there's nothing we can
4577 // do here and the operand rules may not line up with the below, so
4578 // bail early.
4579 if (Opcode >= ISD::BUILTIN_OP_END)
4580 return SDValue();
4581
4582 if (isUndef(Opcode, {SDValue(Cst1, 0), SDValue(Cst2, 0)}))
4583 return getUNDEF(VT);
4584
4585 // Handle the case of two scalars.
4586 if (const ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1)) {
4587 if (const ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2)) {
4588 SDValue Folded = FoldConstantArithmetic(Opcode, DL, VT, Scalar1, Scalar2);
4589 assert((!Folded || !VT.isVector()) &&
4590              "Can't fold vector ops with scalar operands");
4591 return Folded;
4592 }
4593 }
4594
4595 // fold (add Sym, c) -> Sym+c
4596 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst1))
4597 return FoldSymbolOffset(Opcode, VT, GA, Cst2);
4598 if (TLI->isCommutativeBinOp(Opcode))
4599 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst2))
4600 return FoldSymbolOffset(Opcode, VT, GA, Cst1);
4601
4602 // For vectors, extract each constant element and fold them individually.
4603 // Either input may be an undef value.
4604 auto *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
4605 if (!BV1 && !Cst1->isUndef())
4606 return SDValue();
4607 auto *BV2 = dyn_cast<BuildVectorSDNode>(Cst2);
4608 if (!BV2 && !Cst2->isUndef())
4609 return SDValue();
4610 // If both operands are undef, that's handled the same way as scalars.
4611 if (!BV1 && !BV2)
4612 return SDValue();
4613
4614 assert((!BV1 || !BV2 || BV1->getNumOperands() == BV2->getNumOperands()) &&
4615 "Vector binop with different number of elements in operands?");
4616
4617 EVT SVT = VT.getScalarType();
4618 EVT LegalSVT = SVT;
4619 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
4620 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
4621 if (LegalSVT.bitsLT(SVT))
4622 return SDValue();
4623 }
4624 SmallVector<SDValue, 4> Outputs;
4625 unsigned NumOps = BV1 ? BV1->getNumOperands() : BV2->getNumOperands();
4626 for (unsigned I = 0; I != NumOps; ++I) {
4627 SDValue V1 = BV1 ? BV1->getOperand(I) : getUNDEF(SVT);
4628 SDValue V2 = BV2 ? BV2->getOperand(I) : getUNDEF(SVT);
4629 if (SVT.isInteger()) {
4630 if (V1->getValueType(0).bitsGT(SVT))
4631 V1 = getNode(ISD::TRUNCATE, DL, SVT, V1);
4632 if (V2->getValueType(0).bitsGT(SVT))
4633 V2 = getNode(ISD::TRUNCATE, DL, SVT, V2);
4634 }
4635
4636 if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
4637 return SDValue();
4638
4639 // Fold one vector element.
4640 SDValue ScalarResult = getNode(Opcode, DL, SVT, V1, V2);
4641 if (LegalSVT != SVT)
4642 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
4643
4644 // Scalar folding only succeeded if the result is a constant or UNDEF.
4645 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
4646 ScalarResult.getOpcode() != ISD::ConstantFP)
4647 return SDValue();
4648 Outputs.push_back(ScalarResult);
4649 }
4650
4651 assert(VT.getVectorNumElements() == Outputs.size() &&
4652 "Vector size mismatch!");
4653
4654 // We may have a vector type but a scalar result. Create a splat.
4655 Outputs.resize(VT.getVectorNumElements(), Outputs.back());
4656
4657 // Build a big vector out of the scalar elements we generated.
4658 return getBuildVector(VT, SDLoc(), Outputs);
4659 }
4660
4661 // TODO: Merge with FoldConstantArithmetic
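// Constant folds an arbitrary-arity vector operation: every operand must be
// UNDEF, a CONDCODE node, or a constant BUILD_VECTOR whose element count
// matches the result. Each lane is folded independently and the results are
// reassembled with getBuildVector.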
4662 SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode,
4663 const SDLoc &DL, EVT VT,
4664 ArrayRef<SDValue> Ops,
4665 const SDNodeFlags Flags) {
4666 // If the opcode is a target-specific ISD node, there's nothing we can
4667 // do here and the operand rules may not line up with the below, so
4668 // bail early.
4669 if (Opcode >= ISD::BUILTIN_OP_END)
4670 return SDValue();
4671
4672 if (isUndef(Opcode, Ops))
4673 return getUNDEF(VT);
4674
4675 // We can only fold vectors - maybe merge with FoldConstantArithmetic someday?
4676 if (!VT.isVector())
4677 return SDValue();
4678
4679 unsigned NumElts = VT.getVectorNumElements();
4680
4681 auto IsScalarOrSameVectorSize = [&](const SDValue &Op) {
4682 return !Op.getValueType().isVector() ||
4683 Op.getValueType().getVectorNumElements() == NumElts;
4684 };
4685
4686 auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) {
4687 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op);
4688 return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) ||
4689 (BV && BV->isConstant());
4690 };
4691
4692 // All operands must be vector types with the same number of elements as
4693 // the result type and must be either UNDEF or a build vector of constant
4694 // or UNDEF scalars.
4695 if (!llvm::all_of(Ops, IsConstantBuildVectorOrUndef) ||
4696 !llvm::all_of(Ops, IsScalarOrSameVectorSize))
4697 return SDValue();
4698
4699   // If we are comparing vectors, then the result needs to be an i1 boolean
4700 // that is then sign-extended back to the legal result type.
4701 EVT SVT = (Opcode == ISD::SETCC ? MVT::i1 : VT.getScalarType());
4702
4703 // Find legal integer scalar type for constant promotion and
4704 // ensure that its scalar size is at least as large as source.
4705 EVT LegalSVT = VT.getScalarType();
4706 if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
4707 LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
4708 if (LegalSVT.bitsLT(VT.getScalarType()))
4709 return SDValue();
4710 }
4711
4712 // Constant fold each scalar lane separately.
4713 SmallVector<SDValue, 4> ScalarResults;
4714 for (unsigned i = 0; i != NumElts; i++) {
4715 SmallVector<SDValue, 4> ScalarOps;
4716 for (SDValue Op : Ops) {
4717 EVT InSVT = Op.getValueType().getScalarType();
4718 BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op);
4719 if (!InBV) {
4720 // We've checked that this is UNDEF or a constant of some kind.
4721 if (Op.isUndef())
4722 ScalarOps.push_back(getUNDEF(InSVT));
4723 else
4724 ScalarOps.push_back(Op);
4725 continue;
4726 }
4727
4728 SDValue ScalarOp = InBV->getOperand(i);
4729 EVT ScalarVT = ScalarOp.getValueType();
4730
4731 // Build vector (integer) scalar operands may need implicit
4732 // truncation - do this before constant folding.
4733 if (ScalarVT.isInteger() && ScalarVT.bitsGT(InSVT))
4734 ScalarOp = getNode(ISD::TRUNCATE, DL, InSVT, ScalarOp);
4735
4736 ScalarOps.push_back(ScalarOp);
4737 }
4738
4739 // Constant fold the scalar operands.
4740 SDValue ScalarResult = getNode(Opcode, DL, SVT, ScalarOps, Flags);
4741
4742 // Legalize the (integer) scalar constant if necessary.
4743 if (LegalSVT != SVT)
4744 ScalarResult = getNode(ISD::SIGN_EXTEND, DL, LegalSVT, ScalarResult);
4745
4746 // Scalar folding only succeeded if the result is a constant or UNDEF.
4747 if (!ScalarResult.isUndef() && ScalarResult.getOpcode() != ISD::Constant &&
4748 ScalarResult.getOpcode() != ISD::ConstantFP)
4749 return SDValue();
4750 ScalarResults.push_back(ScalarResult);
4751 }
4752
4753 SDValue V = getBuildVector(VT, DL, ScalarResults);
4754 NewSDValueDbgMsg(V, "New node fold constant vector: ", this);
4755 return V;
4756 }
4757
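// Two-operand getNode: constants are canonicalized to the RHS for commutative
// opcodes, opcode-specific identities (x & -1, x + 0, shifts by zero, ...) and
// undef rules are applied, constants are folded, and only then is the node
// CSE'd or created.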
4758 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
4759 SDValue N1, SDValue N2, const SDNodeFlags Flags) {
4760 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
4761 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
4762 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
4763 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
4764
4765 // Canonicalize constant to RHS if commutative.
4766 if (TLI->isCommutativeBinOp(Opcode)) {
4767 if (N1C && !N2C) {
4768 std::swap(N1C, N2C);
4769 std::swap(N1, N2);
4770 } else if (N1CFP && !N2CFP) {
4771 std::swap(N1CFP, N2CFP);
4772 std::swap(N1, N2);
4773 }
4774 }
4775
4776 switch (Opcode) {
4777 default: break;
4778 case ISD::TokenFactor:
4779 assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
4780 N2.getValueType() == MVT::Other && "Invalid token factor!");
4781 // Fold trivial token factors.
4782 if (N1.getOpcode() == ISD::EntryToken) return N2;
4783 if (N2.getOpcode() == ISD::EntryToken) return N1;
4784 if (N1 == N2) return N1;
4785 break;
4786 case ISD::BUILD_VECTOR: {
4787 // Attempt to simplify BUILD_VECTOR.
4788 SDValue Ops[] = {N1, N2};
4789 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
4790 return V;
4791 break;
4792 }
4793 case ISD::CONCAT_VECTORS: {
4794 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF.
4795 SDValue Ops[] = {N1, N2};
4796 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this))
4797 return V;
4798 break;
4799 }
4800 case ISD::AND:
4801 assert(VT.isInteger() && "This operator does not apply to FP types!");
4802 assert(N1.getValueType() == N2.getValueType() &&
4803 N1.getValueType() == VT && "Binary operator types must match!");
4804 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
4805 // worth handling here.
4806 if (N2C && N2C->isNullValue())
4807 return N2;
4808 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X
4809 return N1;
4810 break;
4811 case ISD::OR:
4812 case ISD::XOR:
4813 case ISD::ADD:
4814 case ISD::SUB:
4815 assert(VT.isInteger() && "This operator does not apply to FP types!");
4816 assert(N1.getValueType() == N2.getValueType() &&
4817 N1.getValueType() == VT && "Binary operator types must match!");
4818 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
4819 // it's worth handling here.
4820 if (N2C && N2C->isNullValue())
4821 return N1;
4822 break;
4823 case ISD::UDIV:
4824 case ISD::UREM:
4825 case ISD::MULHU:
4826 case ISD::MULHS:
4827 case ISD::MUL:
4828 case ISD::SDIV:
4829 case ISD::SREM:
4830 case ISD::SMIN:
4831 case ISD::SMAX:
4832 case ISD::UMIN:
4833 case ISD::UMAX:
4834 case ISD::SADDSAT:
4835 case ISD::SSUBSAT:
4836 case ISD::UADDSAT:
4837 case ISD::USUBSAT:
4838 assert(VT.isInteger() && "This operator does not apply to FP types!");
4839 assert(N1.getValueType() == N2.getValueType() &&
4840 N1.getValueType() == VT && "Binary operator types must match!");
4841 break;
4842 case ISD::FADD:
4843 case ISD::FSUB:
4844 case ISD::FMUL:
4845 case ISD::FDIV:
4846 case ISD::FREM:
4847 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
4848 assert(N1.getValueType() == N2.getValueType() &&
4849 N1.getValueType() == VT && "Binary operator types must match!");
4850 break;
4851 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match.
4852 assert(N1.getValueType() == VT &&
4853 N1.getValueType().isFloatingPoint() &&
4854 N2.getValueType().isFloatingPoint() &&
4855 "Invalid FCOPYSIGN!");
4856 break;
4857 case ISD::SHL:
4858 case ISD::SRA:
4859 case ISD::SRL:
4860 if (SDValue V = simplifyShift(N1, N2))
4861 return V;
4862 LLVM_FALLTHROUGH;
4863 case ISD::ROTL:
4864 case ISD::ROTR:
4865 assert(VT == N1.getValueType() &&
4866            "Shift operators' return type must be the same as their first arg");
4867 assert(VT.isInteger() && N2.getValueType().isInteger() &&
4868 "Shifts only work on integers");
4869 assert((!VT.isVector() || VT == N2.getValueType()) &&
4870            "Vector shift amounts must have the same type as their first arg");
4871 // Verify that the shift amount VT is big enough to hold valid shift
4872 // amounts. This catches things like trying to shift an i1024 value by an
4873 // i8, which is easy to fall into in generic code that uses
4874 // TLI.getShiftAmount().
4875 assert(N2.getValueSizeInBits() >= Log2_32_Ceil(N1.getValueSizeInBits()) &&
4876 "Invalid use of small shift amount with oversized value!");
4877
4878 // Always fold shifts of i1 values so the code generator doesn't need to
4879 // handle them. Since we know the size of the shift has to be less than the
4880 // size of the value, the shift/rotate count is guaranteed to be zero.
4881 if (VT == MVT::i1)
4882 return N1;
4883 if (N2C && N2C->isNullValue())
4884 return N1;
4885 break;
4886 case ISD::FP_ROUND_INREG: {
4887 EVT EVT = cast<VTSDNode>(N2)->getVT();
4888 assert(VT == N1.getValueType() && "Not an inreg round!");
4889 assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
4890 "Cannot FP_ROUND_INREG integer types");
4891 assert(EVT.isVector() == VT.isVector() &&
4892 "FP_ROUND_INREG type should be vector iff the operand "
4893 "type is vector!");
4894 assert((!EVT.isVector() ||
4895 EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
4896 "Vector element counts must match in FP_ROUND_INREG");
4897 assert(EVT.bitsLE(VT) && "Not rounding down!");
4898 (void)EVT;
4899 if (cast<VTSDNode>(N2)->getVT() == VT) return N1; // Not actually rounding.
4900 break;
4901 }
4902 case ISD::FP_ROUND:
4903 assert(VT.isFloatingPoint() &&
4904 N1.getValueType().isFloatingPoint() &&
4905 VT.bitsLE(N1.getValueType()) &&
4906 N2C && (N2C->getZExtValue() == 0 || N2C->getZExtValue() == 1) &&
4907 "Invalid FP_ROUND!");
4908 if (N1.getValueType() == VT) return N1; // noop conversion.
4909 break;
4910 case ISD::AssertSext:
4911 case ISD::AssertZext: {
4912 EVT EVT = cast<VTSDNode>(N2)->getVT();
4913 assert(VT == N1.getValueType() && "Not an inreg extend!");
4914 assert(VT.isInteger() && EVT.isInteger() &&
4915 "Cannot *_EXTEND_INREG FP types");
4916 assert(!EVT.isVector() &&
4917 "AssertSExt/AssertZExt type should be the vector element type "
4918 "rather than the vector type!");
4919 assert(EVT.bitsLE(VT.getScalarType()) && "Not extending!");
4920 if (VT.getScalarType() == EVT) return N1; // noop assertion.
4921 break;
4922 }
4923 case ISD::SIGN_EXTEND_INREG: {
4924 EVT EVT = cast<VTSDNode>(N2)->getVT();
4925 assert(VT == N1.getValueType() && "Not an inreg extend!");
4926 assert(VT.isInteger() && EVT.isInteger() &&
4927 "Cannot *_EXTEND_INREG FP types");
4928 assert(EVT.isVector() == VT.isVector() &&
4929 "SIGN_EXTEND_INREG type should be vector iff the operand "
4930 "type is vector!");
4931 assert((!EVT.isVector() ||
4932 EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
4933 "Vector element counts must match in SIGN_EXTEND_INREG");
4934 assert(EVT.bitsLE(VT) && "Not extending!");
4935 if (EVT == VT) return N1; // Not actually extending
4936
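    // Sign-extend the low EVT-sized bits of Val in place: shift left so the
    // source sign bit lands in the MSB, arithmetic-shift back down, and wrap
    // the result as a constant of ConstantVT.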
4937 auto SignExtendInReg = [&](APInt Val, llvm::EVT ConstantVT) {
4938 unsigned FromBits = EVT.getScalarSizeInBits();
4939 Val <<= Val.getBitWidth() - FromBits;
4940 Val.ashrInPlace(Val.getBitWidth() - FromBits);
4941 return getConstant(Val, DL, ConstantVT);
4942 };
4943
4944 if (N1C) {
4945 const APInt &Val = N1C->getAPIntValue();
4946 return SignExtendInReg(Val, VT);
4947 }
4948 if (ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) {
4949 SmallVector<SDValue, 8> Ops;
4950 llvm::EVT OpVT = N1.getOperand(0).getValueType();
4951 for (int i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
4952 SDValue Op = N1.getOperand(i);
4953 if (Op.isUndef()) {
4954 Ops.push_back(getUNDEF(OpVT));
4955 continue;
4956 }
4957 ConstantSDNode *C = cast<ConstantSDNode>(Op);
4958 APInt Val = C->getAPIntValue();
4959 Ops.push_back(SignExtendInReg(Val, OpVT));
4960 }
4961 return getBuildVector(VT, DL, Ops);
4962 }
4963 break;
4964 }
4965 case ISD::EXTRACT_VECTOR_ELT:
4966 assert(VT.getSizeInBits() >= N1.getValueType().getScalarSizeInBits() &&
4967            "The result of EXTRACT_VECTOR_ELT must be at least as wide as the "
4968            "element type of the vector.");
4969
4970 // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF.
4971 if (N1.isUndef())
4972 return getUNDEF(VT);
4973
4974 // EXTRACT_VECTOR_ELT of out-of-bounds element is an UNDEF
4975 if (N2C && N2C->getAPIntValue().uge(N1.getValueType().getVectorNumElements()))
4976 return getUNDEF(VT);
4977
4978 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
4979 // expanding copies of large vectors from registers.
4980 if (N2C &&
4981 N1.getOpcode() == ISD::CONCAT_VECTORS &&
4982 N1.getNumOperands() > 0) {
4983 unsigned Factor =
4984 N1.getOperand(0).getValueType().getVectorNumElements();
4985 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
4986 N1.getOperand(N2C->getZExtValue() / Factor),
4987 getConstant(N2C->getZExtValue() % Factor, DL,
4988 N2.getValueType()));
4989 }
4990
4991 // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
4992 // expanding large vector constants.
4993 if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
4994 SDValue Elt = N1.getOperand(N2C->getZExtValue());
4995
4996 if (VT != Elt.getValueType())
4997 // If the vector element type is not legal, the BUILD_VECTOR operands
4998 // are promoted and implicitly truncated, and the result implicitly
4999 // extended. Make that explicit here.
5000 Elt = getAnyExtOrTrunc(Elt, DL, VT);
5001
5002 return Elt;
5003 }
5004
5005 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
5006 // operations are lowered to scalars.
5007 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
5008 // If the indices are the same, return the inserted element else
5009 // if the indices are known different, extract the element from
5010 // the original vector.
5011 SDValue N1Op2 = N1.getOperand(2);
5012 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);
5013
5014 if (N1Op2C && N2C) {
5015 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
5016 if (VT == N1.getOperand(1).getValueType())
5017 return N1.getOperand(1);
5018 else
5019 return getSExtOrTrunc(N1.getOperand(1), DL, VT);
5020 }
5021
5022 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
5023 }
5024 }
5025
5026 // EXTRACT_VECTOR_ELT of v1iX EXTRACT_SUBVECTOR could be formed
5027 // when vector types are scalarized and v1iX is legal.
5028 // vextract (v1iX extract_subvector(vNiX, Idx)) -> vextract(vNiX,Idx)
5029 if (N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
5030 N1.getValueType().getVectorNumElements() == 1) {
5031 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0),
5032 N1.getOperand(1));
5033 }
5034 break;
5035 case ISD::EXTRACT_ELEMENT:
5036 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
5037 assert(!N1.getValueType().isVector() && !VT.isVector() &&
5038 (N1.getValueType().isInteger() == VT.isInteger()) &&
5039 N1.getValueType() != VT &&
5040 "Wrong types for EXTRACT_ELEMENT!");
5041
5042 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
5043 // 64-bit integers into 32-bit parts. Instead of building the extract of
5044 // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
5045 if (N1.getOpcode() == ISD::BUILD_PAIR)
5046 return N1.getOperand(N2C->getZExtValue());
5047
5048 // EXTRACT_ELEMENT of a constant int is also very common.
5049 if (N1C) {
5050 unsigned ElementSize = VT.getSizeInBits();
5051 unsigned Shift = ElementSize * N2C->getZExtValue();
5052 APInt ShiftedVal = N1C->getAPIntValue().lshr(Shift);
5053 return getConstant(ShiftedVal.trunc(ElementSize), DL, VT);
5054 }
5055 break;
5056 case ISD::EXTRACT_SUBVECTOR:
5057 if (VT.isSimple() && N1.getValueType().isSimple()) {
5058 assert(VT.isVector() && N1.getValueType().isVector() &&
5059              "Extract subvector VTs must be vectors!");
5060 assert(VT.getVectorElementType() ==
5061 N1.getValueType().getVectorElementType() &&
5062 "Extract subvector VTs must have the same element type!");
5063 assert(VT.getSimpleVT() <= N1.getSimpleValueType() &&
5064 "Extract subvector must be from larger vector to smaller vector!");
5065
5066 if (N2C) {
5067 assert((VT.getVectorNumElements() + N2C->getZExtValue()
5068 <= N1.getValueType().getVectorNumElements())
5069 && "Extract subvector overflow!");
5070 }
5071
5072 // Trivial extraction.
5073 if (VT.getSimpleVT() == N1.getSimpleValueType())
5074 return N1;
5075
5076 // EXTRACT_SUBVECTOR of an UNDEF is an UNDEF.
5077 if (N1.isUndef())
5078 return getUNDEF(VT);
5079
5080 // EXTRACT_SUBVECTOR of CONCAT_VECTOR can be simplified if the pieces of
5081 // the concat have the same type as the extract.
5082 if (N2C && N1.getOpcode() == ISD::CONCAT_VECTORS &&
5083 N1.getNumOperands() > 0 &&
5084 VT == N1.getOperand(0).getValueType()) {
5085 unsigned Factor = VT.getVectorNumElements();
5086 return N1.getOperand(N2C->getZExtValue() / Factor);
5087 }
5088
5089 // EXTRACT_SUBVECTOR of INSERT_SUBVECTOR is often created
5090 // during shuffle legalization.
5091 if (N1.getOpcode() == ISD::INSERT_SUBVECTOR && N2 == N1.getOperand(2) &&
5092 VT == N1.getOperand(1).getValueType())
5093 return N1.getOperand(1);
5094 }
5095 break;
5096 }
5097
5098 // Perform trivial constant folding.
5099 if (SDValue SV =
5100 FoldConstantArithmetic(Opcode, DL, VT, N1.getNode(), N2.getNode()))
5101 return SV;
5102
5103 // Constant fold FP operations.
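  // When the target models floating-point exceptions, only fold an operation
  // if it cannot raise an invalid-operation (or, for FDIV/FREM, a
  // divide-by-zero) exception; otherwise the trap would be silently dropped.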
5104 bool HasFPExceptions = TLI->hasFloatingPointExceptions();
5105 if (N1CFP) {
5106 if (N2CFP) {
5107 APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
5108 APFloat::opStatus s;
5109 switch (Opcode) {
5110 case ISD::FADD:
5111 s = V1.add(V2, APFloat::rmNearestTiesToEven);
5112 if (!HasFPExceptions || s != APFloat::opInvalidOp)
5113 return getConstantFP(V1, DL, VT);
5114 break;
5115 case ISD::FSUB:
5116 s = V1.subtract(V2, APFloat::rmNearestTiesToEven);
5117 if (!HasFPExceptions || s!=APFloat::opInvalidOp)
5118 return getConstantFP(V1, DL, VT);
5119 break;
5120 case ISD::FMUL:
5121 s = V1.multiply(V2, APFloat::rmNearestTiesToEven);
5122 if (!HasFPExceptions || s!=APFloat::opInvalidOp)
5123 return getConstantFP(V1, DL, VT);
5124 break;
5125 case ISD::FDIV:
5126 s = V1.divide(V2, APFloat::rmNearestTiesToEven);
5127 if (!HasFPExceptions || (s!=APFloat::opInvalidOp &&
5128 s!=APFloat::opDivByZero)) {
5129 return getConstantFP(V1, DL, VT);
5130 }
5131 break;
5132 case ISD::FREM :
5133 s = V1.mod(V2);
5134 if (!HasFPExceptions || (s!=APFloat::opInvalidOp &&
5135 s!=APFloat::opDivByZero)) {
5136 return getConstantFP(V1, DL, VT);
5137 }
5138 break;
5139 case ISD::FCOPYSIGN:
5140 V1.copySign(V2);
5141 return getConstantFP(V1, DL, VT);
5142 default: break;
5143 }
5144 }
5145
5146 if (Opcode == ISD::FP_ROUND) {
5147 APFloat V = N1CFP->getValueAPF(); // make copy
5148 bool ignored;
5149 // This can return overflow, underflow, or inexact; we don't care.
5150 // FIXME need to be more flexible about rounding mode.
5151 (void)V.convert(EVTToAPFloatSemantics(VT),
5152 APFloat::rmNearestTiesToEven, &ignored);
5153 return getConstantFP(V, DL, VT);
5154 }
5155 }
5156
5157 switch (Opcode) {
5158 case ISD::FADD:
5159 case ISD::FSUB:
5160 case ISD::FMUL:
5161 case ISD::FDIV:
5162 case ISD::FREM:
5163 // If both operands are undef, the result is undef. If 1 operand is undef,
5164 // the result is NaN. This should match the behavior of the IR optimizer.
5165 if (N1.isUndef() && N2.isUndef())
5166 return getUNDEF(VT);
5167 if (N1.isUndef() || N2.isUndef())
5168 return getConstantFP(APFloat::getNaN(EVTToAPFloatSemantics(VT)), DL, VT);
5169 }
5170
5171 // Canonicalize an UNDEF to the RHS, even over a constant.
5172 if (N1.isUndef()) {
5173 if (TLI->isCommutativeBinOp(Opcode)) {
5174 std::swap(N1, N2);
5175 } else {
5176 switch (Opcode) {
5177 case ISD::FP_ROUND_INREG:
5178 case ISD::SIGN_EXTEND_INREG:
5179 case ISD::SUB:
5180 return getUNDEF(VT); // fold op(undef, arg2) -> undef
5181 case ISD::UDIV:
5182 case ISD::SDIV:
5183 case ISD::UREM:
5184 case ISD::SREM:
5185 case ISD::SSUBSAT:
5186 case ISD::USUBSAT:
5187 return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0
5188 }
5189 }
5190 }
5191
5192 // Fold a bunch of operators when the RHS is undef.
5193 if (N2.isUndef()) {
5194 switch (Opcode) {
5195 case ISD::XOR:
5196 if (N1.isUndef())
5197 // Handle undef ^ undef -> 0 special case. This is a common
5198 // idiom (misuse).
5199 return getConstant(0, DL, VT);
5200 LLVM_FALLTHROUGH;
5201 case ISD::ADD:
5202 case ISD::SUB:
5203 case ISD::UDIV:
5204 case ISD::SDIV:
5205 case ISD::UREM:
5206 case ISD::SREM:
5207 return getUNDEF(VT); // fold op(arg1, undef) -> undef
5208 case ISD::MUL:
5209 case ISD::AND:
5210 case ISD::SSUBSAT:
5211 case ISD::USUBSAT:
5212 return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0
5213 case ISD::OR:
5214 case ISD::SADDSAT:
5215 case ISD::UADDSAT:
5216 return getAllOnesConstant(DL, VT);
5217 }
5218 }
5219
5220 // Memoize this node if possible.
5221 SDNode *N;
5222 SDVTList VTs = getVTList(VT);
5223 SDValue Ops[] = {N1, N2};
5224 if (VT != MVT::Glue) {
5225 FoldingSetNodeID ID;
5226 AddNodeIDNode(ID, Opcode, VTs, Ops);
5227 void *IP = nullptr;
5228 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
5229 E->intersectFlagsWith(Flags);
5230 return SDValue(E, 0);
5231 }
5232
5233 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5234 N->setFlags(Flags);
5235 createOperands(N, Ops);
5236 CSEMap.InsertNode(N, IP);
5237 } else {
5238 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5239 createOperands(N, Ops);
5240 }
5241
5242 InsertNode(N);
5243 SDValue V = SDValue(N, 0);
5244 NewSDValueDbgMsg(V, "Creating new node: ", this);
5245 return V;
5246 }
5247
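// Three-operand getNode: handles FMA constant folding, SETCC folding, SELECT /
// VSELECT simplification, and the INSERT_VECTOR_ELT / INSERT_SUBVECTOR special
// cases before falling back to the common memoize-or-create path.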
5248 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5249 SDValue N1, SDValue N2, SDValue N3,
5250 const SDNodeFlags Flags) {
5251 // Perform various simplifications.
5252 switch (Opcode) {
5253 case ISD::FMA: {
5254 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
5255 assert(N1.getValueType() == VT && N2.getValueType() == VT &&
5256 N3.getValueType() == VT && "FMA types must match!");
5257 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
5258 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
5259 ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
5260 if (N1CFP && N2CFP && N3CFP) {
5261 APFloat V1 = N1CFP->getValueAPF();
5262 const APFloat &V2 = N2CFP->getValueAPF();
5263 const APFloat &V3 = N3CFP->getValueAPF();
5264 APFloat::opStatus s =
5265 V1.fusedMultiplyAdd(V2, V3, APFloat::rmNearestTiesToEven);
5266 if (!TLI->hasFloatingPointExceptions() || s != APFloat::opInvalidOp)
5267 return getConstantFP(V1, DL, VT);
5268 }
5269 break;
5270 }
5271 case ISD::BUILD_VECTOR: {
5272 // Attempt to simplify BUILD_VECTOR.
5273 SDValue Ops[] = {N1, N2, N3};
5274 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
5275 return V;
5276 break;
5277 }
5278 case ISD::CONCAT_VECTORS: {
5279 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF.
5280 SDValue Ops[] = {N1, N2, N3};
5281 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this))
5282 return V;
5283 break;
5284 }
5285 case ISD::SETCC: {
5286 assert(VT.isInteger() && "SETCC result type must be an integer!");
5287 assert(N1.getValueType() == N2.getValueType() &&
5288 "SETCC operands must have the same type!");
5289 assert(VT.isVector() == N1.getValueType().isVector() &&
5290 "SETCC type should be vector iff the operand type is vector!");
5291 assert((!VT.isVector() ||
5292 VT.getVectorNumElements() == N1.getValueType().getVectorNumElements()) &&
5293 "SETCC vector element counts must match!");
5294 // Use FoldSetCC to simplify SETCC's.
5295 if (SDValue V = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL))
5296 return V;
5297 // Vector constant folding.
5298 SDValue Ops[] = {N1, N2, N3};
5299 if (SDValue V = FoldConstantVectorArithmetic(Opcode, DL, VT, Ops)) {
5300 NewSDValueDbgMsg(V, "New node vector constant folding: ", this);
5301 return V;
5302 }
5303 break;
5304 }
5305 case ISD::SELECT:
5306 case ISD::VSELECT:
5307 if (SDValue V = simplifySelect(N1, N2, N3))
5308 return V;
5309 break;
5310 case ISD::VECTOR_SHUFFLE:
5311 llvm_unreachable("should use getVectorShuffle constructor!");
5312 case ISD::INSERT_VECTOR_ELT: {
5313 ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3);
5314 // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF
5315 if (N3C && N3C->getZExtValue() >= N1.getValueType().getVectorNumElements())
5316 return getUNDEF(VT);
5317 break;
5318 }
5319 case ISD::INSERT_SUBVECTOR: {
5320 SDValue Index = N3;
5321 if (VT.isSimple() && N1.getValueType().isSimple()
5322 && N2.getValueType().isSimple()) {
5323 assert(VT.isVector() && N1.getValueType().isVector() &&
5324 N2.getValueType().isVector() &&
5325              "Insert subvector VTs must be vectors");
5326 assert(VT == N1.getValueType() &&
5327 "Dest and insert subvector source types must match!");
5328 assert(N2.getSimpleValueType() <= N1.getSimpleValueType() &&
5329 "Insert subvector must be from smaller vector to larger vector!");
5330 if (isa<ConstantSDNode>(Index)) {
5331 assert((N2.getValueType().getVectorNumElements() +
5332 cast<ConstantSDNode>(Index)->getZExtValue()
5333 <= VT.getVectorNumElements())
5334 && "Insert subvector overflow!");
5335 }
5336
5337 // Trivial insertion.
5338 if (VT.getSimpleVT() == N2.getSimpleValueType())
5339 return N2;
5340 }
5341 break;
5342 }
5343 case ISD::BITCAST:
5344 // Fold bit_convert nodes from a type to themselves.
5345 if (N1.getValueType() == VT)
5346 return N1;
5347 break;
5348 }
5349
5350 // Memoize node if it doesn't produce a flag.
5351 SDNode *N;
5352 SDVTList VTs = getVTList(VT);
5353 SDValue Ops[] = {N1, N2, N3};
5354 if (VT != MVT::Glue) {
5355 FoldingSetNodeID ID;
5356 AddNodeIDNode(ID, Opcode, VTs, Ops);
5357 void *IP = nullptr;
5358 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
5359 E->intersectFlagsWith(Flags);
5360 return SDValue(E, 0);
5361 }
5362
5363 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5364 N->setFlags(Flags);
5365 createOperands(N, Ops);
5366 CSEMap.InsertNode(N, IP);
5367 } else {
5368 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
5369 createOperands(N, Ops);
5370 }
5371
5372 InsertNode(N);
5373 SDValue V = SDValue(N, 0);
5374 NewSDValueDbgMsg(V, "Creating new node: ", this);
5375 return V;
5376 }
5377
5378 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5379 SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
5380 SDValue Ops[] = { N1, N2, N3, N4 };
5381 return getNode(Opcode, DL, VT, Ops);
5382 }
5383
5384 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
5385 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
5386 SDValue N5) {
5387 SDValue Ops[] = { N1, N2, N3, N4, N5 };
5388 return getNode(Opcode, DL, VT, Ops);
5389 }
5390
5391 /// getStackArgumentTokenFactor - Compute a TokenFactor to force all
5392 /// the incoming stack arguments to be loaded from the stack.
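/// A stack argument is recognized as a load hanging off the entry token whose
/// base pointer is a fixed (negative index) frame object; the chain result of
/// each such load is added to the TokenFactor.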
5393 SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
5394 SmallVector<SDValue, 8> ArgChains;
5395
5396 // Include the original chain at the beginning of the list. When this is
5397 // used by target LowerCall hooks, this helps legalize find the
5398 // CALLSEQ_BEGIN node.
5399 ArgChains.push_back(Chain);
5400
5401 // Add a chain value for each stack argument.
5402 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
5403 UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
5404 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
5405 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
5406 if (FI->getIndex() < 0)
5407 ArgChains.push_back(SDValue(L, 1));
5408
5409 // Build a tokenfactor for all the chains.
5410 return getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
5411 }
5412
5413 /// getMemsetValue - Vectorized representation of the memset value
5414 /// operand.
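/// A constant byte is splatted directly with APInt::getSplat; a variable byte
/// is zero-extended and multiplied by 0x0101... to replicate it across the
/// value, then bitcast or splatted into the requested type.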
5415 static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
5416 const SDLoc &dl) {
5417 assert(!Value.isUndef());
5418
5419 unsigned NumBits = VT.getScalarSizeInBits();
5420 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
5421 assert(C->getAPIntValue().getBitWidth() == 8);
5422 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
5423 if (VT.isInteger()) {
5424 bool IsOpaque = VT.getSizeInBits() > 64 ||
5425 !DAG.getTargetLoweringInfo().isLegalStoreImmediate(C->getSExtValue());
5426 return DAG.getConstant(Val, dl, VT, false, IsOpaque);
5427 }
5428 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), dl,
5429 VT);
5430 }
5431
5432 assert(Value.getValueType() == MVT::i8 && "memset with non-byte fill value?");
5433 EVT IntVT = VT.getScalarType();
5434 if (!IntVT.isInteger())
5435 IntVT = EVT::getIntegerVT(*DAG.getContext(), IntVT.getSizeInBits());
5436
5437 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, IntVT, Value);
5438 if (NumBits > 8) {
5439 // Use a multiplication with 0x010101... to extend the input to the
5440 // required length.
5441 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
5442 Value = DAG.getNode(ISD::MUL, dl, IntVT, Value,
5443 DAG.getConstant(Magic, dl, IntVT));
5444 }
5445
5446 if (VT != Value.getValueType() && !VT.isInteger())
5447 Value = DAG.getBitcast(VT.getScalarType(), Value);
5448 if (VT != Value.getValueType())
5449 Value = DAG.getSplatBuildVector(VT, dl, Value);
5450
5451 return Value;
5452 }
5453
5454 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only
5455 /// used when a memcpy is turned into a memset when the source is a constant
5456 /// string ptr.
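/// A null slice means "all zeros" and yields a zero constant. Otherwise up to
/// VT-size bytes of the string are packed into an integer immediate (honoring
/// the target's endianness); an empty SDValue is returned when the target
/// would rather load the value than materialize the immediate.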
5457 static SDValue getMemsetStringVal(EVT VT, const SDLoc &dl, SelectionDAG &DAG,
5458 const TargetLowering &TLI,
5459 const ConstantDataArraySlice &Slice) {
5460 // Handle vector with all elements zero.
5461 if (Slice.Array == nullptr) {
5462 if (VT.isInteger())
5463 return DAG.getConstant(0, dl, VT);
5464 else if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128)
5465 return DAG.getConstantFP(0.0, dl, VT);
5466 else if (VT.isVector()) {
5467 unsigned NumElts = VT.getVectorNumElements();
5468 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
5469 return DAG.getNode(ISD::BITCAST, dl, VT,
5470 DAG.getConstant(0, dl,
5471 EVT::getVectorVT(*DAG.getContext(),
5472 EltVT, NumElts)));
5473 } else
5474 llvm_unreachable("Expected type!");
5475 }
5476
5477 assert(!VT.isVector() && "Can't handle vector type here!");
5478 unsigned NumVTBits = VT.getSizeInBits();
5479 unsigned NumVTBytes = NumVTBits / 8;
5480 unsigned NumBytes = std::min(NumVTBytes, unsigned(Slice.Length));
5481
5482 APInt Val(NumVTBits, 0);
5483 if (DAG.getDataLayout().isLittleEndian()) {
5484 for (unsigned i = 0; i != NumBytes; ++i)
5485 Val |= (uint64_t)(unsigned char)Slice[i] << i*8;
5486 } else {
5487 for (unsigned i = 0; i != NumBytes; ++i)
5488 Val |= (uint64_t)(unsigned char)Slice[i] << (NumVTBytes-i-1)*8;
5489 }
5490
5491 // If the "cost" of materializing the integer immediate is less than the cost
5492 // of a load, then it is cost effective to turn the load into the immediate.
5493 Type *Ty = VT.getTypeForEVT(*DAG.getContext());
5494 if (TLI.shouldConvertConstantLoadToIntImm(Val, Ty))
5495 return DAG.getConstant(Val, dl, VT);
5496 return SDValue(nullptr, 0);
5497 }
5498
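// Return Base + Offset as an ISD::ADD of Base and a constant of Base's own
// value type.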
5499 SDValue SelectionDAG::getMemBasePlusOffset(SDValue Base, unsigned Offset,
5500 const SDLoc &DL) {
5501 EVT VT = Base.getValueType();
5502 return getNode(ISD::ADD, DL, VT, Base, getConstant(Offset, DL, VT));
5503 }
5504
5505 /// Returns true if memcpy source is constant data.
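/// Recognizes a bare GlobalAddress or (GlobalAddress + constant) source and,
/// if the global's initializer is constant data, returns the addressed slice.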
5506 static bool isMemSrcFromConstant(SDValue Src, ConstantDataArraySlice &Slice) {
5507 uint64_t SrcDelta = 0;
5508 GlobalAddressSDNode *G = nullptr;
5509 if (Src.getOpcode() == ISD::GlobalAddress)
5510 G = cast<GlobalAddressSDNode>(Src);
5511 else if (Src.getOpcode() == ISD::ADD &&
5512 Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
5513 Src.getOperand(1).getOpcode() == ISD::Constant) {
5514 G = cast<GlobalAddressSDNode>(Src.getOperand(0));
5515 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
5516 }
5517 if (!G)
5518 return false;
5519
5520 return getConstantDataArrayInfo(G->getGlobal(), Slice, 8,
5521 SrcDelta + G->getOffset());
5522 }
5523
5524 /// Determines the optimal series of memory ops to replace the memset / memcpy.
5525 /// Return true if the number of memory ops is below the threshold (Limit).
5526 /// It returns the types of the sequence of memory ops to perform
5527 /// memset / memcpy by reference.
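/// The target is first asked for an optimal type via getOptimalMemOpType; if
/// it declines, the largest legal integer type satisfying the destination
/// alignment is chosen. The selected types then greedily cover Size, possibly
/// finishing with a single unaligned, overlapping operation, and the search
/// fails once more than Limit operations would be required.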
5528 static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
5529 unsigned Limit, uint64_t Size,
5530 unsigned DstAlign, unsigned SrcAlign,
5531 bool IsMemset,
5532 bool ZeroMemset,
5533 bool MemcpyStrSrc,
5534 bool AllowOverlap,
5535 unsigned DstAS, unsigned SrcAS,
5536 SelectionDAG &DAG,
5537 const TargetLowering &TLI) {
5538 assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
5539 "Expecting memcpy / memset source to meet alignment requirement!");
5540 // If 'SrcAlign' is zero, that means the memory operation does not need to
5541 // load the value, i.e. memset or memcpy from constant string. Otherwise,
5542 // it's the inferred alignment of the source. 'DstAlign', on the other hand,
5543 // is the specified alignment of the memory operation. If it is zero, that
5544 // means it's possible to change the alignment of the destination.
5545 // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
5546 // not need to be loaded.
5547 EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
5548 IsMemset, ZeroMemset, MemcpyStrSrc,
5549 DAG.getMachineFunction());
5550
5551 if (VT == MVT::Other) {
5552 // Use the largest integer type whose alignment constraints are satisfied.
5553 // We only need to check DstAlign here as SrcAlign is always greater or
5554 // equal to DstAlign (or zero).
5555 VT = MVT::i64;
5556 while (DstAlign && DstAlign < VT.getSizeInBits() / 8 &&
5557 !TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign))
5558 VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
5559 assert(VT.isInteger());
5560
5561 // Find the largest legal integer type.
5562 MVT LVT = MVT::i64;
5563 while (!TLI.isTypeLegal(LVT))
5564 LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
5565 assert(LVT.isInteger());
5566
5567 // If the type we've chosen is larger than the largest legal integer type
5568 // then use that instead.
5569 if (VT.bitsGT(LVT))
5570 VT = LVT;
5571 }
5572
5573 unsigned NumMemOps = 0;
5574 while (Size != 0) {
5575 unsigned VTSize = VT.getSizeInBits() / 8;
5576 while (VTSize > Size) {
5577 // For now, only use non-vector load / store's for the left-over pieces.
5578 EVT NewVT = VT;
5579 unsigned NewVTSize;
5580
5581 bool Found = false;
5582 if (VT.isVector() || VT.isFloatingPoint()) {
5583 NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
5584 if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) &&
5585 TLI.isSafeMemOpType(NewVT.getSimpleVT()))
5586 Found = true;
5587 else if (NewVT == MVT::i64 &&
5588 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
5589 TLI.isSafeMemOpType(MVT::f64)) {
5590 // i64 is usually not legal on 32-bit targets, but f64 may be.
5591 NewVT = MVT::f64;
5592 Found = true;
5593 }
5594 }
5595
5596 if (!Found) {
5597 do {
5598 NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
5599 if (NewVT == MVT::i8)
5600 break;
5601 } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT()));
5602 }
5603 NewVTSize = NewVT.getSizeInBits() / 8;
5604
5605       // If the new VT cannot cover all of the remaining bits, then consider
5606       // issuing an unaligned and overlapping load / store (or a pair of them).
5607 bool Fast;
5608 if (NumMemOps && AllowOverlap && NewVTSize < Size &&
5609 TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign, &Fast) &&
5610 Fast)
5611 VTSize = Size;
5612 else {
5613 VT = NewVT;
5614 VTSize = NewVTSize;
5615 }
5616 }
5617
5618 if (++NumMemOps > Limit)
5619 return false;
5620
5621 MemOps.push_back(VT);
5622 Size -= VTSize;
5623 }
5624
5625 return true;
5626 }
5627
5628 static bool shouldLowerMemFuncForSize(const MachineFunction &MF) {
5629 // On Darwin, -Os means optimize for size without hurting performance, so
5630 // only really optimize for size when -Oz (MinSize) is used.
5631 if (MF.getTarget().getTargetTriple().isOSDarwin())
5632 return MF.getFunction().optForMinSize();
5633 return MF.getFunction().optForSize();
5634 }
5635
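// Chain the loads in [From, To) through a single TokenFactor and re-issue the
// matching stores so that each store depends on all of those loads; this keeps
// the loads and stores from being interleaved when the expanded memcpy is
// scheduled.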
5636 static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
5637 SmallVector<SDValue, 32> &OutChains, unsigned From,
5638 unsigned To, SmallVector<SDValue, 16> &OutLoadChains,
5639 SmallVector<SDValue, 16> &OutStoreChains) {
5640 assert(OutLoadChains.size() && "Missing loads in memcpy inlining");
5641 assert(OutStoreChains.size() && "Missing stores in memcpy inlining");
5642 SmallVector<SDValue, 16> GluedLoadChains;
5643 for (unsigned i = From; i < To; ++i) {
5644 OutChains.push_back(OutLoadChains[i]);
5645 GluedLoadChains.push_back(OutLoadChains[i]);
5646 }
5647
5648 // Chain for all loads.
5649 SDValue LoadToken = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
5650 GluedLoadChains);
5651
5652 for (unsigned i = From; i < To; ++i) {
5653 StoreSDNode *ST = dyn_cast<StoreSDNode>(OutStoreChains[i]);
5654 SDValue NewStore = DAG.getTruncStore(LoadToken, dl, ST->getValue(),
5655 ST->getBasePtr(), ST->getMemoryVT(),
5656 ST->getMemOperand());
5657 OutChains.push_back(NewStore);
5658 }
5659 }
5660
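// Expand an inline memcpy: pick the memory-op types, widen the alignment of a
// destination stack object if that is free, emit either direct stores of
// constant source data or load/store pairs, and finally chain (and optionally
// gang up) the resulting memory operations.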
5661 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
5662 SDValue Chain, SDValue Dst, SDValue Src,
5663 uint64_t Size, unsigned Align,
5664 bool isVol, bool AlwaysInline,
5665 MachinePointerInfo DstPtrInfo,
5666 MachinePointerInfo SrcPtrInfo) {
5667 // Turn a memcpy of undef to nop.
5668 if (Src.isUndef())
5669 return Chain;
5670
5671 // Expand memcpy to a series of load and store ops if the size operand falls
5672 // below a certain threshold.
5673 // TODO: In the AlwaysInline case, if the size is big then generate a loop
5674 // rather than maybe a humongous number of loads and stores.
5675 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5676 const DataLayout &DL = DAG.getDataLayout();
5677 LLVMContext &C = *DAG.getContext();
5678 std::vector<EVT> MemOps;
5679 bool DstAlignCanChange = false;
5680 MachineFunction &MF = DAG.getMachineFunction();
5681 MachineFrameInfo &MFI = MF.getFrameInfo();
5682 bool OptSize = shouldLowerMemFuncForSize(MF);
5683 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
5684 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
5685 DstAlignCanChange = true;
5686 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
5687 if (Align > SrcAlign)
5688 SrcAlign = Align;
5689 ConstantDataArraySlice Slice;
5690 bool CopyFromConstant = isMemSrcFromConstant(Src, Slice);
5691 bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr;
5692 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
5693
5694 if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
5695 (DstAlignCanChange ? 0 : Align),
5696 (isZeroConstant ? 0 : SrcAlign),
5697 false, false, CopyFromConstant, true,
5698 DstPtrInfo.getAddrSpace(),
5699 SrcPtrInfo.getAddrSpace(),
5700 DAG, TLI))
5701 return SDValue();
5702
5703 if (DstAlignCanChange) {
5704 Type *Ty = MemOps[0].getTypeForEVT(C);
5705 unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty);
5706
5707 // Don't promote to an alignment that would require dynamic stack
5708 // realignment.
5709 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
5710 if (!TRI->needsStackRealignment(MF))
5711 while (NewAlign > Align &&
5712 DL.exceedsNaturalStackAlignment(NewAlign))
5713 NewAlign /= 2;
5714
5715 if (NewAlign > Align) {
5716 // Give the stack frame object a larger alignment if needed.
5717 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
5718 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
5719 Align = NewAlign;
5720 }
5721 }
5722
5723 MachineMemOperand::Flags MMOFlags =
5724 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
5725 SmallVector<SDValue, 16> OutLoadChains;
5726 SmallVector<SDValue, 16> OutStoreChains;
5727 SmallVector<SDValue, 32> OutChains;
5728 unsigned NumMemOps = MemOps.size();
5729 uint64_t SrcOff = 0, DstOff = 0;
5730 for (unsigned i = 0; i != NumMemOps; ++i) {
5731 EVT VT = MemOps[i];
5732 unsigned VTSize = VT.getSizeInBits() / 8;
5733 SDValue Value, Store;
5734
5735 if (VTSize > Size) {
5736 // Issuing an unaligned load / store pair that overlaps with the previous
5737 // pair. Adjust the offset accordingly.
5738 assert(i == NumMemOps-1 && i != 0);
5739 SrcOff -= VTSize - Size;
5740 DstOff -= VTSize - Size;
5741 }
5742
5743 if (CopyFromConstant &&
5744 (isZeroConstant || (VT.isInteger() && !VT.isVector()))) {
5745 // It's unlikely a store of a vector immediate can be done in a single
5746 // instruction. It would require a load from a constantpool first.
5747 // We only handle zero vectors here.
5748 // FIXME: Handle other cases where store of vector immediate is done in
5749 // a single instruction.
5750 ConstantDataArraySlice SubSlice;
5751 if (SrcOff < Slice.Length) {
5752 SubSlice = Slice;
5753 SubSlice.move(SrcOff);
5754 } else {
5755 // This is an out-of-bounds access and hence UB. Pretend we read zero.
5756 SubSlice.Array = nullptr;
5757 SubSlice.Offset = 0;
5758 SubSlice.Length = VTSize;
5759 }
5760 Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice);
5761 if (Value.getNode()) {
5762 Store = DAG.getStore(Chain, dl, Value,
5763 DAG.getMemBasePlusOffset(Dst, DstOff, dl),
5764 DstPtrInfo.getWithOffset(DstOff), Align,
5765 MMOFlags);
5766 OutChains.push_back(Store);
5767 }
5768 }
5769
5770 if (!Store.getNode()) {
5771 // The type might not be legal for the target. This should only happen
5772 // if the type is smaller than a legal type, as on PPC, so the right
5773 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
5774 // to Load/Store if NVT==VT.
5775 // FIXME does the case above also need this?
5776 EVT NVT = TLI.getTypeToTransformTo(C, VT);
5777 assert(NVT.bitsGE(VT));
5778
5779 bool isDereferenceable =
5780 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
5781 MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
5782 if (isDereferenceable)
5783 SrcMMOFlags |= MachineMemOperand::MODereferenceable;
5784
5785 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
5786 DAG.getMemBasePlusOffset(Src, SrcOff, dl),
5787 SrcPtrInfo.getWithOffset(SrcOff), VT,
5788 MinAlign(SrcAlign, SrcOff), SrcMMOFlags);
5789 OutLoadChains.push_back(Value.getValue(1));
5790
5791 Store = DAG.getTruncStore(
5792 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
5793 DstPtrInfo.getWithOffset(DstOff), VT, Align, MMOFlags);
5794 OutStoreChains.push_back(Store);
5795 }
5796 SrcOff += VTSize;
5797 DstOff += VTSize;
5798 Size -= VTSize;
5799 }
5800
5801 unsigned GluedLdStLimit = MaxLdStGlue == 0 ?
5802 TLI.getMaxGluedStoresPerMemcpy() : MaxLdStGlue;
5803 unsigned NumLdStInMemcpy = OutStoreChains.size();
5804
5805 if (NumLdStInMemcpy) {
5806     // The memcpy may have been converted to a memset when it copies constant
5807     // data; in that case there are only stores and no loads, so there is
5808     // nothing to gang up.
5809 if ((GluedLdStLimit <= 1) || !EnableMemCpyDAGOpt) {
5810       // If the target does not care, just leave the chains as they are.
5811 for (unsigned i = 0; i < NumLdStInMemcpy; ++i) {
5812 OutChains.push_back(OutLoadChains[i]);
5813 OutChains.push_back(OutStoreChains[i]);
5814 }
5815 } else {
5816 // Ld/St less than/equal limit set by target.
5817 if (NumLdStInMemcpy <= GluedLdStLimit) {
5818 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
5819 NumLdStInMemcpy, OutLoadChains,
5820 OutStoreChains);
5821 } else {
5822 unsigned NumberLdChain = NumLdStInMemcpy / GluedLdStLimit;
5823 unsigned RemainingLdStInMemcpy = NumLdStInMemcpy % GluedLdStLimit;
5824 unsigned GlueIter = 0;
5825
5826 for (unsigned cnt = 0; cnt < NumberLdChain; ++cnt) {
5827 unsigned IndexFrom = NumLdStInMemcpy - GlueIter - GluedLdStLimit;
5828 unsigned IndexTo = NumLdStInMemcpy - GlueIter;
5829
5830 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, IndexFrom, IndexTo,
5831 OutLoadChains, OutStoreChains);
5832 GlueIter += GluedLdStLimit;
5833 }
5834
5835 // Residual ld/st.
5836 if (RemainingLdStInMemcpy) {
5837 chainLoadsAndStoresForMemcpy(DAG, dl, OutChains, 0,
5838 RemainingLdStInMemcpy, OutLoadChains,
5839 OutStoreChains);
5840 }
5841 }
5842 }
5843 }
5844 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
5845 }
5846
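// Memmove lowering differs from memcpy lowering in that all loads are emitted
// and chained before any store is emitted, since the source and destination
// ranges may overlap.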
5847 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
5848 SDValue Chain, SDValue Dst, SDValue Src,
5849 uint64_t Size, unsigned Align,
5850 bool isVol, bool AlwaysInline,
5851 MachinePointerInfo DstPtrInfo,
5852 MachinePointerInfo SrcPtrInfo) {
5853 // Turn a memmove of undef to nop.
5854 if (Src.isUndef())
5855 return Chain;
5856
5857 // Expand memmove to a series of load and store ops if the size operand falls
5858 // below a certain threshold.
5859 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5860 const DataLayout &DL = DAG.getDataLayout();
5861 LLVMContext &C = *DAG.getContext();
5862 std::vector<EVT> MemOps;
5863 bool DstAlignCanChange = false;
5864 MachineFunction &MF = DAG.getMachineFunction();
5865 MachineFrameInfo &MFI = MF.getFrameInfo();
5866 bool OptSize = shouldLowerMemFuncForSize(MF);
5867 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
5868 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
5869 DstAlignCanChange = true;
5870 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
5871 if (Align > SrcAlign)
5872 SrcAlign = Align;
5873 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
5874
5875 if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
5876 (DstAlignCanChange ? 0 : Align), SrcAlign,
5877 false, false, false, false,
5878 DstPtrInfo.getAddrSpace(),
5879 SrcPtrInfo.getAddrSpace(),
5880 DAG, TLI))
5881 return SDValue();
5882
5883 if (DstAlignCanChange) {
5884 Type *Ty = MemOps[0].getTypeForEVT(C);
5885 unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty);
5886 if (NewAlign > Align) {
5887 // Give the stack frame object a larger alignment if needed.
5888 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
5889 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
5890 Align = NewAlign;
5891 }
5892 }
5893
5894 MachineMemOperand::Flags MMOFlags =
5895 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
5896 uint64_t SrcOff = 0, DstOff = 0;
5897 SmallVector<SDValue, 8> LoadValues;
5898 SmallVector<SDValue, 8> LoadChains;
5899 SmallVector<SDValue, 8> OutChains;
5900 unsigned NumMemOps = MemOps.size();
5901 for (unsigned i = 0; i < NumMemOps; i++) {
5902 EVT VT = MemOps[i];
5903 unsigned VTSize = VT.getSizeInBits() / 8;
5904 SDValue Value;
5905
5906 bool isDereferenceable =
5907 SrcPtrInfo.getWithOffset(SrcOff).isDereferenceable(VTSize, C, DL);
5908 MachineMemOperand::Flags SrcMMOFlags = MMOFlags;
5909 if (isDereferenceable)
5910 SrcMMOFlags |= MachineMemOperand::MODereferenceable;
5911
5912 Value =
5913 DAG.getLoad(VT, dl, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl),
5914 SrcPtrInfo.getWithOffset(SrcOff), SrcAlign, SrcMMOFlags);
5915 LoadValues.push_back(Value);
5916 LoadChains.push_back(Value.getValue(1));
5917 SrcOff += VTSize;
5918 }
5919 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
5920 OutChains.clear();
5921 for (unsigned i = 0; i < NumMemOps; i++) {
5922 EVT VT = MemOps[i];
5923 unsigned VTSize = VT.getSizeInBits() / 8;
5924 SDValue Store;
5925
5926 Store = DAG.getStore(Chain, dl, LoadValues[i],
5927 DAG.getMemBasePlusOffset(Dst, DstOff, dl),
5928 DstPtrInfo.getWithOffset(DstOff), Align, MMOFlags);
5929 OutChains.push_back(Store);
5930 DstOff += VTSize;
5931 }
5932
5933 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
5934 }
5935
5936 /// Lower the call to 'memset' intrinsic function into a series of store
5937 /// operations.
5938 ///
5939 /// \param DAG Selection DAG where lowered code is placed.
5940 /// \param dl Link to corresponding IR location.
5941 /// \param Chain Control flow dependency.
5942 /// \param Dst Pointer to destination memory location.
5943 /// \param Src Value of byte to write into the memory.
5944 /// \param Size Number of bytes to write.
5945 /// \param Align Alignment of the destination in bytes.
5946 /// \param isVol True if destination is volatile.
5947 /// \param DstPtrInfo IR information on the memory pointer.
5948 /// \returns New head in the control flow, if lowering was successful, empty
5949 /// SDValue otherwise.
5950 ///
5951 /// The function tries to replace 'llvm.memset' intrinsic with several store
5952 /// operations and value calculation code. This is usually profitable for small
5953 /// memory size.
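///
/// For illustration (this example is not part of the original comment): on a
/// typical 64-bit target, a 'memset(p, 0, 16)' whose size and value are known
/// may be expanded into two i64 stores of zero joined by a TokenFactor,
/// avoiding the library call entirely.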
5954 static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
5955 SDValue Chain, SDValue Dst, SDValue Src,
5956 uint64_t Size, unsigned Align, bool isVol,
5957 MachinePointerInfo DstPtrInfo) {
5958 // Turn a memset of undef to nop.
5959 if (Src.isUndef())
5960 return Chain;
5961
5962 // Expand memset to a series of store ops if the size operand
5963 // falls below a certain threshold.
5964 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5965 std::vector<EVT> MemOps;
5966 bool DstAlignCanChange = false;
5967 MachineFunction &MF = DAG.getMachineFunction();
5968 MachineFrameInfo &MFI = MF.getFrameInfo();
5969 bool OptSize = shouldLowerMemFuncForSize(MF);
5970 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
5971 if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
5972 DstAlignCanChange = true;
5973 bool IsZeroVal =
5974 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
5975 if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
5976 Size, (DstAlignCanChange ? 0 : Align), 0,
5977 true, IsZeroVal, false, true,
5978 DstPtrInfo.getAddrSpace(), ~0u,
5979 DAG, TLI))
5980 return SDValue();
5981
5982 if (DstAlignCanChange) {
5983 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
5984 unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
5985 if (NewAlign > Align) {
5986 // Give the stack frame object a larger alignment if needed.
5987 if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
5988 MFI.setObjectAlignment(FI->getIndex(), NewAlign);
5989 Align = NewAlign;
5990 }
5991 }
5992
5993 SmallVector<SDValue, 8> OutChains;
5994 uint64_t DstOff = 0;
5995 unsigned NumMemOps = MemOps.size();
5996
5997 // Find the largest store and generate the bit pattern for it.
5998 EVT LargestVT = MemOps[0];
5999 for (unsigned i = 1; i < NumMemOps; i++)
6000 if (MemOps[i].bitsGT(LargestVT))
6001 LargestVT = MemOps[i];
6002 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
6003
6004 for (unsigned i = 0; i < NumMemOps; i++) {
6005 EVT VT = MemOps[i];
6006 unsigned VTSize = VT.getSizeInBits() / 8;
6007 if (VTSize > Size) {
6008 // The final store is wider than the number of bytes remaining, so it
6009 // overlaps the previous store. Adjust the offset accordingly.
6010 assert(i == NumMemOps-1 && i != 0);
6011 DstOff -= VTSize - Size;
6012 }
6013
6014 // If this store is smaller than the largest store, see whether we can get
6015 // the smaller value for free with a truncate.
6016 SDValue Value = MemSetValue;
6017 if (VT.bitsLT(LargestVT)) {
6018 if (!LargestVT.isVector() && !VT.isVector() &&
6019 TLI.isTruncateFree(LargestVT, VT))
6020 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
6021 else
6022 Value = getMemsetValue(Src, VT, DAG, dl);
6023 }
6024 assert(Value.getValueType() == VT && "Value with wrong type.");
6025 SDValue Store = DAG.getStore(
6026 Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
6027 DstPtrInfo.getWithOffset(DstOff), Align,
6028 isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone);
6029 OutChains.push_back(Store);
6030 DstOff += VT.getSizeInBits() / 8;
6031 Size -= VTSize;
6032 }
6033
6034 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
6035 }
6036
6037 static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI,
6038 unsigned AS) {
6039 // Lowering memcpy / memset / memmove intrinsics to calls is only valid if all
6040 // pointer operands can be losslessly bitcast to pointers of address space 0.
6041 if (AS != 0 && !TLI->isNoopAddrSpaceCast(AS, 0)) {
6042 report_fatal_error("cannot lower memory intrinsic in address space " +
6043 Twine(AS));
6044 }
6045 }
6046
6047 SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
6048 SDValue Src, SDValue Size, unsigned Align,
6049 bool isVol, bool AlwaysInline, bool isTailCall,
6050 MachinePointerInfo DstPtrInfo,
6051 MachinePointerInfo SrcPtrInfo) {
6052 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
6053
6054 // Check to see if we should lower the memcpy to loads and stores first.
6055 // For cases within the target-specified limits, this is the best choice.
6056 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
6057 if (ConstantSize) {
6058 // Memcpy with size zero? Just return the original chain.
6059 if (ConstantSize->isNullValue())
6060 return Chain;
6061
6062 SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
6063 ConstantSize->getZExtValue(),Align,
6064 isVol, false, DstPtrInfo, SrcPtrInfo);
6065 if (Result.getNode())
6066 return Result;
6067 }
6068
6069 // Then check to see if we should lower the memcpy with target-specific
6070 // code. If the target chooses to do this, this is the next best.
6071 if (TSI) {
6072 SDValue Result = TSI->EmitTargetCodeForMemcpy(
6073 *this, dl, Chain, Dst, Src, Size, Align, isVol, AlwaysInline,
6074 DstPtrInfo, SrcPtrInfo);
6075 if (Result.getNode())
6076 return Result;
6077 }
6078
6079 // If we really need inline code and the target declined to provide it,
6080 // use a (potentially long) sequence of loads and stores.
6081 if (AlwaysInline) {
6082 assert(ConstantSize && "AlwaysInline requires a constant size!");
6083 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
6084 ConstantSize->getZExtValue(), Align, isVol,
6085 true, DstPtrInfo, SrcPtrInfo);
6086 }
6087
6088 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
6089 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
6090
6091 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
6092 // memcpy is not guaranteed to be safe. libc memcpys aren't required to
6093 // respect volatile, so they may do things like read or write memory
6094 // beyond the given memory regions. But fixing this isn't easy, and most
6095 // people don't care.
6096
6097 // Emit a library call.
6098 TargetLowering::ArgListTy Args;
6099 TargetLowering::ArgListEntry Entry;
6100 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6101 Entry.Node = Dst; Args.push_back(Entry);
6102 Entry.Node = Src; Args.push_back(Entry);
6103 Entry.Node = Size; Args.push_back(Entry);
6104 // FIXME: pass in SDLoc
6105 TargetLowering::CallLoweringInfo CLI(*this);
6106 CLI.setDebugLoc(dl)
6107 .setChain(Chain)
6108 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMCPY),
6109 Dst.getValueType().getTypeForEVT(*getContext()),
6110 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMCPY),
6111 TLI->getPointerTy(getDataLayout())),
6112 std::move(Args))
6113 .setDiscardResult()
6114 .setTailCall(isTailCall);
6115
6116 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
6117 return CallResult.second;
6118 }
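// Illustrative note (not part of the original source): when neither the
// inline expansion nor the target hook succeeds, the code above emits the
// moral equivalent of
//   (void)memcpy(Dst, Src, Size);
// i.e. a call through the target's RTLIB::MEMCPY name and calling
// convention whose return value is discarded.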
6119
6120 SDValue SelectionDAG::getAtomicMemcpy(SDValue Chain, const SDLoc &dl,
6121 SDValue Dst, unsigned DstAlign,
6122 SDValue Src, unsigned SrcAlign,
6123 SDValue Size, Type *SizeTy,
6124 unsigned ElemSz, bool isTailCall,
6125 MachinePointerInfo DstPtrInfo,
6126 MachinePointerInfo SrcPtrInfo) {
6127 // Emit a library call.
6128 TargetLowering::ArgListTy Args;
6129 TargetLowering::ArgListEntry Entry;
6130 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6131 Entry.Node = Dst;
6132 Args.push_back(Entry);
6133
6134 Entry.Node = Src;
6135 Args.push_back(Entry);
6136
6137 Entry.Ty = SizeTy;
6138 Entry.Node = Size;
6139 Args.push_back(Entry);
6140
6141 RTLIB::Libcall LibraryCall =
6142 RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElemSz);
6143 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
6144 report_fatal_error("Unsupported element size");
6145
6146 TargetLowering::CallLoweringInfo CLI(*this);
6147 CLI.setDebugLoc(dl)
6148 .setChain(Chain)
6149 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
6150 Type::getVoidTy(*getContext()),
6151 getExternalSymbol(TLI->getLibcallName(LibraryCall),
6152 TLI->getPointerTy(getDataLayout())),
6153 std::move(Args))
6154 .setDiscardResult()
6155 .setTailCall(isTailCall);
6156
6157 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
6158 return CallResult.second;
6159 }
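// Illustrative note (not part of the original source): ElemSz selects one of
// the element-atomic runtime routines, e.g. ElemSz == 4 is expected to map to
// '__llvm_memcpy_element_unordered_atomic_4'; element sizes without a
// matching RTLIB entry hit the report_fatal_error above.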
6160
6161 SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
6162 SDValue Src, SDValue Size, unsigned Align,
6163 bool isVol, bool isTailCall,
6164 MachinePointerInfo DstPtrInfo,
6165 MachinePointerInfo SrcPtrInfo) {
6166 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
6167
6168 // Check to see if we should lower the memmove to loads and stores first.
6169 // For cases within the target-specified limits, this is the best choice.
6170 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
6171 if (ConstantSize) {
6172 // Memmove with size zero? Just return the original chain.
6173 if (ConstantSize->isNullValue())
6174 return Chain;
6175
6176 SDValue Result =
6177 getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
6178 ConstantSize->getZExtValue(), Align, isVol,
6179 false, DstPtrInfo, SrcPtrInfo);
6180 if (Result.getNode())
6181 return Result;
6182 }
6183
6184 // Then check to see if we should lower the memmove with target-specific
6185 // code. If the target chooses to do this, this is the next best.
6186 if (TSI) {
6187 SDValue Result = TSI->EmitTargetCodeForMemmove(
6188 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo);
6189 if (Result.getNode())
6190 return Result;
6191 }
6192
6193 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
6194 checkAddrSpaceIsValidForLibcall(TLI, SrcPtrInfo.getAddrSpace());
6195
6196 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
6197 // not be safe. See memcpy above for more details.
6198
6199 // Emit a library call.
6200 TargetLowering::ArgListTy Args;
6201 TargetLowering::ArgListEntry Entry;
6202 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6203 Entry.Node = Dst; Args.push_back(Entry);
6204 Entry.Node = Src; Args.push_back(Entry);
6205 Entry.Node = Size; Args.push_back(Entry);
6206 // FIXME: pass in SDLoc
6207 TargetLowering::CallLoweringInfo CLI(*this);
6208 CLI.setDebugLoc(dl)
6209 .setChain(Chain)
6210 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMMOVE),
6211 Dst.getValueType().getTypeForEVT(*getContext()),
6212 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMMOVE),
6213 TLI->getPointerTy(getDataLayout())),
6214 std::move(Args))
6215 .setDiscardResult()
6216 .setTailCall(isTailCall);
6217
6218 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
6219 return CallResult.second;
6220 }
6221
6222 SDValue SelectionDAG::getAtomicMemmove(SDValue Chain, const SDLoc &dl,
6223 SDValue Dst, unsigned DstAlign,
6224 SDValue Src, unsigned SrcAlign,
6225 SDValue Size, Type *SizeTy,
6226 unsigned ElemSz, bool isTailCall,
6227 MachinePointerInfo DstPtrInfo,
6228 MachinePointerInfo SrcPtrInfo) {
6229 // Emit a library call.
6230 TargetLowering::ArgListTy Args;
6231 TargetLowering::ArgListEntry Entry;
6232 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6233 Entry.Node = Dst;
6234 Args.push_back(Entry);
6235
6236 Entry.Node = Src;
6237 Args.push_back(Entry);
6238
6239 Entry.Ty = SizeTy;
6240 Entry.Node = Size;
6241 Args.push_back(Entry);
6242
6243 RTLIB::Libcall LibraryCall =
6244 RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(ElemSz);
6245 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
6246 report_fatal_error("Unsupported element size");
6247
6248 TargetLowering::CallLoweringInfo CLI(*this);
6249 CLI.setDebugLoc(dl)
6250 .setChain(Chain)
6251 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
6252 Type::getVoidTy(*getContext()),
6253 getExternalSymbol(TLI->getLibcallName(LibraryCall),
6254 TLI->getPointerTy(getDataLayout())),
6255 std::move(Args))
6256 .setDiscardResult()
6257 .setTailCall(isTailCall);
6258
6259 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
6260 return CallResult.second;
6261 }
6262
6263 SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
6264 SDValue Src, SDValue Size, unsigned Align,
6265 bool isVol, bool isTailCall,
6266 MachinePointerInfo DstPtrInfo) {
6267 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
6268
6269 // Check to see if we should lower the memset to stores first.
6270 // For cases within the target-specified limits, this is the best choice.
6271 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
6272 if (ConstantSize) {
6273 // Memset with size zero? Just return the original chain.
6274 if (ConstantSize->isNullValue())
6275 return Chain;
6276
6277 SDValue Result =
6278 getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
6279 Align, isVol, DstPtrInfo);
6280
6281 if (Result.getNode())
6282 return Result;
6283 }
6284
6285 // Then check to see if we should lower the memset with target-specific
6286 // code. If the target chooses to do this, this is the next best.
6287 if (TSI) {
6288 SDValue Result = TSI->EmitTargetCodeForMemset(
6289 *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo);
6290 if (Result.getNode())
6291 return Result;
6292 }
6293
6294 checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
6295
6296 // Emit a library call.
6297 Type *IntPtrTy = getDataLayout().getIntPtrType(*getContext());
6298 TargetLowering::ArgListTy Args;
6299 TargetLowering::ArgListEntry Entry;
6300 Entry.Node = Dst; Entry.Ty = IntPtrTy;
6301 Args.push_back(Entry);
6302 Entry.Node = Src;
6303 Entry.Ty = Src.getValueType().getTypeForEVT(*getContext());
6304 Args.push_back(Entry);
6305 Entry.Node = Size;
6306 Entry.Ty = IntPtrTy;
6307 Args.push_back(Entry);
6308
6309 // FIXME: pass in SDLoc
6310 TargetLowering::CallLoweringInfo CLI(*this);
6311 CLI.setDebugLoc(dl)
6312 .setChain(Chain)
6313 .setLibCallee(TLI->getLibcallCallingConv(RTLIB::MEMSET),
6314 Dst.getValueType().getTypeForEVT(*getContext()),
6315 getExternalSymbol(TLI->getLibcallName(RTLIB::MEMSET),
6316 TLI->getPointerTy(getDataLayout())),
6317 std::move(Args))
6318 .setDiscardResult()
6319 .setTailCall(isTailCall);
6320
6321 std::pair<SDValue,SDValue> CallResult = TLI->LowerCallTo(CLI);
6322 return CallResult.second;
6323 }
6324
6325 SDValue SelectionDAG::getAtomicMemset(SDValue Chain, const SDLoc &dl,
6326 SDValue Dst, unsigned DstAlign,
6327 SDValue Value, SDValue Size, Type *SizeTy,
6328 unsigned ElemSz, bool isTailCall,
6329 MachinePointerInfo DstPtrInfo) {
6330 // Emit a library call.
6331 TargetLowering::ArgListTy Args;
6332 TargetLowering::ArgListEntry Entry;
6333 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
6334 Entry.Node = Dst;
6335 Args.push_back(Entry);
6336
6337 Entry.Ty = Type::getInt8Ty(*getContext());
6338 Entry.Node = Value;
6339 Args.push_back(Entry);
6340
6341 Entry.Ty = SizeTy;
6342 Entry.Node = Size;
6343 Args.push_back(Entry);
6344
6345 RTLIB::Libcall LibraryCall =
6346 RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(ElemSz);
6347 if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
6348 report_fatal_error("Unsupported element size");
6349
6350 TargetLowering::CallLoweringInfo CLI(*this);
6351 CLI.setDebugLoc(dl)
6352 .setChain(Chain)
6353 .setLibCallee(TLI->getLibcallCallingConv(LibraryCall),
6354 Type::getVoidTy(*getContext()),
6355 getExternalSymbol(TLI->getLibcallName(LibraryCall),
6356 TLI->getPointerTy(getDataLayout())),
6357 std::move(Args))
6358 .setDiscardResult()
6359 .setTailCall(isTailCall);
6360
6361 std::pair<SDValue, SDValue> CallResult = TLI->LowerCallTo(CLI);
6362 return CallResult.second;
6363 }
6364
6365 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
6366 SDVTList VTList, ArrayRef<SDValue> Ops,
6367 MachineMemOperand *MMO) {
6368 FoldingSetNodeID ID;
6369 ID.AddInteger(MemVT.getRawBits());
6370 AddNodeIDNode(ID, Opcode, VTList, Ops);
6371 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6372 void* IP = nullptr;
6373 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6374 cast<AtomicSDNode>(E)->refineAlignment(MMO);
6375 return SDValue(E, 0);
6376 }
6377
6378 auto *N = newSDNode<AtomicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
6379 VTList, MemVT, MMO);
6380 createOperands(N, Ops);
6381
6382 CSEMap.InsertNode(N, IP);
6383 InsertNode(N);
6384 return SDValue(N, 0);
6385 }
6386
6387 SDValue SelectionDAG::getAtomicCmpSwap(
6388 unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain,
6389 SDValue Ptr, SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo,
6390 unsigned Alignment, AtomicOrdering SuccessOrdering,
6391 AtomicOrdering FailureOrdering, SyncScope::ID SSID) {
6392 assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
6393 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
6394 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
6395
6396 if (Alignment == 0) // Ensure that codegen never sees alignment 0
6397 Alignment = getEVTAlignment(MemVT);
6398
6399 MachineFunction &MF = getMachineFunction();
6400
6401 // FIXME: Volatile isn't really correct; we should keep track of atomic
6402 // orderings in the memoperand.
6403 auto Flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad |
6404 MachineMemOperand::MOStore;
6405 MachineMemOperand *MMO =
6406 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
6407 AAMDNodes(), nullptr, SSID, SuccessOrdering,
6408 FailureOrdering);
6409
6410 return getAtomicCmpSwap(Opcode, dl, MemVT, VTs, Chain, Ptr, Cmp, Swp, MMO);
6411 }
6412
6413 SDValue SelectionDAG::getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl,
6414 EVT MemVT, SDVTList VTs, SDValue Chain,
6415 SDValue Ptr, SDValue Cmp, SDValue Swp,
6416 MachineMemOperand *MMO) {
6417 assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
6418 Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
6419 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
6420
6421 SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
6422 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
6423 }
6424
6425 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
6426 SDValue Chain, SDValue Ptr, SDValue Val,
6427 const Value *PtrVal, unsigned Alignment,
6428 AtomicOrdering Ordering,
6429 SyncScope::ID SSID) {
6430 if (Alignment == 0) // Ensure that codegen never sees alignment 0
6431 Alignment = getEVTAlignment(MemVT);
6432
6433 MachineFunction &MF = getMachineFunction();
6434 // An atomic store does not load. An atomic load does not store.
6435 // (An atomicrmw obviously both loads and stores.)
6436 // For now, atomics are considered to be volatile always, and they are
6437 // chained as such.
6438 // FIXME: Volatile isn't really correct; we should keep track of atomic
6439 // orderings in the memoperand.
6440 auto Flags = MachineMemOperand::MOVolatile;
6441 if (Opcode != ISD::ATOMIC_STORE)
6442 Flags |= MachineMemOperand::MOLoad;
6443 if (Opcode != ISD::ATOMIC_LOAD)
6444 Flags |= MachineMemOperand::MOStore;
6445
6446 MachineMemOperand *MMO =
6447 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
6448 MemVT.getStoreSize(), Alignment, AAMDNodes(),
6449 nullptr, SSID, Ordering);
6450
6451 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO);
6452 }
6453
6454 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
6455 SDValue Chain, SDValue Ptr, SDValue Val,
6456 MachineMemOperand *MMO) {
6457 assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
6458 Opcode == ISD::ATOMIC_LOAD_SUB ||
6459 Opcode == ISD::ATOMIC_LOAD_AND ||
6460 Opcode == ISD::ATOMIC_LOAD_CLR ||
6461 Opcode == ISD::ATOMIC_LOAD_OR ||
6462 Opcode == ISD::ATOMIC_LOAD_XOR ||
6463 Opcode == ISD::ATOMIC_LOAD_NAND ||
6464 Opcode == ISD::ATOMIC_LOAD_MIN ||
6465 Opcode == ISD::ATOMIC_LOAD_MAX ||
6466 Opcode == ISD::ATOMIC_LOAD_UMIN ||
6467 Opcode == ISD::ATOMIC_LOAD_UMAX ||
6468 Opcode == ISD::ATOMIC_SWAP ||
6469 Opcode == ISD::ATOMIC_STORE) &&
6470 "Invalid Atomic Op");
6471
6472 EVT VT = Val.getValueType();
6473
6474 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
6475 getVTList(VT, MVT::Other);
6476 SDValue Ops[] = {Chain, Ptr, Val};
6477 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
6478 }
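// Illustrative example (not part of the original source): a call such as
//   DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl, MVT::i32, Chain, Ptr, Incr, MMO)
// produces a node whose result 0 is the previous i32 value at Ptr and whose
// result 1 is the output chain; ISD::ATOMIC_STORE produces only a chain.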
6479
6480 SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
6481 EVT VT, SDValue Chain, SDValue Ptr,
6482 MachineMemOperand *MMO) {
6483 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");
6484
6485 SDVTList VTs = getVTList(VT, MVT::Other);
6486 SDValue Ops[] = {Chain, Ptr};
6487 return getAtomic(Opcode, dl, MemVT, VTs, Ops, MMO);
6488 }
6489
6490 /// getMergeValues - Create a MERGE_VALUES node from the given operands.
6491 SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) {
6492 if (Ops.size() == 1)
6493 return Ops[0];
6494
6495 SmallVector<EVT, 4> VTs;
6496 VTs.reserve(Ops.size());
6497 for (unsigned i = 0; i < Ops.size(); ++i)
6498 VTs.push_back(Ops[i].getValueType());
6499 return getNode(ISD::MERGE_VALUES, dl, getVTList(VTs), Ops);
6500 }
6501
6502 SDValue SelectionDAG::getMemIntrinsicNode(
6503 unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
6504 EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align,
6505 MachineMemOperand::Flags Flags, unsigned Size) {
6506 if (Align == 0) // Ensure that codegen never sees alignment 0
6507 Align = getEVTAlignment(MemVT);
6508
6509 if (!Size)
6510 Size = MemVT.getStoreSize();
6511
6512 MachineFunction &MF = getMachineFunction();
6513 MachineMemOperand *MMO =
6514 MF.getMachineMemOperand(PtrInfo, Flags, Size, Align);
6515
6516 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
6517 }
6518
6519 SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl,
6520 SDVTList VTList,
6521 ArrayRef<SDValue> Ops, EVT MemVT,
6522 MachineMemOperand *MMO) {
6523 assert((Opcode == ISD::INTRINSIC_VOID ||
6524 Opcode == ISD::INTRINSIC_W_CHAIN ||
6525 Opcode == ISD::PREFETCH ||
6526 Opcode == ISD::LIFETIME_START ||
6527 Opcode == ISD::LIFETIME_END ||
6528 ((int)Opcode <= std::numeric_limits<int>::max() &&
6529 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
6530 "Opcode is not a memory-accessing opcode!");
6531
6532 // Memoize the node unless it returns a flag.
6533 MemIntrinsicSDNode *N;
6534 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
6535 FoldingSetNodeID ID;
6536 AddNodeIDNode(ID, Opcode, VTList, Ops);
6537 ID.AddInteger(getSyntheticNodeSubclassData<MemIntrinsicSDNode>(
6538 Opcode, dl.getIROrder(), VTList, MemVT, MMO));
6539 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6540 void *IP = nullptr;
6541 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6542 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
6543 return SDValue(E, 0);
6544 }
6545
6546 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
6547 VTList, MemVT, MMO);
6548 createOperands(N, Ops);
6549
6550 CSEMap.InsertNode(N, IP);
6551 } else {
6552 N = newSDNode<MemIntrinsicSDNode>(Opcode, dl.getIROrder(), dl.getDebugLoc(),
6553 VTList, MemVT, MMO);
6554 createOperands(N, Ops);
6555 }
6556 InsertNode(N);
6557 return SDValue(N, 0);
6558 }
6559
6560 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
6561 /// MachinePointerInfo record from it. This is particularly useful because the
6562 /// code generator has many cases where it doesn't bother passing in a
6563 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
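///
/// For example (illustrative, not part of the original comment): a pointer of
/// the form (ISD::ADD (FrameIndex 3), Constant 8) is mapped to
/// MachinePointerInfo::getFixedStack(MF, /*FI=*/3, /*Offset=*/8), giving later
/// passes precise information about which stack slot is accessed.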
6564 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
6565 SelectionDAG &DAG, SDValue Ptr,
6566 int64_t Offset = 0) {
6567 // If this is FI+Offset, we can model it.
6568 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
6569 return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(),
6570 FI->getIndex(), Offset);
6571
6572 // If this is (FI+Offset1)+Offset2, we can model it.
6573 if (Ptr.getOpcode() != ISD::ADD ||
6574 !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
6575 !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
6576 return Info;
6577
6578 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
6579 return MachinePointerInfo::getFixedStack(
6580 DAG.getMachineFunction(), FI,
6581 Offset + cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
6582 }
6583
6584 /// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
6585 /// MachinePointerInfo record from it. This is particularly useful because the
6586 /// code generator has many cases where it doesn't bother passing in a
6587 /// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
6588 static MachinePointerInfo InferPointerInfo(const MachinePointerInfo &Info,
6589 SelectionDAG &DAG, SDValue Ptr,
6590 SDValue OffsetOp) {
6591 // Fold in a constant offset; treat an undef offset as zero. Any other offset cannot be modeled.
6592 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
6593 return InferPointerInfo(Info, DAG, Ptr, OffsetNode->getSExtValue());
6594 if (OffsetOp.isUndef())
6595 return InferPointerInfo(Info, DAG, Ptr);
6596 return Info;
6597 }
6598
6599 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
6600 EVT VT, const SDLoc &dl, SDValue Chain,
6601 SDValue Ptr, SDValue Offset,
6602 MachinePointerInfo PtrInfo, EVT MemVT,
6603 unsigned Alignment,
6604 MachineMemOperand::Flags MMOFlags,
6605 const AAMDNodes &AAInfo, const MDNode *Ranges) {
6606 assert(Chain.getValueType() == MVT::Other &&
6607 "Invalid chain type");
6608 if (Alignment == 0) // Ensure that codegen never sees alignment 0
6609 Alignment = getEVTAlignment(MemVT);
6610
6611 MMOFlags |= MachineMemOperand::MOLoad;
6612 assert((MMOFlags & MachineMemOperand::MOStore) == 0);
6613 // If we don't have a PtrInfo, infer the trivial frame index case to simplify
6614 // clients.
6615 if (PtrInfo.V.isNull())
6616 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr, Offset);
6617
6618 MachineFunction &MF = getMachineFunction();
6619 MachineMemOperand *MMO = MF.getMachineMemOperand(
6620 PtrInfo, MMOFlags, MemVT.getStoreSize(), Alignment, AAInfo, Ranges);
6621 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
6622 }
6623
6624 SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
6625 EVT VT, const SDLoc &dl, SDValue Chain,
6626 SDValue Ptr, SDValue Offset, EVT MemVT,
6627 MachineMemOperand *MMO) {
6628 if (VT == MemVT) {
6629 ExtType = ISD::NON_EXTLOAD;
6630 } else if (ExtType == ISD::NON_EXTLOAD) {
6631 assert(VT == MemVT && "Non-extending load from different memory type!");
6632 } else {
6633 // Extending load.
6634 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
6635 "Should only be an extending load, not truncating!");
6636 assert(VT.isInteger() == MemVT.isInteger() &&
6637 "Cannot convert from FP to Int or Int -> FP!");
6638 assert(VT.isVector() == MemVT.isVector() &&
6639 "Cannot use an ext load to convert to or from a vector!");
6640 assert((!VT.isVector() ||
6641 VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
6642 "Cannot use an ext load to change the number of vector elements!");
6643 }
6644
6645 bool Indexed = AM != ISD::UNINDEXED;
6646 assert((Indexed || Offset.isUndef()) && "Unindexed load with an offset!");
6647
6648 SDVTList VTs = Indexed ?
6649 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
6650 SDValue Ops[] = { Chain, Ptr, Offset };
6651 FoldingSetNodeID ID;
6652 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops);
6653 ID.AddInteger(MemVT.getRawBits());
6654 ID.AddInteger(getSyntheticNodeSubclassData<LoadSDNode>(
6655 dl.getIROrder(), VTs, AM, ExtType, MemVT, MMO));
6656 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6657 void *IP = nullptr;
6658 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6659 cast<LoadSDNode>(E)->refineAlignment(MMO);
6660 return SDValue(E, 0);
6661 }
6662 auto *N = newSDNode<LoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
6663 ExtType, MemVT, MMO);
6664 createOperands(N, Ops);
6665
6666 CSEMap.InsertNode(N, IP);
6667 InsertNode(N);
6668 SDValue V(N, 0);
6669 NewSDValueDbgMsg(V, "Creating new node: ", this);
6670 return V;
6671 }
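// Illustrative example (not part of the original source): a sign-extending
// load of an i8 value into an i32 result would be created as
//   getLoad(ISD::UNINDEXED, ISD::SEXTLOAD, MVT::i32, dl, Chain, Ptr,
//           getUNDEF(Ptr.getValueType()), /*MemVT=*/MVT::i8, MMO);
// which satisfies the extending-load assertions above (i8 is narrower than
// i32, both are integers, and neither is a vector).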
6672
6673 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
6674 SDValue Ptr, MachinePointerInfo PtrInfo,
6675 unsigned Alignment,
6676 MachineMemOperand::Flags MMOFlags,
6677 const AAMDNodes &AAInfo, const MDNode *Ranges) {
6678 SDValue Undef = getUNDEF(Ptr.getValueType());
6679 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
6680 PtrInfo, VT, Alignment, MMOFlags, AAInfo, Ranges);
6681 }
6682
6683 SDValue SelectionDAG::getLoad(EVT VT, const SDLoc &dl, SDValue Chain,
6684 SDValue Ptr, MachineMemOperand *MMO) {
6685 SDValue Undef = getUNDEF(Ptr.getValueType());
6686 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
6687 VT, MMO);
6688 }
6689
6690 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
6691 EVT VT, SDValue Chain, SDValue Ptr,
6692 MachinePointerInfo PtrInfo, EVT MemVT,
6693 unsigned Alignment,
6694 MachineMemOperand::Flags MMOFlags,
6695 const AAMDNodes &AAInfo) {
6696 SDValue Undef = getUNDEF(Ptr.getValueType());
6697 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef, PtrInfo,
6698 MemVT, Alignment, MMOFlags, AAInfo);
6699 }
6700
6701 SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl,
6702 EVT VT, SDValue Chain, SDValue Ptr, EVT MemVT,
6703 MachineMemOperand *MMO) {
6704 SDValue Undef = getUNDEF(Ptr.getValueType());
6705 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
6706 MemVT, MMO);
6707 }
6708
6709 SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl,
6710 SDValue Base, SDValue Offset,
6711 ISD::MemIndexedMode AM) {
6712 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
6713 assert(LD->getOffset().isUndef() && "Load is already an indexed load!");
6714 // Don't propagate the invariant or dereferenceable flags.
6715 auto MMOFlags =
6716 LD->getMemOperand()->getFlags() &
6717 ~(MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable);
6718 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
6719 LD->getChain(), Base, Offset, LD->getPointerInfo(),
6720 LD->getMemoryVT(), LD->getAlignment(), MMOFlags,
6721 LD->getAAInfo());
6722 }
6723
6724 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
6725 SDValue Ptr, MachinePointerInfo PtrInfo,
6726 unsigned Alignment,
6727 MachineMemOperand::Flags MMOFlags,
6728 const AAMDNodes &AAInfo) {
6729 assert(Chain.getValueType() == MVT::Other && "Invalid chain type");
6730 if (Alignment == 0) // Ensure that codegen never sees alignment 0
6731 Alignment = getEVTAlignment(Val.getValueType());
6732
6733 MMOFlags |= MachineMemOperand::MOStore;
6734 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
6735
6736 if (PtrInfo.V.isNull())
6737 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
6738
6739 MachineFunction &MF = getMachineFunction();
6740 MachineMemOperand *MMO = MF.getMachineMemOperand(
6741 PtrInfo, MMOFlags, Val.getValueType().getStoreSize(), Alignment, AAInfo);
6742 return getStore(Chain, dl, Val, Ptr, MMO);
6743 }
6744
6745 SDValue SelectionDAG::getStore(SDValue Chain, const SDLoc &dl, SDValue Val,
6746 SDValue Ptr, MachineMemOperand *MMO) {
6747 assert(Chain.getValueType() == MVT::Other &&
6748 "Invalid chain type");
6749 EVT VT = Val.getValueType();
6750 SDVTList VTs = getVTList(MVT::Other);
6751 SDValue Undef = getUNDEF(Ptr.getValueType());
6752 SDValue Ops[] = { Chain, Val, Ptr, Undef };
6753 FoldingSetNodeID ID;
6754 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
6755 ID.AddInteger(VT.getRawBits());
6756 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
6757 dl.getIROrder(), VTs, ISD::UNINDEXED, false, VT, MMO));
6758 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6759 void *IP = nullptr;
6760 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6761 cast<StoreSDNode>(E)->refineAlignment(MMO);
6762 return SDValue(E, 0);
6763 }
6764 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
6765 ISD::UNINDEXED, false, VT, MMO);
6766 createOperands(N, Ops);
6767
6768 CSEMap.InsertNode(N, IP);
6769 InsertNode(N);
6770 SDValue V(N, 0);
6771 NewSDValueDbgMsg(V, "Creating new node: ", this);
6772 return V;
6773 }
6774
6775 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
6776 SDValue Ptr, MachinePointerInfo PtrInfo,
6777 EVT SVT, unsigned Alignment,
6778 MachineMemOperand::Flags MMOFlags,
6779 const AAMDNodes &AAInfo) {
6780 assert(Chain.getValueType() == MVT::Other &&
6781 "Invalid chain type");
6782 if (Alignment == 0) // Ensure that codegen never sees alignment 0
6783 Alignment = getEVTAlignment(SVT);
6784
6785 MMOFlags |= MachineMemOperand::MOStore;
6786 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
6787
6788 if (PtrInfo.V.isNull())
6789 PtrInfo = InferPointerInfo(PtrInfo, *this, Ptr);
6790
6791 MachineFunction &MF = getMachineFunction();
6792 MachineMemOperand *MMO = MF.getMachineMemOperand(
6793 PtrInfo, MMOFlags, SVT.getStoreSize(), Alignment, AAInfo);
6794 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
6795 }
6796
6797 SDValue SelectionDAG::getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
6798 SDValue Ptr, EVT SVT,
6799 MachineMemOperand *MMO) {
6800 EVT VT = Val.getValueType();
6801
6802 assert(Chain.getValueType() == MVT::Other &&
6803 "Invalid chain type");
6804 if (VT == SVT)
6805 return getStore(Chain, dl, Val, Ptr, MMO);
6806
6807 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
6808 "Should only be a truncating store, not extending!");
6809 assert(VT.isInteger() == SVT.isInteger() &&
6810 "Can't do FP-INT conversion!");
6811 assert(VT.isVector() == SVT.isVector() &&
6812 "Cannot use trunc store to convert to or from a vector!");
6813 assert((!VT.isVector() ||
6814 VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
6815 "Cannot use trunc store to change the number of vector elements!");
6816
6817 SDVTList VTs = getVTList(MVT::Other);
6818 SDValue Undef = getUNDEF(Ptr.getValueType());
6819 SDValue Ops[] = { Chain, Val, Ptr, Undef };
6820 FoldingSetNodeID ID;
6821 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
6822 ID.AddInteger(SVT.getRawBits());
6823 ID.AddInteger(getSyntheticNodeSubclassData<StoreSDNode>(
6824 dl.getIROrder(), VTs, ISD::UNINDEXED, true, SVT, MMO));
6825 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6826 void *IP = nullptr;
6827 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6828 cast<StoreSDNode>(E)->refineAlignment(MMO);
6829 return SDValue(E, 0);
6830 }
6831 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
6832 ISD::UNINDEXED, true, SVT, MMO);
6833 createOperands(N, Ops);
6834
6835 CSEMap.InsertNode(N, IP);
6836 InsertNode(N);
6837 SDValue V(N, 0);
6838 NewSDValueDbgMsg(V, "Creating new node: ", this);
6839 return V;
6840 }
6841
6842 SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl,
6843 SDValue Base, SDValue Offset,
6844 ISD::MemIndexedMode AM) {
6845 StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
6846 assert(ST->getOffset().isUndef() && "Store is already an indexed store!");
6847 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
6848 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
6849 FoldingSetNodeID ID;
6850 AddNodeIDNode(ID, ISD::STORE, VTs, Ops);
6851 ID.AddInteger(ST->getMemoryVT().getRawBits());
6852 ID.AddInteger(ST->getRawSubclassData());
6853 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
6854 void *IP = nullptr;
6855 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP))
6856 return SDValue(E, 0);
6857
6858 auto *N = newSDNode<StoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs, AM,
6859 ST->isTruncatingStore(), ST->getMemoryVT(),
6860 ST->getMemOperand());
6861 createOperands(N, Ops);
6862
6863 CSEMap.InsertNode(N, IP);
6864 InsertNode(N);
6865 SDValue V(N, 0);
6866 NewSDValueDbgMsg(V, "Creating new node: ", this);
6867 return V;
6868 }
6869
6870 SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain,
6871 SDValue Ptr, SDValue Mask, SDValue PassThru,
6872 EVT MemVT, MachineMemOperand *MMO,
6873 ISD::LoadExtType ExtTy, bool isExpanding) {
6874 SDVTList VTs = getVTList(VT, MVT::Other);
6875 SDValue Ops[] = { Chain, Ptr, Mask, PassThru };
6876 FoldingSetNodeID ID;
6877 AddNodeIDNode(ID, ISD::MLOAD, VTs, Ops);
6878 ID.AddInteger(VT.getRawBits());
6879 ID.AddInteger(getSyntheticNodeSubclassData<MaskedLoadSDNode>(
6880 dl.getIROrder(), VTs, ExtTy, isExpanding, MemVT, MMO));
6881 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6882 void *IP = nullptr;
6883 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6884 cast<MaskedLoadSDNode>(E)->refineAlignment(MMO);
6885 return SDValue(E, 0);
6886 }
6887 auto *N = newSDNode<MaskedLoadSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
6888 ExtTy, isExpanding, MemVT, MMO);
6889 createOperands(N, Ops);
6890
6891 CSEMap.InsertNode(N, IP);
6892 InsertNode(N);
6893 SDValue V(N, 0);
6894 NewSDValueDbgMsg(V, "Creating new node: ", this);
6895 return V;
6896 }
6897
6898 SDValue SelectionDAG::getMaskedStore(SDValue Chain, const SDLoc &dl,
6899 SDValue Val, SDValue Ptr, SDValue Mask,
6900 EVT MemVT, MachineMemOperand *MMO,
6901 bool IsTruncating, bool IsCompressing) {
6902 assert(Chain.getValueType() == MVT::Other &&
6903 "Invalid chain type");
6904 EVT VT = Val.getValueType();
6905 SDVTList VTs = getVTList(MVT::Other);
6906 SDValue Ops[] = { Chain, Val, Ptr, Mask };
6907 FoldingSetNodeID ID;
6908 AddNodeIDNode(ID, ISD::MSTORE, VTs, Ops);
6909 ID.AddInteger(VT.getRawBits());
6910 ID.AddInteger(getSyntheticNodeSubclassData<MaskedStoreSDNode>(
6911 dl.getIROrder(), VTs, IsTruncating, IsCompressing, MemVT, MMO));
6912 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6913 void *IP = nullptr;
6914 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6915 cast<MaskedStoreSDNode>(E)->refineAlignment(MMO);
6916 return SDValue(E, 0);
6917 }
6918 auto *N = newSDNode<MaskedStoreSDNode>(dl.getIROrder(), dl.getDebugLoc(), VTs,
6919 IsTruncating, IsCompressing, MemVT, MMO);
6920 createOperands(N, Ops);
6921
6922 CSEMap.InsertNode(N, IP);
6923 InsertNode(N);
6924 SDValue V(N, 0);
6925 NewSDValueDbgMsg(V, "Creating new node: ", this);
6926 return V;
6927 }
6928
6929 SDValue SelectionDAG::getMaskedGather(SDVTList VTs, EVT VT, const SDLoc &dl,
6930 ArrayRef<SDValue> Ops,
6931 MachineMemOperand *MMO) {
6932 assert(Ops.size() == 6 && "Incompatible number of operands");
6933
6934 FoldingSetNodeID ID;
6935 AddNodeIDNode(ID, ISD::MGATHER, VTs, Ops);
6936 ID.AddInteger(VT.getRawBits());
6937 ID.AddInteger(getSyntheticNodeSubclassData<MaskedGatherSDNode>(
6938 dl.getIROrder(), VTs, VT, MMO));
6939 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6940 void *IP = nullptr;
6941 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6942 cast<MaskedGatherSDNode>(E)->refineAlignment(MMO);
6943 return SDValue(E, 0);
6944 }
6945
6946 auto *N = newSDNode<MaskedGatherSDNode>(dl.getIROrder(), dl.getDebugLoc(),
6947 VTs, VT, MMO);
6948 createOperands(N, Ops);
6949
6950 assert(N->getPassThru().getValueType() == N->getValueType(0) &&
6951 "Incompatible type of the PassThru value in MaskedGatherSDNode");
6952 assert(N->getMask().getValueType().getVectorNumElements() ==
6953 N->getValueType(0).getVectorNumElements() &&
6954 "Vector width mismatch between mask and data");
6955 assert(N->getIndex().getValueType().getVectorNumElements() >=
6956 N->getValueType(0).getVectorNumElements() &&
6957 "Vector width mismatch between index and data");
6958 assert(isa<ConstantSDNode>(N->getScale()) &&
6959 cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
6960 "Scale should be a constant power of 2");
6961
6962 CSEMap.InsertNode(N, IP);
6963 InsertNode(N);
6964 SDValue V(N, 0);
6965 NewSDValueDbgMsg(V, "Creating new node: ", this);
6966 return V;
6967 }
6968
6969 SDValue SelectionDAG::getMaskedScatter(SDVTList VTs, EVT VT, const SDLoc &dl,
6970 ArrayRef<SDValue> Ops,
6971 MachineMemOperand *MMO) {
6972 assert(Ops.size() == 6 && "Incompatible number of operands");
6973
6974 FoldingSetNodeID ID;
6975 AddNodeIDNode(ID, ISD::MSCATTER, VTs, Ops);
6976 ID.AddInteger(VT.getRawBits());
6977 ID.AddInteger(getSyntheticNodeSubclassData<MaskedScatterSDNode>(
6978 dl.getIROrder(), VTs, VT, MMO));
6979 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
6980 void *IP = nullptr;
6981 if (SDNode *E = FindNodeOrInsertPos(ID, dl, IP)) {
6982 cast<MaskedScatterSDNode>(E)->refineAlignment(MMO);
6983 return SDValue(E, 0);
6984 }
6985 auto *N = newSDNode<MaskedScatterSDNode>(dl.getIROrder(), dl.getDebugLoc(),
6986 VTs, VT, MMO);
6987 createOperands(N, Ops);
6988
6989 assert(N->getMask().getValueType().getVectorNumElements() ==
6990 N->getValue().getValueType().getVectorNumElements() &&
6991 "Vector width mismatch between mask and data");
6992 assert(N->getIndex().getValueType().getVectorNumElements() >=
6993 N->getValue().getValueType().getVectorNumElements() &&
6994 "Vector width mismatch between index and data");
6995 assert(isa<ConstantSDNode>(N->getScale()) &&
6996 cast<ConstantSDNode>(N->getScale())->getAPIntValue().isPowerOf2() &&
6997 "Scale should be a constant power of 2");
6998
6999 CSEMap.InsertNode(N, IP);
7000 InsertNode(N);
7001 SDValue V(N, 0);
7002 NewSDValueDbgMsg(V, "Creating new node: ", this);
7003 return V;
7004 }
7005
7006 SDValue SelectionDAG::simplifySelect(SDValue Cond, SDValue T, SDValue F) {
7007 // select undef, T, F --> T (if T is a constant), otherwise F
7008 // select ?, undef, F --> F
7009 // select ?, T, undef --> T
7010 if (Cond.isUndef())
7011 return isConstantValueOfAnyType(T) ? T : F;
7012 if (T.isUndef())
7013 return F;
7014 if (F.isUndef())
7015 return T;
7016
7017 // select true, T, F --> T
7018 // select false, T, F --> F
7019 if (auto *CondC = dyn_cast<ConstantSDNode>(Cond))
7020 return CondC->isNullValue() ? F : T;
7021
7022 // TODO: This should simplify VSELECT with constant condition using something
7023 // like this (but check boolean contents to be complete?):
7024 // if (ISD::isBuildVectorAllOnes(Cond.getNode()))
7025 // return T;
7026 // if (ISD::isBuildVectorAllZeros(Cond.getNode()))
7027 // return F;
7028
7029 // select ?, T, T --> T
7030 if (T == F)
7031 return T;
7032
7033 return SDValue();
7034 }
7035
7036 SDValue SelectionDAG::simplifyShift(SDValue X, SDValue Y) {
7037 // shift undef, Y --> 0 (can always assume that the undef value is 0)
7038 if (X.isUndef())
7039 return getConstant(0, SDLoc(X.getNode()), X.getValueType());
7040 // shift X, undef --> undef (because it may shift by the bitwidth)
7041 if (Y.isUndef())
7042 return getUNDEF(X.getValueType());
7043
7044 // shift 0, Y --> 0
7045 // shift X, 0 --> X
7046 if (isNullOrNullSplat(X) || isNullOrNullSplat(Y))
7047 return X;
7048
7049 // shift X, C >= bitwidth(X) --> undef
7050 // All vector elements must be too big (or undef) to avoid partial undefs.
7051 auto isShiftTooBig = [X](ConstantSDNode *Val) {
7052 return !Val || Val->getAPIntValue().uge(X.getScalarValueSizeInBits());
7053 };
7054 if (ISD::matchUnaryPredicate(Y, isShiftTooBig, true))
7055 return getUNDEF(X.getValueType());
7056
7057 return SDValue();
7058 }
7059
7060 SDValue SelectionDAG::getVAArg(EVT VT, const SDLoc &dl, SDValue Chain,
7061 SDValue Ptr, SDValue SV, unsigned Align) {
7062 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, dl, MVT::i32) };
7063 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops);
7064 }
7065
7066 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
7067 ArrayRef<SDUse> Ops) {
7068 switch (Ops.size()) {
7069 case 0: return getNode(Opcode, DL, VT);
7070 case 1: return getNode(Opcode, DL, VT, static_cast<const SDValue>(Ops[0]));
7071 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
7072 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
7073 default: break;
7074 }
7075
7076 // Copy from an SDUse array into an SDValue array for use with
7077 // the regular getNode logic.
7078 SmallVector<SDValue, 8> NewOps(Ops.begin(), Ops.end());
7079 return getNode(Opcode, DL, VT, NewOps);
7080 }
7081
7082 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
7083 ArrayRef<SDValue> Ops, const SDNodeFlags Flags) {
7084 unsigned NumOps = Ops.size();
7085 switch (NumOps) {
7086 case 0: return getNode(Opcode, DL, VT);
7087 case 1: return getNode(Opcode, DL, VT, Ops[0], Flags);
7088 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Flags);
7089 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2], Flags);
7090 default: break;
7091 }
7092
7093 switch (Opcode) {
7094 default: break;
7095 case ISD::BUILD_VECTOR:
7096 // Attempt to simplify BUILD_VECTOR.
7097 if (SDValue V = FoldBUILD_VECTOR(DL, VT, Ops, *this))
7098 return V;
7099 break;
7100 case ISD::CONCAT_VECTORS:
7101 // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF.
7102 if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this))
7103 return V;
7104 break;
7105 case ISD::SELECT_CC:
7106 assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
7107 assert(Ops[0].getValueType() == Ops[1].getValueType() &&
7108 "LHS and RHS of condition must have same type!");
7109 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
7110 "True and False arms of SelectCC must have same type!");
7111 assert(Ops[2].getValueType() == VT &&
7112 "select_cc node must be of same type as true and false value!");
7113 break;
7114 case ISD::BR_CC:
7115 assert(NumOps == 5 && "BR_CC takes 5 operands!");
7116 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
7117 "LHS/RHS of comparison should match types!");
7118 break;
7119 }
7120
7121 // Memoize nodes.
7122 SDNode *N;
7123 SDVTList VTs = getVTList(VT);
7124
7125 if (VT != MVT::Glue) {
7126 FoldingSetNodeID ID;
7127 AddNodeIDNode(ID, Opcode, VTs, Ops);
7128 void *IP = nullptr;
7129
7130 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
7131 return SDValue(E, 0);
7132
7133 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
7134 createOperands(N, Ops);
7135
7136 CSEMap.InsertNode(N, IP);
7137 } else {
7138 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
7139 createOperands(N, Ops);
7140 }
7141
7142 InsertNode(N);
7143 SDValue V(N, 0);
7144 NewSDValueDbgMsg(V, "Creating new node: ", this);
7145 return V;
7146 }
7147
7148 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
7149 ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops) {
7150 return getNode(Opcode, DL, getVTList(ResultTys), Ops);
7151 }
7152
7153 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7154 ArrayRef<SDValue> Ops) {
7155 if (VTList.NumVTs == 1)
7156 return getNode(Opcode, DL, VTList.VTs[0], Ops);
7157
7158 #if 0
7159 switch (Opcode) {
7160 // FIXME: figure out how to safely handle things like
7161 // int foo(int x) { return 1 << (x & 255); }
7162 // int bar() { return foo(256); }
7163 case ISD::SRA_PARTS:
7164 case ISD::SRL_PARTS:
7165 case ISD::SHL_PARTS:
7166 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
7167 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
7168 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
7169 else if (N3.getOpcode() == ISD::AND)
7170 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
7171 // If the and is only masking out bits that cannot affect the shift,
7172 // eliminate the and.
7173 unsigned NumBits = VT.getScalarSizeInBits()*2;
7174 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
7175 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
7176 }
7177 break;
7178 }
7179 #endif
7180
7181 // Memoize the node unless it returns a flag.
7182 SDNode *N;
7183 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
7184 FoldingSetNodeID ID;
7185 AddNodeIDNode(ID, Opcode, VTList, Ops);
7186 void *IP = nullptr;
7187 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP))
7188 return SDValue(E, 0);
7189
7190 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
7191 createOperands(N, Ops);
7192 CSEMap.InsertNode(N, IP);
7193 } else {
7194 N = newSDNode<SDNode>(Opcode, DL.getIROrder(), DL.getDebugLoc(), VTList);
7195 createOperands(N, Ops);
7196 }
7197 InsertNode(N);
7198 SDValue V(N, 0);
7199 NewSDValueDbgMsg(V, "Creating new node: ", this);
7200 return V;
7201 }
7202
7203 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL,
7204 SDVTList VTList) {
7205 return getNode(Opcode, DL, VTList, None);
7206 }
7207
7208 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7209 SDValue N1) {
7210 SDValue Ops[] = { N1 };
7211 return getNode(Opcode, DL, VTList, Ops);
7212 }
7213
7214 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7215 SDValue N1, SDValue N2) {
7216 SDValue Ops[] = { N1, N2 };
7217 return getNode(Opcode, DL, VTList, Ops);
7218 }
7219
7220 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7221 SDValue N1, SDValue N2, SDValue N3) {
7222 SDValue Ops[] = { N1, N2, N3 };
7223 return getNode(Opcode, DL, VTList, Ops);
7224 }
7225
getNode(unsigned Opcode,const SDLoc & DL,SDVTList VTList,SDValue N1,SDValue N2,SDValue N3,SDValue N4)7226 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7227 SDValue N1, SDValue N2, SDValue N3, SDValue N4) {
7228 SDValue Ops[] = { N1, N2, N3, N4 };
7229 return getNode(Opcode, DL, VTList, Ops);
7230 }
7231
getNode(unsigned Opcode,const SDLoc & DL,SDVTList VTList,SDValue N1,SDValue N2,SDValue N3,SDValue N4,SDValue N5)7232 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
7233 SDValue N1, SDValue N2, SDValue N3, SDValue N4,
7234 SDValue N5) {
7235 SDValue Ops[] = { N1, N2, N3, N4, N5 };
7236 return getNode(Opcode, DL, VTList, Ops);
7237 }
7238
getVTList(EVT VT)7239 SDVTList SelectionDAG::getVTList(EVT VT) {
7240 return makeVTList(SDNode::getValueTypeList(VT), 1);
7241 }
7242
getVTList(EVT VT1,EVT VT2)7243 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
7244 FoldingSetNodeID ID;
7245 ID.AddInteger(2U);
7246 ID.AddInteger(VT1.getRawBits());
7247 ID.AddInteger(VT2.getRawBits());
7248
7249 void *IP = nullptr;
7250 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
7251 if (!Result) {
7252 EVT *Array = Allocator.Allocate<EVT>(2);
7253 Array[0] = VT1;
7254 Array[1] = VT2;
7255 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 2);
7256 VTListMap.InsertNode(Result, IP);
7257 }
7258 return Result->getSDVTList();
7259 }
7260
getVTList(EVT VT1,EVT VT2,EVT VT3)7261 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
7262 FoldingSetNodeID ID;
7263 ID.AddInteger(3U);
7264 ID.AddInteger(VT1.getRawBits());
7265 ID.AddInteger(VT2.getRawBits());
7266 ID.AddInteger(VT3.getRawBits());
7267
7268 void *IP = nullptr;
7269 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
7270 if (!Result) {
7271 EVT *Array = Allocator.Allocate<EVT>(3);
7272 Array[0] = VT1;
7273 Array[1] = VT2;
7274 Array[2] = VT3;
7275 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 3);
7276 VTListMap.InsertNode(Result, IP);
7277 }
7278 return Result->getSDVTList();
7279 }
7280
getVTList(EVT VT1,EVT VT2,EVT VT3,EVT VT4)7281 SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
7282 FoldingSetNodeID ID;
7283 ID.AddInteger(4U);
7284 ID.AddInteger(VT1.getRawBits());
7285 ID.AddInteger(VT2.getRawBits());
7286 ID.AddInteger(VT3.getRawBits());
7287 ID.AddInteger(VT4.getRawBits());
7288
7289 void *IP = nullptr;
7290 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
7291 if (!Result) {
7292 EVT *Array = Allocator.Allocate<EVT>(4);
7293 Array[0] = VT1;
7294 Array[1] = VT2;
7295 Array[2] = VT3;
7296 Array[3] = VT4;
7297 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, 4);
7298 VTListMap.InsertNode(Result, IP);
7299 }
7300 return Result->getSDVTList();
7301 }
7302
getVTList(ArrayRef<EVT> VTs)7303 SDVTList SelectionDAG::getVTList(ArrayRef<EVT> VTs) {
7304 unsigned NumVTs = VTs.size();
7305 FoldingSetNodeID ID;
7306 ID.AddInteger(NumVTs);
7307 for (unsigned index = 0; index < NumVTs; index++) {
7308 ID.AddInteger(VTs[index].getRawBits());
7309 }
7310
7311 void *IP = nullptr;
7312 SDVTListNode *Result = VTListMap.FindNodeOrInsertPos(ID, IP);
7313 if (!Result) {
7314 EVT *Array = Allocator.Allocate<EVT>(NumVTs);
7315 llvm::copy(VTs, Array);
7316 Result = new (Allocator) SDVTListNode(ID.Intern(Allocator), Array, NumVTs);
7317 VTListMap.InsertNode(Result, IP);
7318 }
7319 return Result->getSDVTList();
7320 }
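// Note (illustrative only): VT lists are interned in VTListMap, so repeated
// queries with the same types on one SelectionDAG return a list backed by the
// same allocated array.
//
//   SDVTList A = DAG.getVTList(MVT::i64, MVT::Other);
//   SDVTList B = DAG.getVTList(MVT::i64, MVT::Other);
//   assert(A.VTs == B.VTs && A.NumVTs == 2 && "VT lists are uniqued per DAG");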
7321
7322
7323 /// UpdateNodeOperands - *Mutate* the specified node in-place to have the
7324 /// specified operands. If the resultant node already exists in the DAG,
7325 /// this does not modify the specified node, instead it returns the node that
7326 /// already exists. If the resultant node does not exist in the DAG, the
7327 /// input node is returned. As a degenerate case, if you specify the same
7328 /// input operands as the node already has, the input node is returned.
UpdateNodeOperands(SDNode * N,SDValue Op)7329 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
7330 assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
7331
7332 // Check to see if there is no change.
7333 if (Op == N->getOperand(0)) return N;
7334
7335 // See if the modified node already exists.
7336 void *InsertPos = nullptr;
7337 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
7338 return Existing;
7339
7340 // Nope it doesn't. Remove the node from its current place in the maps.
7341 if (InsertPos)
7342 if (!RemoveNodeFromCSEMaps(N))
7343 InsertPos = nullptr;
7344
7345 // Now we update the operands.
7346 N->OperandList[0].set(Op);
7347
7348 updateDivergence(N);
7349 // If this gets put into a CSE map, add it.
7350 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
7351 return N;
7352 }
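// Illustrative sketch of the contract described above, assuming a load node
// 'LD' and a rewritten address 'NewPtr' that exist in the caller:
//
//   SDNode *Res = DAG.UpdateNodeOperands(LD, LD->getOperand(0) /*chain*/,
//                                        NewPtr, LD->getOperand(2) /*offset*/);
//   // 'Res' may be LD mutated in place, or a different pre-existing node if an
//   // identical node was already in the CSE maps; callers must use 'Res'.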
7353
UpdateNodeOperands(SDNode * N,SDValue Op1,SDValue Op2)7354 SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
7355 assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
7356
7357 // Check to see if there is no change.
7358 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
7359 return N; // No operands changed, just return the input node.
7360
7361 // See if the modified node already exists.
7362 void *InsertPos = nullptr;
7363 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
7364 return Existing;
7365
7366 // Nope it doesn't. Remove the node from its current place in the maps.
7367 if (InsertPos)
7368 if (!RemoveNodeFromCSEMaps(N))
7369 InsertPos = nullptr;
7370
7371 // Now we update the operands.
7372 if (N->OperandList[0] != Op1)
7373 N->OperandList[0].set(Op1);
7374 if (N->OperandList[1] != Op2)
7375 N->OperandList[1].set(Op2);
7376
7377 updateDivergence(N);
7378 // If this gets put into a CSE map, add it.
7379 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
7380 return N;
7381 }
7382
7383 SDNode *SelectionDAG::
UpdateNodeOperands(SDNode * N,SDValue Op1,SDValue Op2,SDValue Op3)7384 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
7385 SDValue Ops[] = { Op1, Op2, Op3 };
7386 return UpdateNodeOperands(N, Ops);
7387 }
7388
7389 SDNode *SelectionDAG::
UpdateNodeOperands(SDNode * N,SDValue Op1,SDValue Op2,SDValue Op3,SDValue Op4)7390 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
7391 SDValue Op3, SDValue Op4) {
7392 SDValue Ops[] = { Op1, Op2, Op3, Op4 };
7393 return UpdateNodeOperands(N, Ops);
7394 }
7395
7396 SDNode *SelectionDAG::
UpdateNodeOperands(SDNode * N,SDValue Op1,SDValue Op2,SDValue Op3,SDValue Op4,SDValue Op5)7397 UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
7398 SDValue Op3, SDValue Op4, SDValue Op5) {
7399 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
7400 return UpdateNodeOperands(N, Ops);
7401 }
7402
7403 SDNode *SelectionDAG::
UpdateNodeOperands(SDNode * N,ArrayRef<SDValue> Ops)7404 UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops) {
7405 unsigned NumOps = Ops.size();
7406 assert(N->getNumOperands() == NumOps &&
7407 "Update with wrong number of operands");
7408
7409 // If no operands changed just return the input node.
7410 if (std::equal(Ops.begin(), Ops.end(), N->op_begin()))
7411 return N;
7412
7413 // See if the modified node already exists.
7414 void *InsertPos = nullptr;
7415 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, InsertPos))
7416 return Existing;
7417
7418 // Nope it doesn't. Remove the node from its current place in the maps.
7419 if (InsertPos)
7420 if (!RemoveNodeFromCSEMaps(N))
7421 InsertPos = nullptr;
7422
7423 // Now we update the operands.
7424 for (unsigned i = 0; i != NumOps; ++i)
7425 if (N->OperandList[i] != Ops[i])
7426 N->OperandList[i].set(Ops[i]);
7427
7428 updateDivergence(N);
7429 // If this gets put into a CSE map, add it.
7430 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
7431 return N;
7432 }
7433
7434 /// DropOperands - Release the operands and set this node to have
7435 /// zero operands.
DropOperands()7436 void SDNode::DropOperands() {
7437 // Unlike the code in MorphNodeTo that does this, we don't need to
7438 // watch for dead nodes here.
7439 for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
7440 SDUse &Use = *I++;
7441 Use.set(SDValue());
7442 }
7443 }
7444
setNodeMemRefs(MachineSDNode * N,ArrayRef<MachineMemOperand * > NewMemRefs)7445 void SelectionDAG::setNodeMemRefs(MachineSDNode *N,
7446 ArrayRef<MachineMemOperand *> NewMemRefs) {
7447 if (NewMemRefs.empty()) {
7448 N->clearMemRefs();
7449 return;
7450 }
7451
7452 // Check if we can avoid allocating by storing a single reference directly.
7453 if (NewMemRefs.size() == 1) {
7454 N->MemRefs = NewMemRefs[0];
7455 N->NumMemRefs = 1;
7456 return;
7457 }
7458
7459 MachineMemOperand **MemRefsBuffer =
7460 Allocator.template Allocate<MachineMemOperand *>(NewMemRefs.size());
7461 llvm::copy(NewMemRefs, MemRefsBuffer);
7462 N->MemRefs = MemRefsBuffer;
7463 N->NumMemRefs = static_cast<int>(NewMemRefs.size());
7464 }
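// Illustrative sketch: attaching the memory operand of an existing memory node
// to a freshly created MachineSDNode ('OrigLoad' and 'NewMN' are assumed to be
// in scope in a target's instruction selector):
//
//   MachineMemOperand *MMO = cast<MemSDNode>(OrigLoad)->getMemOperand();
//   CurDAG->setNodeMemRefs(NewMN, {MMO});   // single-MMO case, no allocation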
7465
7466 /// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
7467 /// machine opcode.
7468 ///
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT)7469 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7470 EVT VT) {
7471 SDVTList VTs = getVTList(VT);
7472 return SelectNodeTo(N, MachineOpc, VTs, None);
7473 }
7474
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT,SDValue Op1)7475 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7476 EVT VT, SDValue Op1) {
7477 SDVTList VTs = getVTList(VT);
7478 SDValue Ops[] = { Op1 };
7479 return SelectNodeTo(N, MachineOpc, VTs, Ops);
7480 }
7481
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT,SDValue Op1,SDValue Op2)7482 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7483 EVT VT, SDValue Op1,
7484 SDValue Op2) {
7485 SDVTList VTs = getVTList(VT);
7486 SDValue Ops[] = { Op1, Op2 };
7487 return SelectNodeTo(N, MachineOpc, VTs, Ops);
7488 }
7489
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT,SDValue Op1,SDValue Op2,SDValue Op3)7490 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7491 EVT VT, SDValue Op1,
7492 SDValue Op2, SDValue Op3) {
7493 SDVTList VTs = getVTList(VT);
7494 SDValue Ops[] = { Op1, Op2, Op3 };
7495 return SelectNodeTo(N, MachineOpc, VTs, Ops);
7496 }
7497
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT,ArrayRef<SDValue> Ops)7498 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7499 EVT VT, ArrayRef<SDValue> Ops) {
7500 SDVTList VTs = getVTList(VT);
7501 return SelectNodeTo(N, MachineOpc, VTs, Ops);
7502 }
7503
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT1,EVT VT2,ArrayRef<SDValue> Ops)7504 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7505 EVT VT1, EVT VT2, ArrayRef<SDValue> Ops) {
7506 SDVTList VTs = getVTList(VT1, VT2);
7507 return SelectNodeTo(N, MachineOpc, VTs, Ops);
7508 }
7509
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT1,EVT VT2)7510 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7511 EVT VT1, EVT VT2) {
7512 SDVTList VTs = getVTList(VT1, VT2);
7513 return SelectNodeTo(N, MachineOpc, VTs, None);
7514 }
7515
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT1,EVT VT2,EVT VT3,ArrayRef<SDValue> Ops)7516 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7517 EVT VT1, EVT VT2, EVT VT3,
7518 ArrayRef<SDValue> Ops) {
7519 SDVTList VTs = getVTList(VT1, VT2, VT3);
7520 return SelectNodeTo(N, MachineOpc, VTs, Ops);
7521 }
7522
SelectNodeTo(SDNode * N,unsigned MachineOpc,EVT VT1,EVT VT2,SDValue Op1,SDValue Op2)7523 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7524 EVT VT1, EVT VT2,
7525 SDValue Op1, SDValue Op2) {
7526 SDVTList VTs = getVTList(VT1, VT2);
7527 SDValue Ops[] = { Op1, Op2 };
7528 return SelectNodeTo(N, MachineOpc, VTs, Ops);
7529 }
7530
SelectNodeTo(SDNode * N,unsigned MachineOpc,SDVTList VTs,ArrayRef<SDValue> Ops)7531 SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
7532 SDVTList VTs,ArrayRef<SDValue> Ops) {
7533 SDNode *New = MorphNodeTo(N, ~MachineOpc, VTs, Ops);
7534 // Reset the NodeID to -1.
7535 New->setNodeId(-1);
7536 if (New != N) {
7537 ReplaceAllUsesWith(N, New);
7538 RemoveDeadNode(N);
7539 }
7540 return New;
7541 }
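// Illustrative sketch of how an instruction selector typically uses the
// wrappers above (the target opcode is shown purely for illustration):
//
//   // Inside a target's Select(), matching an i32 ISD::ADD node 'N':
//   SDNode *New = CurDAG->SelectNodeTo(N, X86::ADD32rr, MVT::i32,
//                                      N->getOperand(0), N->getOperand(1));
//   // 'New' may differ from 'N' if an identical machine node already existed.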
7542
7543 /// UpdateSDLocOnMergeSDNode - If the opt level is -O0 then it throws away
7544 /// the line number information on the merged node since it is not possible to
7545 /// preserve the information that the operation is associated with multiple lines.
7546 /// This makes the debugger work better at -O0, where there is a higher
7547 /// probability of having other instructions associated with that line.
7548 ///
7549 /// For IROrder, we keep the smaller of the two
UpdateSDLocOnMergeSDNode(SDNode * N,const SDLoc & OLoc)7550 SDNode *SelectionDAG::UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &OLoc) {
7551 DebugLoc NLoc = N->getDebugLoc();
7552 if (NLoc && OptLevel == CodeGenOpt::None && OLoc.getDebugLoc() != NLoc) {
7553 N->setDebugLoc(DebugLoc());
7554 }
7555 unsigned Order = std::min(N->getIROrder(), OLoc.getIROrder());
7556 N->setIROrder(Order);
7557 return N;
7558 }
7559
7560 /// MorphNodeTo - This *mutates* the specified node to have the specified
7561 /// return type, opcode, and operands.
7562 ///
7563 /// Note that MorphNodeTo returns the resultant node. If there is already a
7564 /// node of the specified opcode and operands, it returns that node instead of
7565 /// the current one. Note that the SDLoc need not be the same.
7566 ///
7567 /// Using MorphNodeTo is faster than creating a new node and swapping it in
7568 /// with ReplaceAllUsesWith both because it often avoids allocating a new
7569 /// node, and because it doesn't require CSE recalculation for any of
7570 /// the node's users.
7571 ///
7572 /// However, note that MorphNodeTo recursively deletes dead nodes from the DAG.
7573 /// As a consequence it isn't appropriate to use from within the DAG combiner or
7574 /// the legalizer which maintain worklists that would need to be updated when
7575 /// deleting things.
MorphNodeTo(SDNode * N,unsigned Opc,SDVTList VTs,ArrayRef<SDValue> Ops)7576 SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
7577 SDVTList VTs, ArrayRef<SDValue> Ops) {
7578 // If an identical node already exists, use it.
7579 void *IP = nullptr;
7580 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
7581 FoldingSetNodeID ID;
7582 AddNodeIDNode(ID, Opc, VTs, Ops);
7583 if (SDNode *ON = FindNodeOrInsertPos(ID, SDLoc(N), IP))
7584 return UpdateSDLocOnMergeSDNode(ON, SDLoc(N));
7585 }
7586
7587 if (!RemoveNodeFromCSEMaps(N))
7588 IP = nullptr;
7589
7590 // Start the morphing.
7591 N->NodeType = Opc;
7592 N->ValueList = VTs.VTs;
7593 N->NumValues = VTs.NumVTs;
7594
7595 // Clear the operands list, updating used nodes to remove this from their
7596 // use list. Keep track of any operands that become dead as a result.
7597 SmallPtrSet<SDNode*, 16> DeadNodeSet;
7598 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
7599 SDUse &Use = *I++;
7600 SDNode *Used = Use.getNode();
7601 Use.set(SDValue());
7602 if (Used->use_empty())
7603 DeadNodeSet.insert(Used);
7604 }
7605
7606   // For a MachineSDNode, clear out the stale memory reference information.
7607 if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N))
7608 MN->clearMemRefs();
7609
7610 // Swap for an appropriately sized array from the recycler.
7611 removeOperands(N);
7612 createOperands(N, Ops);
7613
7614 // Delete any nodes that are still dead after adding the uses for the
7615 // new operands.
7616 if (!DeadNodeSet.empty()) {
7617 SmallVector<SDNode *, 16> DeadNodes;
7618 for (SDNode *N : DeadNodeSet)
7619 if (N->use_empty())
7620 DeadNodes.push_back(N);
7621 RemoveDeadNodes(DeadNodes);
7622 }
7623
7624 if (IP)
7625 CSEMap.InsertNode(N, IP); // Memoize the new node.
7626 return N;
7627 }
7628
mutateStrictFPToFP(SDNode * Node)7629 SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) {
7630 unsigned OrigOpc = Node->getOpcode();
7631 unsigned NewOpc;
7632 bool IsUnary = false;
7633 bool IsTernary = false;
7634 switch (OrigOpc) {
7635 default:
7636 llvm_unreachable("mutateStrictFPToFP called with unexpected opcode!");
7637 case ISD::STRICT_FADD: NewOpc = ISD::FADD; break;
7638 case ISD::STRICT_FSUB: NewOpc = ISD::FSUB; break;
7639 case ISD::STRICT_FMUL: NewOpc = ISD::FMUL; break;
7640 case ISD::STRICT_FDIV: NewOpc = ISD::FDIV; break;
7641 case ISD::STRICT_FREM: NewOpc = ISD::FREM; break;
7642 case ISD::STRICT_FMA: NewOpc = ISD::FMA; IsTernary = true; break;
7643 case ISD::STRICT_FSQRT: NewOpc = ISD::FSQRT; IsUnary = true; break;
7644 case ISD::STRICT_FPOW: NewOpc = ISD::FPOW; break;
7645 case ISD::STRICT_FPOWI: NewOpc = ISD::FPOWI; break;
7646 case ISD::STRICT_FSIN: NewOpc = ISD::FSIN; IsUnary = true; break;
7647 case ISD::STRICT_FCOS: NewOpc = ISD::FCOS; IsUnary = true; break;
7648 case ISD::STRICT_FEXP: NewOpc = ISD::FEXP; IsUnary = true; break;
7649 case ISD::STRICT_FEXP2: NewOpc = ISD::FEXP2; IsUnary = true; break;
7650 case ISD::STRICT_FLOG: NewOpc = ISD::FLOG; IsUnary = true; break;
7651 case ISD::STRICT_FLOG10: NewOpc = ISD::FLOG10; IsUnary = true; break;
7652 case ISD::STRICT_FLOG2: NewOpc = ISD::FLOG2; IsUnary = true; break;
7653 case ISD::STRICT_FRINT: NewOpc = ISD::FRINT; IsUnary = true; break;
7654 case ISD::STRICT_FNEARBYINT:
7655 NewOpc = ISD::FNEARBYINT;
7656 IsUnary = true;
7657 break;
7658 case ISD::STRICT_FMAXNUM: NewOpc = ISD::FMAXNUM; break;
7659 case ISD::STRICT_FMINNUM: NewOpc = ISD::FMINNUM; break;
7660 case ISD::STRICT_FCEIL: NewOpc = ISD::FCEIL; IsUnary = true; break;
7661 case ISD::STRICT_FFLOOR: NewOpc = ISD::FFLOOR; IsUnary = true; break;
7662 case ISD::STRICT_FROUND: NewOpc = ISD::FROUND; IsUnary = true; break;
7663 case ISD::STRICT_FTRUNC: NewOpc = ISD::FTRUNC; IsUnary = true; break;
7664 }
7665
7666 // We're taking this node out of the chain, so we need to re-link things.
7667 SDValue InputChain = Node->getOperand(0);
7668 SDValue OutputChain = SDValue(Node, 1);
7669 ReplaceAllUsesOfValueWith(OutputChain, InputChain);
7670
7671 SDVTList VTs = getVTList(Node->getOperand(1).getValueType());
7672 SDNode *Res = nullptr;
7673 if (IsUnary)
7674 Res = MorphNodeTo(Node, NewOpc, VTs, { Node->getOperand(1) });
7675 else if (IsTernary)
7676 Res = MorphNodeTo(Node, NewOpc, VTs, { Node->getOperand(1),
7677 Node->getOperand(2),
7678 Node->getOperand(3)});
7679 else
7680 Res = MorphNodeTo(Node, NewOpc, VTs, { Node->getOperand(1),
7681 Node->getOperand(2) });
7682
7683 // MorphNodeTo can operate in two ways: if an existing node with the
7684 // specified operands exists, it can just return it. Otherwise, it
7685 // updates the node in place to have the requested operands.
7686 if (Res == Node) {
7687 // If we updated the node in place, reset the node ID. To the isel,
7688 // this should be just like a newly allocated machine node.
7689 Res->setNodeId(-1);
7690 } else {
7691 ReplaceAllUsesWith(Node, Res);
7692 RemoveDeadNode(Node);
7693 }
7694
7695 return Res;
7696 }
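// Rough before/after sketch of the mutation performed above (operand and
// result numbering per the STRICT_* node definitions):
//
//   before: t1: f64, ch = strict_fadd Chain, X, Y   // users of t1:1 use the chain
//   after:  t1: f64     = fadd X, Y                 // former users of t1:1 now
//                                                   // use 'Chain' directly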
7697
7698 /// getMachineNode - These are used for target selectors to create a new node
7699 /// with specified return type(s), MachineInstr opcode, and operands.
7700 ///
7701 /// Note that getMachineNode returns the resultant node. If there is already a
7702 /// node of the specified opcode and operands, it returns that node instead of
7703 /// the current one.
getMachineNode(unsigned Opcode,const SDLoc & dl,EVT VT)7704 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7705 EVT VT) {
7706 SDVTList VTs = getVTList(VT);
7707 return getMachineNode(Opcode, dl, VTs, None);
7708 }
7709
getMachineNode(unsigned Opcode,const SDLoc & dl,EVT VT,SDValue Op1)7710 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7711 EVT VT, SDValue Op1) {
7712 SDVTList VTs = getVTList(VT);
7713 SDValue Ops[] = { Op1 };
7714 return getMachineNode(Opcode, dl, VTs, Ops);
7715 }
7716
getMachineNode(unsigned Opcode,const SDLoc & dl,EVT VT,SDValue Op1,SDValue Op2)7717 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7718 EVT VT, SDValue Op1, SDValue Op2) {
7719 SDVTList VTs = getVTList(VT);
7720 SDValue Ops[] = { Op1, Op2 };
7721 return getMachineNode(Opcode, dl, VTs, Ops);
7722 }
7723
getMachineNode(unsigned Opcode,const SDLoc & dl,EVT VT,SDValue Op1,SDValue Op2,SDValue Op3)7724 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7725 EVT VT, SDValue Op1, SDValue Op2,
7726 SDValue Op3) {
7727 SDVTList VTs = getVTList(VT);
7728 SDValue Ops[] = { Op1, Op2, Op3 };
7729 return getMachineNode(Opcode, dl, VTs, Ops);
7730 }
7731
getMachineNode(unsigned Opcode,const SDLoc & dl,EVT VT,ArrayRef<SDValue> Ops)7732 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7733 EVT VT, ArrayRef<SDValue> Ops) {
7734 SDVTList VTs = getVTList(VT);
7735 return getMachineNode(Opcode, dl, VTs, Ops);
7736 }
7737
getMachineNode(unsigned Opcode,const SDLoc & dl,EVT VT1,EVT VT2,SDValue Op1,SDValue Op2)7738 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7739 EVT VT1, EVT VT2, SDValue Op1,
7740 SDValue Op2) {
7741 SDVTList VTs = getVTList(VT1, VT2);
7742 SDValue Ops[] = { Op1, Op2 };
7743 return getMachineNode(Opcode, dl, VTs, Ops);
7744 }
7745
getMachineNode(unsigned Opcode,const SDLoc & dl,EVT VT1,EVT VT2,SDValue Op1,SDValue Op2,SDValue Op3)7746 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7747 EVT VT1, EVT VT2, SDValue Op1,
7748 SDValue Op2, SDValue Op3) {
7749 SDVTList VTs = getVTList(VT1, VT2);
7750 SDValue Ops[] = { Op1, Op2, Op3 };
7751 return getMachineNode(Opcode, dl, VTs, Ops);
7752 }
7753
getMachineNode(unsigned Opcode,const SDLoc & dl,EVT VT1,EVT VT2,ArrayRef<SDValue> Ops)7754 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7755 EVT VT1, EVT VT2,
7756 ArrayRef<SDValue> Ops) {
7757 SDVTList VTs = getVTList(VT1, VT2);
7758 return getMachineNode(Opcode, dl, VTs, Ops);
7759 }
7760
getMachineNode(unsigned Opcode,const SDLoc & dl,EVT VT1,EVT VT2,EVT VT3,SDValue Op1,SDValue Op2)7761 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7762 EVT VT1, EVT VT2, EVT VT3,
7763 SDValue Op1, SDValue Op2) {
7764 SDVTList VTs = getVTList(VT1, VT2, VT3);
7765 SDValue Ops[] = { Op1, Op2 };
7766 return getMachineNode(Opcode, dl, VTs, Ops);
7767 }
7768
getMachineNode(unsigned Opcode,const SDLoc & dl,EVT VT1,EVT VT2,EVT VT3,SDValue Op1,SDValue Op2,SDValue Op3)7769 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7770 EVT VT1, EVT VT2, EVT VT3,
7771 SDValue Op1, SDValue Op2,
7772 SDValue Op3) {
7773 SDVTList VTs = getVTList(VT1, VT2, VT3);
7774 SDValue Ops[] = { Op1, Op2, Op3 };
7775 return getMachineNode(Opcode, dl, VTs, Ops);
7776 }
7777
getMachineNode(unsigned Opcode,const SDLoc & dl,EVT VT1,EVT VT2,EVT VT3,ArrayRef<SDValue> Ops)7778 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7779 EVT VT1, EVT VT2, EVT VT3,
7780 ArrayRef<SDValue> Ops) {
7781 SDVTList VTs = getVTList(VT1, VT2, VT3);
7782 return getMachineNode(Opcode, dl, VTs, Ops);
7783 }
7784
getMachineNode(unsigned Opcode,const SDLoc & dl,ArrayRef<EVT> ResultTys,ArrayRef<SDValue> Ops)7785 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &dl,
7786 ArrayRef<EVT> ResultTys,
7787 ArrayRef<SDValue> Ops) {
7788 SDVTList VTs = getVTList(ResultTys);
7789 return getMachineNode(Opcode, dl, VTs, Ops);
7790 }
7791
getMachineNode(unsigned Opcode,const SDLoc & DL,SDVTList VTs,ArrayRef<SDValue> Ops)7792 MachineSDNode *SelectionDAG::getMachineNode(unsigned Opcode, const SDLoc &DL,
7793 SDVTList VTs,
7794 ArrayRef<SDValue> Ops) {
7795 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
7796 MachineSDNode *N;
7797 void *IP = nullptr;
7798
7799 if (DoCSE) {
7800 FoldingSetNodeID ID;
7801 AddNodeIDNode(ID, ~Opcode, VTs, Ops);
7802 IP = nullptr;
7803 if (SDNode *E = FindNodeOrInsertPos(ID, DL, IP)) {
7804 return cast<MachineSDNode>(UpdateSDLocOnMergeSDNode(E, DL));
7805 }
7806 }
7807
7808 // Allocate a new MachineSDNode.
7809 N = newSDNode<MachineSDNode>(~Opcode, DL.getIROrder(), DL.getDebugLoc(), VTs);
7810 createOperands(N, Ops);
7811
7812 if (DoCSE)
7813 CSEMap.InsertNode(N, IP);
7814
7815 InsertNode(N);
7816 return N;
7817 }
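// Illustrative sketch: creating a machine node with a value result and a chain
// result. 'MyTarget::LDriw' is a hypothetical load opcode; 'Base', 'Offset',
// 'Chain' and 'DL' are assumed to be in scope in a target's selector.
//
//   SDValue Ops[] = { Base, Offset, Chain };
//   MachineSDNode *Load = CurDAG->getMachineNode(MyTarget::LDriw, DL,
//                                                MVT::i32, MVT::Other, Ops);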
7818
7819 /// getTargetExtractSubreg - A convenience function for creating
7820 /// TargetOpcode::EXTRACT_SUBREG nodes.
getTargetExtractSubreg(int SRIdx,const SDLoc & DL,EVT VT,SDValue Operand)7821 SDValue SelectionDAG::getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
7822 SDValue Operand) {
7823 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
7824 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
7825 VT, Operand, SRIdxVal);
7826 return SDValue(Subreg, 0);
7827 }
7828
7829 /// getTargetInsertSubreg - A convenience function for creating
7830 /// TargetOpcode::INSERT_SUBREG nodes.
getTargetInsertSubreg(int SRIdx,const SDLoc & DL,EVT VT,SDValue Operand,SDValue Subreg)7831 SDValue SelectionDAG::getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
7832 SDValue Operand, SDValue Subreg) {
7833 SDValue SRIdxVal = getTargetConstant(SRIdx, DL, MVT::i32);
7834 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
7835 VT, Operand, Subreg, SRIdxVal);
7836 return SDValue(Result, 0);
7837 }
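// Illustrative sketch (the subregister index is shown purely as an example):
//
//   // Take the low 32 bits of a 64-bit value as an i32:
//   SDValue Lo = DAG.getTargetExtractSubreg(AArch64::sub_32, DL, MVT::i32, Val64);
//   // Insert them back into a 64-bit value:
//   SDValue Full = DAG.getTargetInsertSubreg(AArch64::sub_32, DL, MVT::i64,
//                                            Val64, Lo);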
7838
7839 /// getNodeIfExists - Get the specified node if it's already available, or
7840 /// else return NULL.
getNodeIfExists(unsigned Opcode,SDVTList VTList,ArrayRef<SDValue> Ops,const SDNodeFlags Flags)7841 SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
7842 ArrayRef<SDValue> Ops,
7843 const SDNodeFlags Flags) {
7844 if (VTList.VTs[VTList.NumVTs - 1] != MVT::Glue) {
7845 FoldingSetNodeID ID;
7846 AddNodeIDNode(ID, Opcode, VTList, Ops);
7847 void *IP = nullptr;
7848 if (SDNode *E = FindNodeOrInsertPos(ID, SDLoc(), IP)) {
7849 E->intersectFlagsWith(Flags);
7850 return E;
7851 }
7852 }
7853 return nullptr;
7854 }
7855
7856 /// getDbgValue - Creates a SDDbgValue node.
7857 ///
7858 /// SDNode
getDbgValue(DIVariable * Var,DIExpression * Expr,SDNode * N,unsigned R,bool IsIndirect,const DebugLoc & DL,unsigned O)7859 SDDbgValue *SelectionDAG::getDbgValue(DIVariable *Var, DIExpression *Expr,
7860 SDNode *N, unsigned R, bool IsIndirect,
7861 const DebugLoc &DL, unsigned O) {
7862 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
7863 "Expected inlined-at fields to agree");
7864 return new (DbgInfo->getAlloc())
7865 SDDbgValue(Var, Expr, N, R, IsIndirect, DL, O);
7866 }
7867
7868 /// Constant
getConstantDbgValue(DIVariable * Var,DIExpression * Expr,const Value * C,const DebugLoc & DL,unsigned O)7869 SDDbgValue *SelectionDAG::getConstantDbgValue(DIVariable *Var,
7870 DIExpression *Expr,
7871 const Value *C,
7872 const DebugLoc &DL, unsigned O) {
7873 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
7874 "Expected inlined-at fields to agree");
7875 return new (DbgInfo->getAlloc()) SDDbgValue(Var, Expr, C, DL, O);
7876 }
7877
7878 /// FrameIndex
getFrameIndexDbgValue(DIVariable * Var,DIExpression * Expr,unsigned FI,bool IsIndirect,const DebugLoc & DL,unsigned O)7879 SDDbgValue *SelectionDAG::getFrameIndexDbgValue(DIVariable *Var,
7880 DIExpression *Expr, unsigned FI,
7881 bool IsIndirect,
7882 const DebugLoc &DL,
7883 unsigned O) {
7884 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
7885 "Expected inlined-at fields to agree");
7886 return new (DbgInfo->getAlloc())
7887 SDDbgValue(Var, Expr, FI, IsIndirect, DL, O, SDDbgValue::FRAMEIX);
7888 }
7889
7890 /// VReg
getVRegDbgValue(DIVariable * Var,DIExpression * Expr,unsigned VReg,bool IsIndirect,const DebugLoc & DL,unsigned O)7891 SDDbgValue *SelectionDAG::getVRegDbgValue(DIVariable *Var,
7892 DIExpression *Expr,
7893 unsigned VReg, bool IsIndirect,
7894 const DebugLoc &DL, unsigned O) {
7895 assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
7896 "Expected inlined-at fields to agree");
7897 return new (DbgInfo->getAlloc())
7898 SDDbgValue(Var, Expr, VReg, IsIndirect, DL, O, SDDbgValue::VREG);
7899 }
7900
transferDbgValues(SDValue From,SDValue To,unsigned OffsetInBits,unsigned SizeInBits,bool InvalidateDbg)7901 void SelectionDAG::transferDbgValues(SDValue From, SDValue To,
7902 unsigned OffsetInBits, unsigned SizeInBits,
7903 bool InvalidateDbg) {
7904 SDNode *FromNode = From.getNode();
7905 SDNode *ToNode = To.getNode();
7906 assert(FromNode && ToNode && "Can't modify dbg values");
7907
7908 // PR35338
7909 // TODO: assert(From != To && "Redundant dbg value transfer");
7910 // TODO: assert(FromNode != ToNode && "Intranode dbg value transfer");
7911 if (From == To || FromNode == ToNode)
7912 return;
7913
7914 if (!FromNode->getHasDebugValue())
7915 return;
7916
7917 SmallVector<SDDbgValue *, 2> ClonedDVs;
7918 for (SDDbgValue *Dbg : GetDbgValues(FromNode)) {
7919 if (Dbg->getKind() != SDDbgValue::SDNODE || Dbg->isInvalidated())
7920 continue;
7921
7922 // TODO: assert(!Dbg->isInvalidated() && "Transfer of invalid dbg value");
7923
7924 // Just transfer the dbg value attached to From.
7925 if (Dbg->getResNo() != From.getResNo())
7926 continue;
7927
7928 DIVariable *Var = Dbg->getVariable();
7929 auto *Expr = Dbg->getExpression();
7930 // If a fragment is requested, update the expression.
7931 if (SizeInBits) {
7932 // When splitting a larger (e.g., sign-extended) value whose
7933 // lower bits are described with an SDDbgValue, do not attempt
7934 // to transfer the SDDbgValue to the upper bits.
7935 if (auto FI = Expr->getFragmentInfo())
7936 if (OffsetInBits + SizeInBits > FI->SizeInBits)
7937 continue;
7938 auto Fragment = DIExpression::createFragmentExpression(Expr, OffsetInBits,
7939 SizeInBits);
7940 if (!Fragment)
7941 continue;
7942 Expr = *Fragment;
7943 }
7944 // Clone the SDDbgValue and move it to To.
7945 SDDbgValue *Clone =
7946 getDbgValue(Var, Expr, ToNode, To.getResNo(), Dbg->isIndirect(),
7947 Dbg->getDebugLoc(), Dbg->getOrder());
7948 ClonedDVs.push_back(Clone);
7949
7950 if (InvalidateDbg) {
7951 // Invalidate value and indicate the SDDbgValue should not be emitted.
7952 Dbg->setIsInvalidated();
7953 Dbg->setIsEmitted();
7954 }
7955 }
7956
7957 for (SDDbgValue *Dbg : ClonedDVs)
7958 AddDbgValue(Dbg, ToNode, false);
7959 }
7960
salvageDebugInfo(SDNode & N)7961 void SelectionDAG::salvageDebugInfo(SDNode &N) {
7962 if (!N.getHasDebugValue())
7963 return;
7964
7965 SmallVector<SDDbgValue *, 2> ClonedDVs;
7966 for (auto DV : GetDbgValues(&N)) {
7967 if (DV->isInvalidated())
7968 continue;
7969 switch (N.getOpcode()) {
7970 default:
7971 break;
7972 case ISD::ADD:
7973 SDValue N0 = N.getOperand(0);
7974 SDValue N1 = N.getOperand(1);
7975 if (!isConstantIntBuildVectorOrConstantInt(N0) &&
7976 isConstantIntBuildVectorOrConstantInt(N1)) {
7977 uint64_t Offset = N.getConstantOperandVal(1);
7978 // Rewrite an ADD constant node into a DIExpression. Since we are
7979 // performing arithmetic to compute the variable's *value* in the
7980 // DIExpression, we need to mark the expression with a
7981 // DW_OP_stack_value.
7982 auto *DIExpr = DV->getExpression();
7983 DIExpr = DIExpression::prepend(DIExpr, DIExpression::NoDeref, Offset,
7984 DIExpression::NoDeref,
7985 DIExpression::WithStackValue);
7986 SDDbgValue *Clone =
7987 getDbgValue(DV->getVariable(), DIExpr, N0.getNode(), N0.getResNo(),
7988 DV->isIndirect(), DV->getDebugLoc(), DV->getOrder());
7989 ClonedDVs.push_back(Clone);
7990 DV->setIsInvalidated();
7991 DV->setIsEmitted();
7992 LLVM_DEBUG(dbgs() << "SALVAGE: Rewriting";
7993 N0.getNode()->dumprFull(this);
7994 dbgs() << " into " << *DIExpr << '\n');
7995 }
7996 }
7997 }
7998
7999 for (SDDbgValue *Dbg : ClonedDVs)
8000 AddDbgValue(Dbg, Dbg->getSDNode(), false);
8001 }
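// Worked illustration of the ISD::ADD case above (roughly): a dbg value of
// variable "v" attached to (add %x, 8) is re-attached to %x with the offset
// folded into the expression, i.e.
//
//   !DIExpression()  becomes  !DIExpression(DW_OP_plus_uconst, 8, DW_OP_stack_value)
//
// so the debugger still computes the original value of "v".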
8002
8003 /// Creates a SDDbgLabel node.
getDbgLabel(DILabel * Label,const DebugLoc & DL,unsigned O)8004 SDDbgLabel *SelectionDAG::getDbgLabel(DILabel *Label,
8005 const DebugLoc &DL, unsigned O) {
8006 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(DL) &&
8007 "Expected inlined-at fields to agree");
8008 return new (DbgInfo->getAlloc()) SDDbgLabel(Label, DL, O);
8009 }
8010
8011 namespace {
8012
8013 /// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
8014 /// pointed to by a use iterator is deleted, increment the use iterator
8015 /// so that it doesn't dangle.
8016 ///
8017 class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
8018 SDNode::use_iterator &UI;
8019 SDNode::use_iterator &UE;
8020
NodeDeleted(SDNode * N,SDNode * E)8021 void NodeDeleted(SDNode *N, SDNode *E) override {
8022 // Increment the iterator as needed.
8023 while (UI != UE && N == *UI)
8024 ++UI;
8025 }
8026
8027 public:
RAUWUpdateListener(SelectionDAG & d,SDNode::use_iterator & ui,SDNode::use_iterator & ue)8028 RAUWUpdateListener(SelectionDAG &d,
8029 SDNode::use_iterator &ui,
8030 SDNode::use_iterator &ue)
8031 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
8032 };
8033
8034 } // end anonymous namespace
8035
8036 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
8037 /// This can cause recursive merging of nodes in the DAG.
8038 ///
8039 /// This version assumes From has a single result value.
8040 ///
ReplaceAllUsesWith(SDValue FromN,SDValue To)8041 void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
8042 SDNode *From = FromN.getNode();
8043 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
8044 "Cannot replace with this method!");
8045 assert(From != To.getNode() && "Cannot replace uses of with self");
8046
8047 // Preserve Debug Values
8048 transferDbgValues(FromN, To);
8049
8050 // Iterate over all the existing uses of From. New uses will be added
8051 // to the beginning of the use list, which we avoid visiting.
8052 // This specifically avoids visiting uses of From that arise while the
8053 // replacement is happening, because any such uses would be the result
8054 // of CSE: If an existing node looks like From after one of its operands
8055   // is replaced by To, we don't want to replace all of its users with To
8056 // too. See PR3018 for more info.
8057 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
8058 RAUWUpdateListener Listener(*this, UI, UE);
8059 while (UI != UE) {
8060 SDNode *User = *UI;
8061
8062 // This node is about to morph, remove its old self from the CSE maps.
8063 RemoveNodeFromCSEMaps(User);
8064
8065 // A user can appear in a use list multiple times, and when this
8066 // happens the uses are usually next to each other in the list.
8067 // To help reduce the number of CSE recomputations, process all
8068 // the uses of this user that we can find this way.
8069 do {
8070 SDUse &Use = UI.getUse();
8071 ++UI;
8072 Use.set(To);
8073 if (To->isDivergent() != From->isDivergent())
8074 updateDivergence(User);
8075 } while (UI != UE && *UI == User);
8076 // Now that we have modified User, add it back to the CSE maps. If it
8077 // already exists there, recursively merge the results together.
8078 AddModifiedNodeToCSEMaps(User);
8079 }
8080
8081 // If we just RAUW'd the root, take note.
8082 if (FromN == getRoot())
8083 setRoot(To);
8084 }
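// Illustrative sketch of the single-result form above, e.g. folding away a
// no-op node 'N' in target code ('N' and 'DAG' are assumed to be in scope):
//
//   DAG.ReplaceAllUsesWith(SDValue(N, 0), N->getOperand(0));
//   // All users of N now use its operand; N itself becomes dead and can be
//   // removed with RemoveDeadNode(N) once nothing refers to it.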
8085
8086 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
8087 /// This can cause recursive merging of nodes in the DAG.
8088 ///
8089 /// This version assumes that for each value of From, there is a
8090 /// corresponding value in To in the same position with the same type.
8091 ///
ReplaceAllUsesWith(SDNode * From,SDNode * To)8092 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
8093 #ifndef NDEBUG
8094 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
8095 assert((!From->hasAnyUseOfValue(i) ||
8096 From->getValueType(i) == To->getValueType(i)) &&
8097 "Cannot use this version of ReplaceAllUsesWith!");
8098 #endif
8099
8100 // Handle the trivial case.
8101 if (From == To)
8102 return;
8103
8104 // Preserve Debug Info. Only do this if there's a use.
8105 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
8106 if (From->hasAnyUseOfValue(i)) {
8107 assert((i < To->getNumValues()) && "Invalid To location");
8108 transferDbgValues(SDValue(From, i), SDValue(To, i));
8109 }
8110
8111 // Iterate over just the existing users of From. See the comments in
8112 // the ReplaceAllUsesWith above.
8113 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
8114 RAUWUpdateListener Listener(*this, UI, UE);
8115 while (UI != UE) {
8116 SDNode *User = *UI;
8117
8118 // This node is about to morph, remove its old self from the CSE maps.
8119 RemoveNodeFromCSEMaps(User);
8120
8121 // A user can appear in a use list multiple times, and when this
8122 // happens the uses are usually next to each other in the list.
8123 // To help reduce the number of CSE recomputations, process all
8124 // the uses of this user that we can find this way.
8125 do {
8126 SDUse &Use = UI.getUse();
8127 ++UI;
8128 Use.setNode(To);
8129 if (To->isDivergent() != From->isDivergent())
8130 updateDivergence(User);
8131 } while (UI != UE && *UI == User);
8132
8133 // Now that we have modified User, add it back to the CSE maps. If it
8134 // already exists there, recursively merge the results together.
8135 AddModifiedNodeToCSEMaps(User);
8136 }
8137
8138 // If we just RAUW'd the root, take note.
8139 if (From == getRoot().getNode())
8140 setRoot(SDValue(To, getRoot().getResNo()));
8141 }
8142
8143 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
8144 /// This can cause recursive merging of nodes in the DAG.
8145 ///
8146 /// This version can replace From with any result values. To must match the
8147 /// number and types of values returned by From.
ReplaceAllUsesWith(SDNode * From,const SDValue * To)8148 void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
8149 if (From->getNumValues() == 1) // Handle the simple case efficiently.
8150 return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
8151
8152 // Preserve Debug Info.
8153 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
8154 transferDbgValues(SDValue(From, i), To[i]);
8155
8156 // Iterate over just the existing users of From. See the comments in
8157 // the ReplaceAllUsesWith above.
8158 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
8159 RAUWUpdateListener Listener(*this, UI, UE);
8160 while (UI != UE) {
8161 SDNode *User = *UI;
8162
8163 // This node is about to morph, remove its old self from the CSE maps.
8164 RemoveNodeFromCSEMaps(User);
8165
8166 // A user can appear in a use list multiple times, and when this happens the
8167 // uses are usually next to each other in the list. To help reduce the
8168 // number of CSE and divergence recomputations, process all the uses of this
8169 // user that we can find this way.
8170 bool To_IsDivergent = false;
8171 do {
8172 SDUse &Use = UI.getUse();
8173 const SDValue &ToOp = To[Use.getResNo()];
8174 ++UI;
8175 Use.set(ToOp);
8176 To_IsDivergent |= ToOp->isDivergent();
8177 } while (UI != UE && *UI == User);
8178
8179 if (To_IsDivergent != From->isDivergent())
8180 updateDivergence(User);
8181
8182 // Now that we have modified User, add it back to the CSE maps. If it
8183 // already exists there, recursively merge the results together.
8184 AddModifiedNodeToCSEMaps(User);
8185 }
8186
8187 // If we just RAUW'd the root, take note.
8188 if (From == getRoot().getNode())
8189 setRoot(SDValue(To[getRoot().getResNo()]));
8190 }
8191
8192 /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
8193 /// uses of other values produced by From.getNode() alone. The Deleted
8194 /// vector is handled the same way as for ReplaceAllUsesWith.
ReplaceAllUsesOfValueWith(SDValue From,SDValue To)8195 void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){
8196 // Handle the really simple, really trivial case efficiently.
8197 if (From == To) return;
8198
8199   // Handle the simple, trivial case efficiently.
8200 if (From.getNode()->getNumValues() == 1) {
8201 ReplaceAllUsesWith(From, To);
8202 return;
8203 }
8204
8205 // Preserve Debug Info.
8206 transferDbgValues(From, To);
8207
8208 // Iterate over just the existing users of From. See the comments in
8209 // the ReplaceAllUsesWith above.
8210 SDNode::use_iterator UI = From.getNode()->use_begin(),
8211 UE = From.getNode()->use_end();
8212 RAUWUpdateListener Listener(*this, UI, UE);
8213 while (UI != UE) {
8214 SDNode *User = *UI;
8215 bool UserRemovedFromCSEMaps = false;
8216
8217 // A user can appear in a use list multiple times, and when this
8218 // happens the uses are usually next to each other in the list.
8219 // To help reduce the number of CSE recomputations, process all
8220 // the uses of this user that we can find this way.
8221 do {
8222 SDUse &Use = UI.getUse();
8223
8224 // Skip uses of different values from the same node.
8225 if (Use.getResNo() != From.getResNo()) {
8226 ++UI;
8227 continue;
8228 }
8229
8230 // If this node hasn't been modified yet, it's still in the CSE maps,
8231 // so remove its old self from the CSE maps.
8232 if (!UserRemovedFromCSEMaps) {
8233 RemoveNodeFromCSEMaps(User);
8234 UserRemovedFromCSEMaps = true;
8235 }
8236
8237 ++UI;
8238 Use.set(To);
8239 if (To->isDivergent() != From->isDivergent())
8240 updateDivergence(User);
8241 } while (UI != UE && *UI == User);
8242 // We are iterating over all uses of the From node, so if a use
8243 // doesn't use the specific value, no changes are made.
8244 if (!UserRemovedFromCSEMaps)
8245 continue;
8246
8247 // Now that we have modified User, add it back to the CSE maps. If it
8248 // already exists there, recursively merge the results together.
8249 AddModifiedNodeToCSEMaps(User);
8250 }
8251
8252 // If we just RAUW'd the root, take note.
8253 if (From == getRoot())
8254 setRoot(To);
8255 }
8256
8257 namespace {
8258
8259 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
8260 /// to record information about a use.
8261 struct UseMemo {
8262 SDNode *User;
8263 unsigned Index;
8264 SDUse *Use;
8265 };
8266
8267 /// operator< - Sort Memos by User.
operator <(const UseMemo & L,const UseMemo & R)8268 bool operator<(const UseMemo &L, const UseMemo &R) {
8269 return (intptr_t)L.User < (intptr_t)R.User;
8270 }
8271
8272 } // end anonymous namespace
8273
updateDivergence(SDNode * N)8274 void SelectionDAG::updateDivergence(SDNode * N)
8275 {
8276 if (TLI->isSDNodeAlwaysUniform(N))
8277 return;
8278 bool IsDivergent = TLI->isSDNodeSourceOfDivergence(N, FLI, DA);
8279 for (auto &Op : N->ops()) {
8280 if (Op.Val.getValueType() != MVT::Other)
8281 IsDivergent |= Op.getNode()->isDivergent();
8282 }
8283 if (N->SDNodeBits.IsDivergent != IsDivergent) {
8284 N->SDNodeBits.IsDivergent = IsDivergent;
8285 for (auto U : N->uses()) {
8286 updateDivergence(U);
8287 }
8288 }
8289 }
8290
8291
CreateTopologicalOrder(std::vector<SDNode * > & Order)8292 void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode*>& Order) {
8293 DenseMap<SDNode *, unsigned> Degree;
8294 Order.reserve(AllNodes.size());
8295 for (auto & N : allnodes()) {
8296 unsigned NOps = N.getNumOperands();
8297 Degree[&N] = NOps;
8298 if (0 == NOps)
8299 Order.push_back(&N);
8300 }
8301 for (std::vector<SDNode *>::iterator I = Order.begin();
8302 I!=Order.end();++I) {
8303 SDNode * N = *I;
8304 for (auto U : N->uses()) {
8305 unsigned &UnsortedOps = Degree[U];
8306 if (0 == --UnsortedOps)
8307 Order.push_back(U);
8308 }
8309 }
8310 }
8311
8312 #ifndef NDEBUG
VerifyDAGDiverence()8313 void SelectionDAG::VerifyDAGDiverence()
8314 {
8315 std::vector<SDNode*> TopoOrder;
8316 CreateTopologicalOrder(TopoOrder);
8317 const TargetLowering &TLI = getTargetLoweringInfo();
8318 DenseMap<const SDNode *, bool> DivergenceMap;
8319 for (auto &N : allnodes()) {
8320 DivergenceMap[&N] = false;
8321 }
8322 for (auto N : TopoOrder) {
8323 bool IsDivergent = DivergenceMap[N];
8324 bool IsSDNodeDivergent = TLI.isSDNodeSourceOfDivergence(N, FLI, DA);
8325 for (auto &Op : N->ops()) {
8326 if (Op.Val.getValueType() != MVT::Other)
8327 IsSDNodeDivergent |= DivergenceMap[Op.getNode()];
8328 }
8329 if (!IsDivergent && IsSDNodeDivergent && !TLI.isSDNodeAlwaysUniform(N)) {
8330 DivergenceMap[N] = true;
8331 }
8332 }
8333 for (auto &N : allnodes()) {
8334 (void)N;
8335 assert(DivergenceMap[&N] == N.isDivergent() &&
8336 "Divergence bit inconsistency detected\n");
8337 }
8338 }
8339 #endif
8340
8341
8342 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
8343 /// uses of other values produced by From.getNode() alone. The same value
8344 /// may appear in both the From and To list. The Deleted vector is
8345 /// handled the same way as for ReplaceAllUsesWith.
ReplaceAllUsesOfValuesWith(const SDValue * From,const SDValue * To,unsigned Num)8346 void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
8347 const SDValue *To,
8348 unsigned Num){
8349 // Handle the simple, trivial case efficiently.
8350 if (Num == 1)
8351 return ReplaceAllUsesOfValueWith(*From, *To);
8352
8353 transferDbgValues(*From, *To);
8354
8355   // Collect all the existing uses and record them. This avoids
8356   // processing new uses that are introduced during the
8357   // replacement process.
8358 SmallVector<UseMemo, 4> Uses;
8359 for (unsigned i = 0; i != Num; ++i) {
8360 unsigned FromResNo = From[i].getResNo();
8361 SDNode *FromNode = From[i].getNode();
8362 for (SDNode::use_iterator UI = FromNode->use_begin(),
8363 E = FromNode->use_end(); UI != E; ++UI) {
8364 SDUse &Use = UI.getUse();
8365 if (Use.getResNo() == FromResNo) {
8366 UseMemo Memo = { *UI, i, &Use };
8367 Uses.push_back(Memo);
8368 }
8369 }
8370 }
8371
8372 // Sort the uses, so that all the uses from a given User are together.
8373 llvm::sort(Uses);
8374
8375 for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
8376 UseIndex != UseIndexEnd; ) {
8377 // We know that this user uses some value of From. If it is the right
8378 // value, update it.
8379 SDNode *User = Uses[UseIndex].User;
8380
8381 // This node is about to morph, remove its old self from the CSE maps.
8382 RemoveNodeFromCSEMaps(User);
8383
8384 // The Uses array is sorted, so all the uses for a given User
8385 // are next to each other in the list.
8386 // To help reduce the number of CSE recomputations, process all
8387 // the uses of this user that we can find this way.
8388 do {
8389 unsigned i = Uses[UseIndex].Index;
8390 SDUse &Use = *Uses[UseIndex].Use;
8391 ++UseIndex;
8392
8393 Use.set(To[i]);
8394 } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
8395
8396 // Now that we have modified User, add it back to the CSE maps. If it
8397 // already exists there, recursively merge the results together.
8398 AddModifiedNodeToCSEMaps(User);
8399 }
8400 }
8401
8402 /// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
8403 /// based on their topological order. It also sorts the node list itself into
8404 /// topological order and returns the number of nodes (the maximum id plus one).
AssignTopologicalOrder()8405 unsigned SelectionDAG::AssignTopologicalOrder() {
8406 unsigned DAGSize = 0;
8407
8408 // SortedPos tracks the progress of the algorithm. Nodes before it are
8409 // sorted, nodes after it are unsorted. When the algorithm completes
8410 // it is at the end of the list.
8411 allnodes_iterator SortedPos = allnodes_begin();
8412
8413 // Visit all the nodes. Move nodes with no operands to the front of
8414 // the list immediately. Annotate nodes that do have operands with their
8415 // operand count. Before we do this, the Node Id fields of the nodes
8416 // may contain arbitrary values. After, the Node Id fields for nodes
8417 // before SortedPos will contain the topological sort index, and the
8418   // Node Id fields for nodes at SortedPos and after will contain the
8419 // count of outstanding operands.
8420 for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) {
8421 SDNode *N = &*I++;
8422 checkForCycles(N, this);
8423 unsigned Degree = N->getNumOperands();
8424 if (Degree == 0) {
8425       // A node with no operands is trivially sorted; move it into place immediately.
8426 N->setNodeId(DAGSize++);
8427 allnodes_iterator Q(N);
8428 if (Q != SortedPos)
8429 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
8430 assert(SortedPos != AllNodes.end() && "Overran node list");
8431 ++SortedPos;
8432 } else {
8433 // Temporarily use the Node Id as scratch space for the degree count.
8434 N->setNodeId(Degree);
8435 }
8436 }
8437
8438 // Visit all the nodes. As we iterate, move nodes into sorted order,
8439 // such that by the time the end is reached all nodes will be sorted.
8440 for (SDNode &Node : allnodes()) {
8441 SDNode *N = &Node;
8442 checkForCycles(N, this);
8443     // N is in sorted position, so each of its users now has one fewer operand
8444     // that still needs to be sorted.
8445 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
8446 UI != UE; ++UI) {
8447 SDNode *P = *UI;
8448 unsigned Degree = P->getNodeId();
8449 assert(Degree != 0 && "Invalid node degree");
8450 --Degree;
8451 if (Degree == 0) {
8452         // All of P's operands are sorted, so P may be sorted now.
8453 P->setNodeId(DAGSize++);
8454 if (P->getIterator() != SortedPos)
8455 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
8456 assert(SortedPos != AllNodes.end() && "Overran node list");
8457 ++SortedPos;
8458 } else {
8459 // Update P's outstanding operand count.
8460 P->setNodeId(Degree);
8461 }
8462 }
8463 if (Node.getIterator() == SortedPos) {
8464 #ifndef NDEBUG
8465 allnodes_iterator I(N);
8466 SDNode *S = &*++I;
8467 dbgs() << "Overran sorted position:\n";
8468 S->dumprFull(this); dbgs() << "\n";
8469 dbgs() << "Checking if this is due to cycles\n";
8470 checkForCycles(this, true);
8471 #endif
8472 llvm_unreachable(nullptr);
8473 }
8474 }
8475
8476 assert(SortedPos == AllNodes.end() &&
8477 "Topological sort incomplete!");
8478 assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
8479 "First node in topological sort is not the entry token!");
8480 assert(AllNodes.front().getNodeId() == 0 &&
8481 "First node in topological sort has non-zero id!");
8482 assert(AllNodes.front().getNumOperands() == 0 &&
8483 "First node in topological sort has operands!");
8484 assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
8485 "Last node in topologic sort has unexpected id!");
8486 assert(AllNodes.back().use_empty() &&
8487 "Last node in topologic sort has users!");
8488 assert(DAGSize == allnodes_size() && "Node count mismatch!");
8489 return DAGSize;
8490 }
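// Illustrative sketch: after the call, the node list itself is in topological
// order, so a forward walk visits every node after all of its operands.
//
//   DAG.AssignTopologicalOrder();
//   for (SDNode &N : DAG.allnodes()) {
//     // All operands of N have already been visited at this point, and
//     // N.getNodeId() is its position in the order.
//   }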
8491
8492 /// AddDbgValue - Add a dbg_value SDDbgValue record. If SD is non-null, the
8493 /// value it describes is produced by SD.
AddDbgValue(SDDbgValue * DB,SDNode * SD,bool isParameter)8494 void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
8495 if (SD) {
8496 assert(DbgInfo->getSDDbgValues(SD).empty() || SD->getHasDebugValue());
8497 SD->setHasDebugValue(true);
8498 }
8499 DbgInfo->add(DB, SD, isParameter);
8500 }
8501
AddDbgLabel(SDDbgLabel * DB)8502 void SelectionDAG::AddDbgLabel(SDDbgLabel *DB) {
8503 DbgInfo->add(DB);
8504 }
8505
makeEquivalentMemoryOrdering(LoadSDNode * OldLoad,SDValue NewMemOp)8506 SDValue SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad,
8507 SDValue NewMemOp) {
8508 assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");
8509 // The new memory operation must have the same position as the old load in
8510 // terms of memory dependency. Create a TokenFactor for the old load and new
8511 // memory operation and update uses of the old load's output chain to use that
8512 // TokenFactor.
8513 SDValue OldChain = SDValue(OldLoad, 1);
8514 SDValue NewChain = SDValue(NewMemOp.getNode(), 1);
8515 if (!OldLoad->hasAnyUseOfValue(1))
8516 return NewChain;
8517
8518 SDValue TokenFactor =
8519 getNode(ISD::TokenFactor, SDLoc(OldLoad), MVT::Other, OldChain, NewChain);
8520 ReplaceAllUsesOfValueWith(OldChain, TokenFactor);
8521 UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewChain);
8522 return TokenFactor;
8523 }
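// Illustrative sketch: replacing a load with a new memory operation while
// keeping every consumer of the old chain ordered after both ('OldLoad' and
// 'NewLoad' are assumed to exist in the caller, with NewLoad a MemSDNode):
//
//   SDValue NewChain = DAG.makeEquivalentMemoryOrdering(OldLoad, NewLoad);
//   // NewChain is either NewLoad's chain (if the old chain was unused) or a
//   // TokenFactor of the old and new chains.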
8524
getSymbolFunctionGlobalAddress(SDValue Op,Function ** OutFunction)8525 SDValue SelectionDAG::getSymbolFunctionGlobalAddress(SDValue Op,
8526 Function **OutFunction) {
8527 assert(isa<ExternalSymbolSDNode>(Op) && "Node should be an ExternalSymbol");
8528
8529 auto *Symbol = cast<ExternalSymbolSDNode>(Op)->getSymbol();
8530 auto *Module = MF->getFunction().getParent();
8531 auto *Function = Module->getFunction(Symbol);
8532
8533 if (OutFunction != nullptr)
8534 *OutFunction = Function;
8535
8536 if (Function != nullptr) {
8537 auto PtrTy = TLI->getPointerTy(getDataLayout(), Function->getAddressSpace());
8538 return getGlobalAddress(Function, SDLoc(Op), PtrTy);
8539 }
8540
8541 std::string ErrorStr;
8542 raw_string_ostream ErrorFormatter(ErrorStr);
8543
8544 ErrorFormatter << "Undefined external symbol ";
8545 ErrorFormatter << '"' << Symbol << '"';
8546 ErrorFormatter.flush();
8547
8548 report_fatal_error(ErrorStr);
8549 }
8550
8551 //===----------------------------------------------------------------------===//
8552 // SDNode Class
8553 //===----------------------------------------------------------------------===//
8554
isNullConstant(SDValue V)8555 bool llvm::isNullConstant(SDValue V) {
8556 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
8557 return Const != nullptr && Const->isNullValue();
8558 }
8559
isNullFPConstant(SDValue V)8560 bool llvm::isNullFPConstant(SDValue V) {
8561 ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V);
8562 return Const != nullptr && Const->isZero() && !Const->isNegative();
8563 }
8564
isAllOnesConstant(SDValue V)8565 bool llvm::isAllOnesConstant(SDValue V) {
8566 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
8567 return Const != nullptr && Const->isAllOnesValue();
8568 }
8569
isOneConstant(SDValue V)8570 bool llvm::isOneConstant(SDValue V) {
8571 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
8572 return Const != nullptr && Const->isOne();
8573 }
8574
peekThroughBitcasts(SDValue V)8575 SDValue llvm::peekThroughBitcasts(SDValue V) {
8576 while (V.getOpcode() == ISD::BITCAST)
8577 V = V.getOperand(0);
8578 return V;
8579 }
8580
peekThroughOneUseBitcasts(SDValue V)8581 SDValue llvm::peekThroughOneUseBitcasts(SDValue V) {
8582 while (V.getOpcode() == ISD::BITCAST && V.getOperand(0).hasOneUse())
8583 V = V.getOperand(0);
8584 return V;
8585 }
8586
isBitwiseNot(SDValue V)8587 bool llvm::isBitwiseNot(SDValue V) {
8588 if (V.getOpcode() != ISD::XOR)
8589 return false;
8590 ConstantSDNode *C = isConstOrConstSplat(peekThroughBitcasts(V.getOperand(1)));
8591 return C && C->isAllOnesValue();
8592 }
8593
isConstOrConstSplat(SDValue N,bool AllowUndefs)8594 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N, bool AllowUndefs) {
8595 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
8596 return CN;
8597
8598 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
8599 BitVector UndefElements;
8600 ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements);
8601
8602 // BuildVectors can truncate their operands. Ignore that case here.
8603 if (CN && (UndefElements.none() || AllowUndefs) &&
8604 CN->getValueType(0) == N.getValueType().getScalarType())
8605 return CN;
8606 }
8607
8608 return nullptr;
8609 }
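// Illustrative sketch: this helper lets scalar and splat-vector constants be
// handled uniformly, e.g. in a combine ('Op' is assumed to be in scope):
//
//   if (ConstantSDNode *C = isConstOrConstSplat(Op))
//     if (C->getAPIntValue().isPowerOf2()) {
//       // ... treat Op as a (splat of a) power-of-two constant ...
//     }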
8610
isConstOrConstSplatFP(SDValue N,bool AllowUndefs)8611 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N, bool AllowUndefs) {
8612 if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
8613 return CN;
8614
8615 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
8616 BitVector UndefElements;
8617 ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements);
8618 if (CN && (UndefElements.none() || AllowUndefs))
8619 return CN;
8620 }
8621
8622 return nullptr;
8623 }
8624
isNullOrNullSplat(SDValue N)8625 bool llvm::isNullOrNullSplat(SDValue N) {
8626 // TODO: may want to use peekThroughBitcast() here.
8627 ConstantSDNode *C = isConstOrConstSplat(N);
8628 return C && C->isNullValue();
8629 }
8630
isOneOrOneSplat(SDValue N)8631 bool llvm::isOneOrOneSplat(SDValue N) {
8632 // TODO: may want to use peekThroughBitcast() here.
8633 unsigned BitWidth = N.getScalarValueSizeInBits();
8634 ConstantSDNode *C = isConstOrConstSplat(N);
8635 return C && C->isOne() && C->getValueSizeInBits(0) == BitWidth;
8636 }
8637
isAllOnesOrAllOnesSplat(SDValue N)8638 bool llvm::isAllOnesOrAllOnesSplat(SDValue N) {
8639 N = peekThroughBitcasts(N);
8640 unsigned BitWidth = N.getScalarValueSizeInBits();
8641 ConstantSDNode *C = isConstOrConstSplat(N);
8642 return C && C->isAllOnesValue() && C->getValueSizeInBits(0) == BitWidth;
8643 }
8644
HandleSDNode::~HandleSDNode() {
  DropOperands();
}

GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, unsigned Order,
                                         const DebugLoc &DL,
                                         const GlobalValue *GA, EVT VT,
                                         int64_t o, unsigned char TF)
    : SDNode(Opc, Order, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
  TheGlobal = GA;
}

AddrSpaceCastSDNode::AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl,
                                         EVT VT, unsigned SrcAS,
                                         unsigned DestAS)
    : SDNode(ISD::ADDRSPACECAST, Order, dl, getSDVTList(VT)),
      SrcAddrSpace(SrcAS), DestAddrSpace(DestAS) {}

MemSDNode::MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
                     SDVTList VTs, EVT memvt, MachineMemOperand *mmo)
    : SDNode(Opc, Order, dl, VTs), MemoryVT(memvt), MMO(mmo) {
  MemSDNodeBits.IsVolatile = MMO->isVolatile();
  MemSDNodeBits.IsNonTemporal = MMO->isNonTemporal();
  MemSDNodeBits.IsDereferenceable = MMO->isDereferenceable();
  MemSDNodeBits.IsInvariant = MMO->isInvariant();

  // We check here that the size of the memory operand fits within the size of
  // the MMO. This is because the MMO might indicate only a possible address
  // range instead of specifying the affected memory addresses precisely.
  assert(memvt.getStoreSize() <= MMO->getSize() && "Size mismatch!");
}

/// Profile - Gather unique data for the node.
///
void SDNode::Profile(FoldingSetNodeID &ID) const {
  AddNodeIDNode(ID, this);
}

namespace {

struct EVTArray {
  std::vector<EVT> VTs;

  EVTArray() {
    VTs.reserve(MVT::LAST_VALUETYPE);
    for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
      VTs.push_back(MVT((MVT::SimpleValueType)i));
  }
};

} // end anonymous namespace

static ManagedStatic<std::set<EVT, EVT::compareRawBits>> EVTs;
static ManagedStatic<EVTArray> SimpleVTArray;
static ManagedStatic<sys::SmartMutex<true>> VTMutex;

/// getValueTypeList - Return a pointer to the specified value type.
///
const EVT *SDNode::getValueTypeList(EVT VT) {
  if (VT.isExtended()) {
    sys::SmartScopedLock<true> Lock(*VTMutex);
    return &(*EVTs->insert(VT).first);
  } else {
    assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
           "Value type out of range!");
    return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
  }
}

/// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
/// indicated value. This method ignores uses of other values defined by this
/// operation.
bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  // TODO: Only iterate over uses of a given value of the node
  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
    if (UI.getUse().getResNo() == Value) {
      if (NUses == 0)
        return false;
      --NUses;
    }
  }

  // Found exactly the right number of uses?
  return NUses == 0;
}

/// hasAnyUseOfValue - Return true if there is any use of the indicated
/// value. This method ignores uses of other values defined by this operation.
bool SDNode::hasAnyUseOfValue(unsigned Value) const {
  assert(Value < getNumValues() && "Bad value!");

  for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
    if (UI.getUse().getResNo() == Value)
      return true;

  return false;
}

/// isOnlyUserOf - Return true if this node is the only use of N.
bool SDNode::isOnlyUserOf(const SDNode *N) const {
  bool Seen = false;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDNode *User = *I;
    if (User == this)
      Seen = true;
    else
      return false;
  }

  return Seen;
}

/// Return true if the only users of N are contained in Nodes.
bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) {
  bool Seen = false;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDNode *User = *I;
    if (llvm::any_of(Nodes,
                     [&User](const SDNode *Node) { return User == Node; }))
      Seen = true;
    else
      return false;
  }

  return Seen;
}

/// isOperandOf - Return true if this value is an operand of N.
bool SDValue::isOperandOf(const SDNode *N) const {
  for (const SDValue &Op : N->op_values())
    if (*this == Op)
      return true;
  return false;
}

bool SDNode::isOperandOf(const SDNode *N) const {
  for (const SDValue &Op : N->op_values())
    if (this == Op.getNode())
      return true;
  return false;
}

/// reachesChainWithoutSideEffects - Return true if this operand (which must
/// be a chain) reaches the specified operand without crossing any
/// side-effecting instructions on any chain path. In practice, this looks
/// through token factors and non-volatile loads. In order to remain efficient,
/// this only looks a couple of nodes in; it does not do an exhaustive search.
///
/// Note that we only need to examine chains when we're searching for
/// side-effects; SelectionDAG requires that all side-effects are represented
/// by chains, even if another operand would force a specific ordering. This
/// constraint is necessary to allow transformations like splitting loads.
bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
                                             unsigned Depth) const {
  if (*this == Dest) return true;

  // Don't search too deeply; we just want to be able to see through
  // TokenFactors, etc.
  if (Depth == 0) return false;

  // If this is a token factor, all inputs to the TF happen in parallel.
  if (getOpcode() == ISD::TokenFactor) {
    // First, try a shallow search.
    if (is_contained((*this)->ops(), Dest)) {
      // We found the chain we want as an operand of this TokenFactor.
      // Essentially, we reach the chain without side-effects if we could
      // serialize the TokenFactor into a simple chain of operations with
      // Dest as the last operation. This is automatically true if the
      // chain has one use: there are no other ordering constraints.
      // If the chain has more than one use, we give up: some other
      // use of Dest might force a side-effect between Dest and the current
      // node.
      if (Dest.hasOneUse())
        return true;
    }
    // Next, try a deep search: check whether every operand of the TokenFactor
    // reaches Dest.
    return llvm::all_of((*this)->ops(), [=](SDValue Op) {
      return Op.reachesChainWithoutSideEffects(Dest, Depth - 1);
    });
  }

  // Loads don't have side effects; look through them.
  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
    if (!Ld->isVolatile())
      return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth - 1);
  }
  return false;
}

bool SDNode::hasPredecessor(const SDNode *N) const {
  SmallPtrSet<const SDNode *, 32> Visited;
  SmallVector<const SDNode *, 16> Worklist;
  Worklist.push_back(this);
  return hasPredecessorHelper(N, Visited, Worklist);
}

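/// Intersect this node's flags with \p Flags, clearing any optimization flags
/// that are not set in both.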
void SDNode::intersectFlagsWith(const SDNodeFlags Flags) {
  this->Flags.intersectWith(Flags);
}

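/// Match a horizontal reduction that ends in an EXTRACT_VECTOR_ELT of lane 0
/// and is built from log2(N) shuffle+binop stages using one of
/// \p CandidateBinOps. On success, set \p BinOp to the matched opcode and
/// return the unreduced input vector. For example, a 4-element ADD reduction
/// is expected to look like:
///   %s1 = vector_shuffle %v,  undef, <2,3,u,u>
///   %a1 = add %v,  %s1
///   %s2 = vector_shuffle %a1, undef, <1,u,u,u>
///   %a2 = add %a1, %s2
///   %r  = extract_vector_elt %a2, 0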
SDValue
SelectionDAG::matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp,
                                  ArrayRef<ISD::NodeType> CandidateBinOps) {
  // The pattern must end in an extract from index 0.
  if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      !isNullConstant(Extract->getOperand(1)))
    return SDValue();

  SDValue Op = Extract->getOperand(0);
  unsigned Stages = Log2_32(Op.getValueType().getVectorNumElements());

  // Match against one of the candidate binary ops.
  if (llvm::none_of(CandidateBinOps, [Op](ISD::NodeType BinOp) {
        return Op.getOpcode() == unsigned(BinOp);
      }))
    return SDValue();

  // At each stage, we're looking for something that looks like:
  // %s = shufflevector <8 x i32> %op, <8 x i32> undef,
  //                    <8 x i32> <i32 2, i32 3, i32 undef, i32 undef,
  //                               i32 undef, i32 undef, i32 undef, i32 undef>
  // %a = binop <8 x i32> %op, %s
  // Where the mask changes according to the stage. E.g. for a 3-stage pyramid,
  // we expect something like:
  // <4,5,6,7,u,u,u,u>
  // <2,3,u,u,u,u,u,u>
  // <1,u,u,u,u,u,u,u>
  unsigned CandidateBinOp = Op.getOpcode();
  for (unsigned i = 0; i < Stages; ++i) {
    if (Op.getOpcode() != CandidateBinOp)
      return SDValue();

    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    ShuffleVectorSDNode *Shuffle = dyn_cast<ShuffleVectorSDNode>(Op0);
    if (Shuffle) {
      Op = Op1;
    } else {
      Shuffle = dyn_cast<ShuffleVectorSDNode>(Op1);
      Op = Op0;
    }

    // The first operand of the shuffle should be the same as the other operand
    // of the binop.
    if (!Shuffle || Shuffle->getOperand(0) != Op)
      return SDValue();

    // Verify the shuffle has the expected (at this stage of the pyramid) mask.
    for (int Index = 0, MaskEnd = 1 << i; Index < MaskEnd; ++Index)
      if (Shuffle->getMaskElt(Index) != MaskEnd + Index)
        return SDValue();
  }

  BinOp = (ISD::NodeType)CandidateBinOp;
  return Op;
}

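/// Scalarize \p N: perform the equivalent scalar operation on each extracted
/// element and reassemble the results with a BUILD_VECTOR. If \p ResNE is
/// non-zero, the result vector has ResNE elements and any lanes beyond the
/// source width are filled with UNDEF.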
SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
  assert(N->getNumValues() == 1 &&
         "Can't unroll a vector with multiple results!");

  EVT VT = N->getValueType(0);
  unsigned NE = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  SDLoc dl(N);

  SmallVector<SDValue, 8> Scalars;
  SmallVector<SDValue, 4> Operands(N->getNumOperands());

  // If ResNE is 0, fully unroll the vector op.
  if (ResNE == 0)
    ResNE = NE;
  else if (NE > ResNE)
    NE = ResNE;

  unsigned i;
  for (i = 0; i != NE; ++i) {
    for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
      SDValue Operand = N->getOperand(j);
      EVT OperandVT = Operand.getValueType();
      if (OperandVT.isVector()) {
        // A vector operand; extract a single element.
        EVT OperandEltVT = OperandVT.getVectorElementType();
        Operands[j] =
            getNode(ISD::EXTRACT_VECTOR_ELT, dl, OperandEltVT, Operand,
                    getConstant(i, dl, TLI->getVectorIdxTy(getDataLayout())));
      } else {
        // A scalar operand; just use it as is.
        Operands[j] = Operand;
      }
    }

    switch (N->getOpcode()) {
    default: {
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands,
                                N->getFlags()));
      break;
    }
    case ISD::VSELECT:
      Scalars.push_back(getNode(ISD::SELECT, dl, EltVT, Operands));
      break;
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
    case ISD::ROTL:
    case ISD::ROTR:
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
                                getShiftAmountOperand(Operands[0].getValueType(),
                                                      Operands[1])));
      break;
    case ISD::SIGN_EXTEND_INREG:
    case ISD::FP_ROUND_INREG: {
      EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
      Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
                                Operands[0],
                                getValueType(ExtVT)));
    }
    }
  }

  for (; i < ResNE; ++i)
    Scalars.push_back(getUNDEF(EltVT));

  EVT VecVT = EVT::getVectorVT(*getContext(), EltVT, ResNE);
  return getBuildVector(VecVT, dl, Scalars);
}

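/// Return true if \p LD loads \p Bytes bytes at a distance of Dist * Bytes
/// from \p Base, with both loads non-volatile, non-indexed, and on the same
/// chain.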
bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD,
                                                  LoadSDNode *Base,
                                                  unsigned Bytes,
                                                  int Dist) const {
  if (LD->isVolatile() || Base->isVolatile())
    return false;
  if (LD->isIndexed() || Base->isIndexed())
    return false;
  if (LD->getChain() != Base->getChain())
    return false;
  EVT VT = LD->getValueType(0);
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  auto BaseLocDecomp = BaseIndexOffset::match(Base, *this);
  auto LocDecomp = BaseIndexOffset::match(LD, *this);

  int64_t Offset = 0;
  if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset))
    return (Dist * Bytes == Offset);
  return false;
}

/// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
/// it cannot be inferred.
unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
  // If this is a GlobalAddress + cst, return the alignment.
  const GlobalValue *GV;
  int64_t GVOffset = 0;
  if (TLI->isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
    unsigned IdxWidth = getDataLayout().getIndexTypeSizeInBits(GV->getType());
    KnownBits Known(IdxWidth);
    llvm::computeKnownBits(GV, Known, getDataLayout());
    unsigned AlignBits = Known.countMinTrailingZeros();
    unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
    if (Align)
      return MinAlign(Align, GVOffset);
  }

  // If this is a direct reference to a stack slot, use information about the
  // stack slot's alignment.
  int FrameIdx = 1 << 31;
  int64_t FrameOffset = 0;
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
    FrameIdx = FI->getIndex();
  } else if (isBaseWithConstantOffset(Ptr) &&
             isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
    // Handle FI+Cst
    FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
    FrameOffset = Ptr.getConstantOperandVal(1);
  }

  if (FrameIdx != (1 << 31)) {
    const MachineFrameInfo &MFI = getMachineFunction().getFrameInfo();
    unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
                                    FrameOffset);
    return FIInfoAlign;
  }

  return 0;
}

/// GetSplitDestVTs - Compute the VTs needed for the low/hi parts of a type
/// which is split (or expanded) into two not necessarily identical pieces.
std::pair<EVT, EVT> SelectionDAG::GetSplitDestVTs(const EVT &VT) const {
  // Currently all types are split in half.
  EVT LoVT, HiVT;
  if (!VT.isVector())
    LoVT = HiVT = TLI->getTypeToTransformTo(*getContext(), VT);
  else
    LoVT = HiVT = VT.getHalfNumVectorElementsVT(*getContext());

  return std::make_pair(LoVT, HiVT);
}

/// SplitVector - Split the vector with EXTRACT_SUBVECTOR and return the
/// low/high part.
std::pair<SDValue, SDValue>
SelectionDAG::SplitVector(const SDValue &N, const SDLoc &DL, const EVT &LoVT,
                          const EVT &HiVT) {
  assert(LoVT.getVectorNumElements() + HiVT.getVectorNumElements() <=
             N.getValueType().getVectorNumElements() &&
         "More vector elements requested than available!");
  SDValue Lo, Hi;
  Lo = getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
               getConstant(0, DL, TLI->getVectorIdxTy(getDataLayout())));
  Hi = getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, N,
               getConstant(LoVT.getVectorNumElements(), DL,
                           TLI->getVectorIdxTy(getDataLayout())));
  return std::make_pair(Lo, Hi);
}

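/// Append to \p Args one EXTRACT_VECTOR_ELT of \p Op for each lane in the
/// half-open range [Start, Start + Count). A Count of 0 means all elements of
/// the vector type.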
void SelectionDAG::ExtractVectorElements(SDValue Op,
                                         SmallVectorImpl<SDValue> &Args,
                                         unsigned Start, unsigned Count) {
  EVT VT = Op.getValueType();
  if (Count == 0)
    Count = VT.getVectorNumElements();

  EVT EltVT = VT.getVectorElementType();
  EVT IdxTy = TLI->getVectorIdxTy(getDataLayout());
  SDLoc SL(Op);
  for (unsigned i = Start, e = Start + Count; i != e; ++i) {
    Args.push_back(getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                           Op, getConstant(i, SL, IdxTy)));
  }
}

// getAddressSpace - Return the address space this GlobalAddress belongs to.
unsigned GlobalAddressSDNode::getAddressSpace() const {
  return getGlobal()->getType()->getAddressSpace();
}

Type *ConstantPoolSDNode::getType() const {
  if (isMachineConstantPoolEntry())
    return Val.MachineCPVal->getType();
  return Val.ConstVal->getType();
}

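/// Check whether this BUILD_VECTOR describes a constant splat. On success,
/// return true and report the splat value, the bits that come from undef
/// elements, and the smallest splat element size (in bits) that is still at
/// least MinSplatBits. See the FIXMEs below for known limitations with
/// truncating operands and sub-byte elements.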
bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
                                        unsigned &SplatBitSize,
                                        bool &HasAnyUndefs,
                                        unsigned MinSplatBits,
                                        bool IsBigEndian) const {
  EVT VT = getValueType(0);
  assert(VT.isVector() && "Expected a vector type");
  unsigned VecWidth = VT.getSizeInBits();
  if (MinSplatBits > VecWidth)
    return false;

  // FIXME: The widths are based on this node's type, but build vectors can
  // truncate their operands.
  SplatValue = APInt(VecWidth, 0);
  SplatUndef = APInt(VecWidth, 0);

  // Get the bits. Bits with undefined values (when the corresponding element
  // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
  // in SplatValue. If any of the values are not constant, give up and return
  // false.
  unsigned int NumOps = getNumOperands();
  assert(NumOps > 0 && "isConstantSplat has 0-size build vector");
  unsigned EltWidth = VT.getScalarSizeInBits();

  for (unsigned j = 0; j < NumOps; ++j) {
    unsigned i = IsBigEndian ? NumOps - 1 - j : j;
    SDValue OpVal = getOperand(i);
    unsigned BitPos = j * EltWidth;

    if (OpVal.isUndef())
      SplatUndef.setBits(BitPos, BitPos + EltWidth);
    else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal))
      SplatValue.insertBits(CN->getAPIntValue().zextOrTrunc(EltWidth), BitPos);
    else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
      SplatValue.insertBits(CN->getValueAPF().bitcastToAPInt(), BitPos);
    else
      return false;
  }

  // The build_vector is all constants or undefs. Find the smallest element
  // size that splats the vector.
  HasAnyUndefs = (SplatUndef != 0);

  // FIXME: This does not work for vectors with elements less than 8 bits.
  while (VecWidth > 8) {
    unsigned HalfSize = VecWidth / 2;
    APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
    APInt LowValue = SplatValue.trunc(HalfSize);
    APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
    APInt LowUndef = SplatUndef.trunc(HalfSize);

    // If the two halves do not match (ignoring undef bits), stop here.
    if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
        MinSplatBits > HalfSize)
      break;

    SplatValue = HighValue | LowValue;
    SplatUndef = HighUndef & LowUndef;

    VecWidth = HalfSize;
  }

  SplatBitSize = VecWidth;
  return true;
}

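/// If all defined operands of this BUILD_VECTOR are the same value, return
/// that value (marking undef lanes in \p UndefElements if provided);
/// otherwise return an empty SDValue. If every operand is undef, the first
/// operand is returned.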
SDValue BuildVectorSDNode::getSplatValue(BitVector *UndefElements) const {
  if (UndefElements) {
    UndefElements->clear();
    UndefElements->resize(getNumOperands());
  }
  SDValue Splatted;
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    SDValue Op = getOperand(i);
    if (Op.isUndef()) {
      if (UndefElements)
        (*UndefElements)[i] = true;
    } else if (!Splatted) {
      Splatted = Op;
    } else if (Splatted != Op) {
      return SDValue();
    }
  }

  if (!Splatted) {
    assert(getOperand(0).isUndef() &&
           "Can only have a splat without a constant for all undefs.");
    return getOperand(0);
  }

  return Splatted;
}

ConstantSDNode *
BuildVectorSDNode::getConstantSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantSDNode>(getSplatValue(UndefElements));
}

ConstantFPSDNode *
BuildVectorSDNode::getConstantFPSplatNode(BitVector *UndefElements) const {
  return dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements));
}

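/// If this BUILD_VECTOR is a splat of a floating-point value that converts
/// exactly to an integer power of two, return the log2 of that integer;
/// otherwise return -1.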
int32_t
BuildVectorSDNode::getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
                                                   uint32_t BitWidth) const {
  if (ConstantFPSDNode *CN =
          dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
    bool IsExact;
    APSInt IntVal(BitWidth);
    const APFloat &APF = CN->getValueAPF();
    if (APF.convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact) !=
            APFloat::opOK ||
        !IsExact)
      return -1;

    return IntVal.exactLogBase2();
  }
  return -1;
}

bool BuildVectorSDNode::isConstant() const {
  for (const SDValue &Op : op_values()) {
    unsigned Opc = Op.getOpcode();
    if (Opc != ISD::UNDEF && Opc != ISD::Constant && Opc != ISD::ConstantFP)
      return false;
  }
  return true;
}

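/// Return true if \p Mask only ever selects a single source element: every
/// non-negative entry equals the first non-negative entry, and undef entries
/// (encoded as -1) are ignored.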
bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
  // Find the first non-undef value in the shuffle mask.
  unsigned i, e;
  for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
    /* search */;

  assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");

  // Make sure all remaining elements are either undef or the same as the first
  // non-undef value.
  for (int Idx = Mask[i]; i != e; ++i)
    if (Mask[i] >= 0 && Mask[i] != Idx)
      return false;
  return true;
}

// Returns the SDNode if it is a constant integer BuildVector
// or constant integer.
SDNode *SelectionDAG::isConstantIntBuildVectorOrConstantInt(SDValue N) {
  if (isa<ConstantSDNode>(N))
    return N.getNode();
  if (ISD::isBuildVectorOfConstantSDNodes(N.getNode()))
    return N.getNode();
  // Treat a GlobalAddress supporting constant offset folding as a
  // constant integer.
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N))
    if (GA->getOpcode() == ISD::GlobalAddress &&
        TLI->isOffsetFoldingLegal(GA))
      return GA;
  return nullptr;
}

SDNode *SelectionDAG::isConstantFPBuildVectorOrConstantFP(SDValue N) {
  if (isa<ConstantFPSDNode>(N))
    return N.getNode();

  if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode()))
    return N.getNode();

  return nullptr;
}

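/// Attach the given operand values to \p Node, allocating the SDUse array
/// from the operand recycler. Divergence is propagated from the non-chain
/// operands unless the target reports the node as always uniform.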
void SelectionDAG::createOperands(SDNode *Node, ArrayRef<SDValue> Vals) {
  assert(!Node->OperandList && "Node already has operands");
  assert(std::numeric_limits<decltype(SDNode::NumOperands)>::max() >=
             Vals.size() &&
         "too many operands to fit into SDNode");
  SDUse *Ops = OperandRecycler.allocate(
      ArrayRecycler<SDUse>::Capacity::get(Vals.size()), OperandAllocator);

  bool IsDivergent = false;
  for (unsigned I = 0; I != Vals.size(); ++I) {
    Ops[I].setUser(Node);
    Ops[I].setInitial(Vals[I]);
    if (Ops[I].Val.getValueType() != MVT::Other) // Skip Chain. It does not carry divergence.
      IsDivergent = IsDivergent || Ops[I].getNode()->isDivergent();
  }
  Node->NumOperands = Vals.size();
  Node->OperandList = Ops;
  IsDivergent |= TLI->isSDNodeSourceOfDivergence(Node, FLI, DA);
  if (!TLI->isSDNodeAlwaysUniform(Node))
    Node->SDNodeBits.IsDivergent = IsDivergent;
  checkForCycles(Node);
}

#ifndef NDEBUG
static void checkForCyclesHelper(const SDNode *N,
                                 SmallPtrSetImpl<const SDNode*> &Visited,
                                 SmallPtrSetImpl<const SDNode*> &Checked,
                                 const llvm::SelectionDAG *DAG) {
  // If this node has already been checked, don't check it again.
  if (Checked.count(N))
    return;

  // If a node has already been visited on this depth-first walk, reject it as
  // a cycle.
  if (!Visited.insert(N).second) {
    errs() << "Detected cycle in SelectionDAG\n";
    dbgs() << "Offending node:\n";
    N->dumprFull(DAG); dbgs() << "\n";
    abort();
  }

  for (const SDValue &Op : N->op_values())
    checkForCyclesHelper(Op.getNode(), Visited, Checked, DAG);

  Checked.insert(N);
  Visited.erase(N);
}
#endif

void llvm::checkForCycles(const llvm::SDNode *N,
                          const llvm::SelectionDAG *DAG,
                          bool force) {
#ifndef NDEBUG
  bool check = force;
#ifdef EXPENSIVE_CHECKS
  check = true;
#endif // EXPENSIVE_CHECKS
  if (check) {
    assert(N && "Checking nonexistent SDNode");
    SmallPtrSet<const SDNode*, 32> visited;
    SmallPtrSet<const SDNode*, 32> checked;
    checkForCyclesHelper(N, visited, checked, DAG);
  }
#endif // !NDEBUG
}

void llvm::checkForCycles(const llvm::SelectionDAG *DAG, bool force) {
  checkForCycles(DAG->getRoot().getNode(), DAG, force);
}