//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized DAG to an X86 DAG.
//
//===----------------------------------------------------------------------===//

14 #include "X86.h"
15 #include "X86MachineFunctionInfo.h"
16 #include "X86RegisterInfo.h"
17 #include "X86Subtarget.h"
18 #include "X86TargetMachine.h"
19 #include "llvm/ADT/Statistic.h"
20 #include "llvm/CodeGen/SelectionDAGISel.h"
21 #include "llvm/Config/llvm-config.h"
22 #include "llvm/IR/ConstantRange.h"
23 #include "llvm/IR/Function.h"
24 #include "llvm/IR/Instructions.h"
25 #include "llvm/IR/Intrinsics.h"
26 #include "llvm/IR/IntrinsicsX86.h"
27 #include "llvm/IR/Type.h"
28 #include "llvm/Support/Debug.h"
29 #include "llvm/Support/ErrorHandling.h"
30 #include "llvm/Support/KnownBits.h"
31 #include "llvm/Support/MathExtras.h"
32 #include <stdint.h>
33 using namespace llvm;
34
35 #define DEBUG_TYPE "x86-isel"
36
37 STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");
38
39 static cl::opt<bool> AndImmShrink("x86-and-imm-shrink", cl::init(true),
40 cl::desc("Enable setting constant bits to reduce size of mask immediates"),
41 cl::Hidden);
42
43 static cl::opt<bool> EnablePromoteAnyextLoad(
44 "x86-promote-anyext-load", cl::init(true),
45 cl::desc("Enable promoting aligned anyext load to wider load"), cl::Hidden);
46
//===----------------------------------------------------------------------===//
// Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// This corresponds to X86AddressMode, but uses SDValue's instead of register
  /// numbers for the leaves of the matched tree.
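  ///
  /// For example (illustrative only), the memory operand of
  /// "movl 16(%rbx,%rcx,4), %eax" would be matched with Base_Reg = %rbx,
  /// Scale = 4, IndexReg = %rcx and Disp = 16; the symbolic leaves (GV, CP,
  /// ES, MCSym, JT, BlockAddr) fill the displacement slot instead when
  /// present.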
  struct X86ISelAddressMode {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    // This is really a union, discriminated by BaseType!
    SDValue Base_Reg;
    int Base_FrameIndex;

    unsigned Scale;
    SDValue IndexReg;
    int32_t Disp;
    SDValue Segment;
    const GlobalValue *GV;
    const Constant *CP;
    const BlockAddress *BlockAddr;
    const char *ES;
    MCSymbol *MCSym;
    int JT;
    Align Alignment;            // CP alignment.
    unsigned char SymbolFlags;  // X86II::MO_*
    bool NegateIndex = false;

    X86ISelAddressMode()
        : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
          Segment(), GV(nullptr), CP(nullptr), BlockAddr(nullptr), ES(nullptr),
          MCSym(nullptr), JT(-1), SymbolFlags(X86II::MO_NO_FLAG) {}

    bool hasSymbolicDisplacement() const {
      return GV != nullptr || CP != nullptr || ES != nullptr ||
             MCSym != nullptr || JT != -1 || BlockAddr != nullptr;
    }

    bool hasBaseOrIndexReg() const {
      return BaseType == FrameIndexBase ||
             IndexReg.getNode() != nullptr || Base_Reg.getNode() != nullptr;
    }

    /// Return true if this addressing mode is already RIP-relative.
    bool isRIPRelative() const {
      if (BaseType != RegBase) return false;
      if (RegisterSDNode *RegNode =
            dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
        return RegNode->getReg() == X86::RIP;
      return false;
    }

    void setBaseReg(SDValue Reg) {
      BaseType = RegBase;
      Base_Reg = Reg;
    }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    void dump(SelectionDAG *DAG = nullptr) {
      dbgs() << "X86ISelAddressMode " << this << '\n';
      dbgs() << "Base_Reg ";
      if (Base_Reg.getNode())
        Base_Reg.getNode()->dump(DAG);
      else
        dbgs() << "nul\n";
      if (BaseType == FrameIndexBase)
        dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n';
      dbgs() << " Scale " << Scale << '\n'
             << "IndexReg ";
      if (NegateIndex)
        dbgs() << "negate ";
      if (IndexReg.getNode())
        IndexReg.getNode()->dump(DAG);
      else
        dbgs() << "nul\n";
      dbgs() << " Disp " << Disp << '\n'
             << "GV ";
      if (GV)
        GV->dump();
      else
        dbgs() << "nul";
      dbgs() << " CP ";
      if (CP)
        CP->dump();
      else
        dbgs() << "nul";
      dbgs() << '\n'
             << "ES ";
      if (ES)
        dbgs() << ES;
      else
        dbgs() << "nul";
      dbgs() << " MCSym ";
      if (MCSym)
        dbgs() << MCSym;
      else
        dbgs() << "nul";
      dbgs() << " JT" << JT << " Align" << Alignment.value() << '\n';
    }
#endif
  };
}

namespace {
  //===--------------------------------------------------------------------===//
  /// ISel - X86-specific code to select X86 machine instructions for
  /// SelectionDAG operations.
  ///
  class X86DAGToDAGISel final : public SelectionDAGISel {
    /// Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;

    /// If true, selector should try to optimize for minimum code size.
    bool OptForMinSize;

    /// Disable direct TLS access through segment registers.
    bool IndirectTlsSegRefs;

  public:
    explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
        : SelectionDAGISel(tm, OptLevel), Subtarget(nullptr),
          OptForMinSize(false), IndirectTlsSegRefs(false) {}

    StringRef getPassName() const override {
      return "X86 DAG->DAG Instruction Selection";
    }

    bool runOnMachineFunction(MachineFunction &MF) override {
      // Reset the subtarget each time through.
      Subtarget = &MF.getSubtarget<X86Subtarget>();
      IndirectTlsSegRefs = MF.getFunction().hasFnAttribute(
                             "indirect-tls-seg-refs");

      // OptFor[Min]Size are used in pattern predicates that isel is matching.
      OptForMinSize = MF.getFunction().hasMinSize();
      assert((!OptForMinSize || MF.getFunction().hasOptSize()) &&
             "OptForMinSize implies OptForSize");

      SelectionDAGISel::runOnMachineFunction(MF);
      return true;
    }

    void emitFunctionEntryCode() override;

    bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const override;

    void PreprocessISelDAG() override;
    void PostprocessISelDAG() override;

// Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"

  private:
    void Select(SDNode *N) override;

    bool foldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
    bool matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM);
    bool matchWrapper(SDValue N, X86ISelAddressMode &AM);
    bool matchAddress(SDValue N, X86ISelAddressMode &AM);
    bool matchVectorAddress(SDValue N, X86ISelAddressMode &AM);
    bool matchAdd(SDValue &N, X86ISelAddressMode &AM, unsigned Depth);
    bool matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                 unsigned Depth);
    bool matchAddressBase(SDValue N, X86ISelAddressMode &AM);
    bool selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                    SDValue &Scale, SDValue &Index, SDValue &Disp,
                    SDValue &Segment);
    bool selectVectorAddr(MemSDNode *Parent, SDValue BasePtr, SDValue IndexOp,
                          SDValue ScaleOp, SDValue &Base, SDValue &Scale,
                          SDValue &Index, SDValue &Disp, SDValue &Segment);
    bool selectMOV64Imm32(SDValue N, SDValue &Imm);
    bool selectLEAAddr(SDValue N, SDValue &Base,
                       SDValue &Scale, SDValue &Index, SDValue &Disp,
                       SDValue &Segment);
    bool selectLEA64_32Addr(SDValue N, SDValue &Base,
                            SDValue &Scale, SDValue &Index, SDValue &Disp,
                            SDValue &Segment);
    bool selectTLSADDRAddr(SDValue N, SDValue &Base,
                           SDValue &Scale, SDValue &Index, SDValue &Disp,
                           SDValue &Segment);
    bool selectRelocImm(SDValue N, SDValue &Op);

    bool tryFoldLoad(SDNode *Root, SDNode *P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment);

    // Convenience method where P is also root.
    bool tryFoldLoad(SDNode *P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment) {
      return tryFoldLoad(P, P, N, Base, Scale, Index, Disp, Segment);
    }

    bool tryFoldBroadcast(SDNode *Root, SDNode *P, SDValue N,
                          SDValue &Base, SDValue &Scale,
                          SDValue &Index, SDValue &Disp,
                          SDValue &Segment);

    bool isProfitableToFormMaskedOp(SDNode *N) const;

    /// Implement addressing mode selection for inline asm expressions.
    bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                      unsigned ConstraintID,
                                      std::vector<SDValue> &OutOps) override;

    void emitSpecialCodeForMain();

    inline void getAddressOperands(X86ISelAddressMode &AM, const SDLoc &DL,
                                   MVT VT, SDValue &Base, SDValue &Scale,
                                   SDValue &Index, SDValue &Disp,
                                   SDValue &Segment) {
      if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
        Base = CurDAG->getTargetFrameIndex(
            AM.Base_FrameIndex, TLI->getPointerTy(CurDAG->getDataLayout()));
      else if (AM.Base_Reg.getNode())
        Base = AM.Base_Reg;
      else
        Base = CurDAG->getRegister(0, VT);

      Scale = getI8Imm(AM.Scale, DL);

      // Negate the index if needed.
      if (AM.NegateIndex) {
        unsigned NegOpc = VT == MVT::i64 ? X86::NEG64r : X86::NEG32r;
        SDValue Neg = SDValue(CurDAG->getMachineNode(NegOpc, DL, VT, MVT::i32,
                                                     AM.IndexReg), 0);
        AM.IndexReg = Neg;
      }

      if (AM.IndexReg.getNode())
        Index = AM.IndexReg;
      else
        Index = CurDAG->getRegister(0, VT);

      // These are 32-bit even in 64-bit mode since RIP-relative offset
      // is 32-bit.
      if (AM.GV)
        Disp = CurDAG->getTargetGlobalAddress(AM.GV, SDLoc(),
                                              MVT::i32, AM.Disp,
                                              AM.SymbolFlags);
      else if (AM.CP)
        Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32, AM.Alignment,
                                             AM.Disp, AM.SymbolFlags);
      else if (AM.ES) {
        assert(!AM.Disp && "Non-zero displacement is ignored with ES.");
        Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
      } else if (AM.MCSym) {
        assert(!AM.Disp && "Non-zero displacement is ignored with MCSym.");
        assert(AM.SymbolFlags == 0 && "oo");
        Disp = CurDAG->getMCSymbol(AM.MCSym, MVT::i32);
      } else if (AM.JT != -1) {
        assert(!AM.Disp && "Non-zero displacement is ignored with JT.");
        Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
      } else if (AM.BlockAddr)
        Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp,
                                             AM.SymbolFlags);
      else
        Disp = CurDAG->getTargetConstant(AM.Disp, DL, MVT::i32);

      if (AM.Segment.getNode())
        Segment = AM.Segment;
      else
        Segment = CurDAG->getRegister(0, MVT::i16);
    }

    // Utility function to determine whether we should avoid selecting
    // immediate forms of instructions for better code size or not.
    // At a high level, we'd like to avoid such instructions when
    // we have similar constants used within the same basic block
    // that can be kept in a register.
    //
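    // For example (illustrative only): if the same non-imm8 constant feeds
    // two ALU ops in a block, materializing it once with "movl $imm, %ecx"
    // and using the register form twice can be smaller than encoding the
    // 4-byte immediate in both instructions.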
    bool shouldAvoidImmediateInstFormsForSize(SDNode *N) const {
      uint32_t UseCount = 0;

      // Do not want to hoist if we're not optimizing for size.
      // TODO: We'd like to remove this restriction.
      // See the comment in X86InstrInfo.td for more info.
      if (!CurDAG->shouldOptForSize())
        return false;

      // Walk all the users of the immediate.
      for (SDNode::use_iterator UI = N->use_begin(),
           UE = N->use_end(); (UI != UE) && (UseCount < 2); ++UI) {

        SDNode *User = *UI;

        // This user is already selected. Count it as a legitimate use and
        // move on.
        if (User->isMachineOpcode()) {
          UseCount++;
          continue;
        }

        // We want to count stores of immediates as real uses.
        if (User->getOpcode() == ISD::STORE &&
            User->getOperand(1).getNode() == N) {
          UseCount++;
          continue;
        }

        // We don't currently match users that have > 2 operands (except
        // for stores, which are handled above). Those instructions won't
        // match in ISel, for now, and would be counted incorrectly.
        // This may change in the future as we add additional instruction
        // types.
        if (User->getNumOperands() != 2)
          continue;

        // If this is a sign-extended 8-bit integer immediate used in an ALU
        // instruction, there is probably an opcode encoding to save space.
        auto *C = dyn_cast<ConstantSDNode>(N);
        if (C && isInt<8>(C->getSExtValue()))
          continue;

        // Immediates that are used for offsets as part of stack
        // manipulation should be left alone. These are typically
        // used to indicate SP offsets for argument passing and
        // will get pulled into stores/pushes (implicitly).
        if (User->getOpcode() == X86ISD::ADD ||
            User->getOpcode() == ISD::ADD ||
            User->getOpcode() == X86ISD::SUB ||
            User->getOpcode() == ISD::SUB) {

          // Find the other operand of the add/sub.
          SDValue OtherOp = User->getOperand(0);
          if (OtherOp.getNode() == N)
            OtherOp = User->getOperand(1);

          // Don't count if the other operand is SP.
          RegisterSDNode *RegNode;
          if (OtherOp->getOpcode() == ISD::CopyFromReg &&
              (RegNode = dyn_cast_or_null<RegisterSDNode>(
                 OtherOp->getOperand(1).getNode())))
            if ((RegNode->getReg() == X86::ESP) ||
                (RegNode->getReg() == X86::RSP))
              continue;
        }

        // ... otherwise, count this and move on.
        UseCount++;
      }

      // If we have more than 1 use, then recommend for hoisting.
      return (UseCount > 1);
    }

    /// Return a target constant with the specified value, of type i8.
    inline SDValue getI8Imm(unsigned Imm, const SDLoc &DL) {
      return CurDAG->getTargetConstant(Imm, DL, MVT::i8);
    }

    /// Return a target constant with the specified value, of type i32.
    inline SDValue getI32Imm(unsigned Imm, const SDLoc &DL) {
      return CurDAG->getTargetConstant(Imm, DL, MVT::i32);
    }

    /// Return a target constant with the specified value, of type i64.
    inline SDValue getI64Imm(uint64_t Imm, const SDLoc &DL) {
      return CurDAG->getTargetConstant(Imm, DL, MVT::i64);
    }

    SDValue getExtractVEXTRACTImmediate(SDNode *N, unsigned VecWidth,
                                        const SDLoc &DL) {
      assert((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width");
      uint64_t Index = N->getConstantOperandVal(1);
      MVT VecVT = N->getOperand(0).getSimpleValueType();
      return getI8Imm((Index * VecVT.getScalarSizeInBits()) / VecWidth, DL);
    }

    SDValue getInsertVINSERTImmediate(SDNode *N, unsigned VecWidth,
                                      const SDLoc &DL) {
      assert((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width");
      uint64_t Index = N->getConstantOperandVal(2);
      MVT VecVT = N->getSimpleValueType(0);
      return getI8Imm((Index * VecVT.getScalarSizeInBits()) / VecWidth, DL);
    }

    // Helper to detect unneeded 'and' instructions on shift amounts. Called
    // from PatFrags in tablegen.
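    //
    // For example (illustrative only), with Width == 5 for a 32-bit shift:
    // in (shl X, (and Y, 31)) the mask keeps all 5 bits the shift consumes,
    // so the 'and' is redundant and the shift can use Y directly.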
    bool isUnneededShiftMask(SDNode *N, unsigned Width) const {
      assert(N->getOpcode() == ISD::AND && "Unexpected opcode");
      const APInt &Val = cast<ConstantSDNode>(N->getOperand(1))->getAPIntValue();

      if (Val.countTrailingOnes() >= Width)
        return true;

      APInt Mask = Val | CurDAG->computeKnownBits(N->getOperand(0)).Zero;
      return Mask.countTrailingOnes() >= Width;
    }

    /// Return an SDNode that returns the value of the global base register.
    /// Output instructions required to initialize the global base register,
    /// if necessary.
    SDNode *getGlobalBaseReg();

    /// Return a reference to the TargetMachine, casted to the target-specific
    /// type.
    const X86TargetMachine &getTargetMachine() const {
      return static_cast<const X86TargetMachine &>(TM);
    }

    /// Return a reference to the TargetInstrInfo, casted to the target-specific
    /// type.
    const X86InstrInfo *getInstrInfo() const {
      return Subtarget->getInstrInfo();
    }

    /// Address-mode matching performs shift-of-and to and-of-shift
    /// reassociation in order to expose more scaled addressing
    /// opportunities.
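    ///
    /// A sketch of the idea (illustrative only): (shl (and X, 0x3), 2) can
    /// be rewritten as (and (shl X, 2), 0xC), which lets the shift fold into
    /// the address as Scale = 4.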
    bool ComplexPatternFuncMutatesDAG() const override {
      return true;
    }

    bool isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const;

    // Indicates we should prefer to use a non-temporal load for this load.
    bool useNonTemporalLoad(LoadSDNode *N) const {
      if (!N->isNonTemporal())
        return false;

      unsigned StoreSize = N->getMemoryVT().getStoreSize();

      if (N->getAlignment() < StoreSize)
        return false;

      switch (StoreSize) {
      default: llvm_unreachable("Unsupported store size");
      case 4:
      case 8:
        return false;
      case 16:
        return Subtarget->hasSSE41();
      case 32:
        return Subtarget->hasAVX2();
      case 64:
        return Subtarget->hasAVX512();
      }
    }

    bool foldLoadStoreIntoMemOperand(SDNode *Node);
    MachineSDNode *matchBEXTRFromAndImm(SDNode *Node);
    bool matchBitExtract(SDNode *Node);
    bool shrinkAndImmediate(SDNode *N);
    bool isMaskZeroExtended(SDNode *N) const;
    bool tryShiftAmountMod(SDNode *N);
    bool tryShrinkShlLogicImm(SDNode *N);
    bool tryVPTERNLOG(SDNode *N);
    bool tryVPTESTM(SDNode *Root, SDValue Setcc, SDValue Mask);
    bool tryMatchBitSelect(SDNode *N);

    MachineSDNode *emitPCMPISTR(unsigned ROpc, unsigned MOpc, bool MayFoldLoad,
                                const SDLoc &dl, MVT VT, SDNode *Node);
    MachineSDNode *emitPCMPESTR(unsigned ROpc, unsigned MOpc, bool MayFoldLoad,
                                const SDLoc &dl, MVT VT, SDNode *Node,
                                SDValue &InFlag);

    bool tryOptimizeRem8Extend(SDNode *N);

    bool onlyUsesZeroFlag(SDValue Flags) const;
    bool hasNoSignFlagUses(SDValue Flags) const;
    bool hasNoCarryFlagUses(SDValue Flags) const;
  };
}

// Returns true if this masked compare can be implemented legally with this
// type.
static bool isLegalMaskCompare(SDNode *N, const X86Subtarget *Subtarget) {
  unsigned Opcode = N->getOpcode();
  if (Opcode == X86ISD::CMPM || Opcode == X86ISD::STRICT_CMPM ||
      Opcode == ISD::SETCC || Opcode == X86ISD::CMPM_SAE ||
      Opcode == X86ISD::VFPCLASS) {
    // We can get 256-bit 8 element types here without VLX being enabled. When
    // this happens we will use 512-bit operations and the mask will not be
    // zero extended.
    EVT OpVT = N->getOperand(0).getValueType();
    // The first operand of X86ISD::STRICT_CMPM is chain, so we need to get the
    // second operand.
    if (Opcode == X86ISD::STRICT_CMPM)
      OpVT = N->getOperand(1).getValueType();
    if (OpVT.is256BitVector() || OpVT.is128BitVector())
      return Subtarget->hasVLX();

    return true;
  }
  // Scalar opcodes use 128 bit registers, but aren't subject to the VLX check.
  if (Opcode == X86ISD::VFPCLASSS || Opcode == X86ISD::FSETCCM ||
      Opcode == X86ISD::FSETCCM_SAE)
    return true;

  return false;
}

// Returns true if we can assume the writer of the mask has zero extended it
// for us.
bool X86DAGToDAGISel::isMaskZeroExtended(SDNode *N) const {
  // If this is an AND, check if we have a compare on either side. As long as
  // one side guarantees the mask is zero extended, the AND will preserve those
  // zeros.
  if (N->getOpcode() == ISD::AND)
    return isLegalMaskCompare(N->getOperand(0).getNode(), Subtarget) ||
           isLegalMaskCompare(N->getOperand(1).getNode(), Subtarget);

  return isLegalMaskCompare(N, Subtarget);
}

bool
X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
  if (OptLevel == CodeGenOpt::None) return false;

  if (!N.hasOneUse())
    return false;

  if (N.getOpcode() != ISD::LOAD)
    return true;

  // Don't fold non-temporal loads if we have an instruction for them.
  if (useNonTemporalLoad(cast<LoadSDNode>(N)))
    return false;

  // If N is a load, do additional profitability checks.
  if (U == Root) {
    switch (U->getOpcode()) {
    default: break;
    case X86ISD::ADD:
    case X86ISD::ADC:
    case X86ISD::SUB:
    case X86ISD::SBB:
    case X86ISD::AND:
    case X86ISD::XOR:
    case X86ISD::OR:
    case ISD::ADD:
    case ISD::ADDCARRY:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR: {
      SDValue Op1 = U->getOperand(1);

      // If the other operand is an 8-bit immediate we should fold the
      // immediate instead. This reduces code size.
      // e.g.
      // movl 4(%esp), %eax
      // addl $4, %eax
      // vs.
      // movl $4, %eax
      // addl 4(%esp), %eax
      // The former is 2 bytes shorter. In the case where the increment is 1,
      // the saving can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1)) {
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;

        // If this is a 64-bit AND with an immediate that fits in 32 bits,
        // prefer using the smaller and over folding the load. This is needed
        // to make sure immediates created by shrinkAndImmediate are always
        // folded. Ideally we would narrow the load during DAG combine and get
        // the best of both worlds.
        if (U->getOpcode() == ISD::AND &&
            Imm->getAPIntValue().getBitWidth() == 64 &&
            Imm->getAPIntValue().isIntN(32))
          return false;

        // If this is really a zext_inreg that can be represented with a movzx
        // instruction, prefer that.
        // TODO: We could shrink the load and fold if it is non-volatile.
        if (U->getOpcode() == ISD::AND &&
            (Imm->getAPIntValue() == UINT8_MAX ||
             Imm->getAPIntValue() == UINT16_MAX ||
             Imm->getAPIntValue() == UINT32_MAX))
          return false;

        // ADD/SUB can negate the immediate and use the opposite operation
        // to fit 128 into a sign-extended 8-bit immediate.
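        // For example, (add X, 128) can be selected as (sub X, -128): -128
        // fits in imm8 while +128 does not.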
        if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB) &&
            (-Imm->getAPIntValue()).isSignedIntN(8))
          return false;

        if ((U->getOpcode() == X86ISD::ADD || U->getOpcode() == X86ISD::SUB) &&
            (-Imm->getAPIntValue()).isSignedIntN(8) &&
            hasNoCarryFlagUses(SDValue(U, 1)))
          return false;
      }

      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      // movl %gs:0, %eax
      // leal i@NTPOFF(%eax), %eax
      // instead of
      // movl $i@NTPOFF, %eax
      // addl %gs:0, %eax
      // if the block also has an access to a second TLS address this will save
      // a load.
      // FIXME: This is probably also true for non-TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }

      // Don't fold load if this matches the BTS/BTR/BTC patterns.
      // BTS: (or X, (shl 1, n))
      // BTR: (and X, (rotl -2, n))
      // BTC: (xor X, (shl 1, n))
      if (U->getOpcode() == ISD::OR || U->getOpcode() == ISD::XOR) {
        if (U->getOperand(0).getOpcode() == ISD::SHL &&
            isOneConstant(U->getOperand(0).getOperand(0)))
          return false;

        if (U->getOperand(1).getOpcode() == ISD::SHL &&
            isOneConstant(U->getOperand(1).getOperand(0)))
          return false;
      }
      if (U->getOpcode() == ISD::AND) {
        SDValue U0 = U->getOperand(0);
        SDValue U1 = U->getOperand(1);
        if (U0.getOpcode() == ISD::ROTL) {
          auto *C = dyn_cast<ConstantSDNode>(U0.getOperand(0));
          if (C && C->getSExtValue() == -2)
            return false;
        }

        if (U1.getOpcode() == ISD::ROTL) {
          auto *C = dyn_cast<ConstantSDNode>(U1.getOperand(0));
          if (C && C->getSExtValue() == -2)
            return false;
        }
      }

      break;
    }
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
      // Don't fold a load into a shift by immediate. The BMI2 instructions
      // support folding a load, but not an immediate. The legacy instructions
      // support folding an immediate, but can't fold a load. Folding an
      // immediate is preferable to folding a load.
      if (isa<ConstantSDNode>(U->getOperand(1)))
        return false;

      break;
    }
  }

  // Prevent folding a load if this can be implemented with an insert_subreg or
  // a move that implicitly zeroes.
  if (Root->getOpcode() == ISD::INSERT_SUBVECTOR &&
      isNullConstant(Root->getOperand(2)) &&
      (Root->getOperand(0).isUndef() ||
       ISD::isBuildVectorAllZeros(Root->getOperand(0).getNode())))
    return false;

  return true;
}

// Indicates it is profitable to form an AVX512 masked operation. Returning
// false will favor a register-register masked move or vblendm and the
// operation will be selected separately.
bool X86DAGToDAGISel::isProfitableToFormMaskedOp(SDNode *N) const {
  assert(
      (N->getOpcode() == ISD::VSELECT || N->getOpcode() == X86ISD::SELECTS) &&
      "Unexpected opcode!");

  // If the operation has additional users, the operation will be duplicated.
  // Check the use count to prevent that.
  // FIXME: Are there cheap opcodes we might want to duplicate?
  return N->getOperand(1).hasOneUse();
}

/// Replace the original chain operand of the call with
/// load's chain operand and move load below the call's chain operand.
static void moveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
                               SDValue Call, SDValue OrigChain) {
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = OrigChain.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
        CurDAG->getNode(ISD::TokenFactor, SDLoc(Load), MVT::Other, Ops);
    Ops.clear();
    Ops.push_back(NewChain);
  }
  Ops.append(OrigChain->op_begin() + 1, OrigChain->op_end());
  CurDAG->UpdateNodeOperands(OrigChain.getNode(), Ops);
  CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));

  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  Ops.append(Call->op_begin() + 1, Call->op_end());
  CurDAG->UpdateNodeOperands(Call.getNode(), Ops);
}

/// Return true if the call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
/// In the case of a tail call, there isn't a callseq node between the call
/// chain and the load.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
  // The transformation is somewhat dangerous if the call's chain was glued to
  // the call. After MoveBelowOrigChain the load is moved between the call and
  // the chain, this can create a cycle if the load is not folded. So it is
  // *really* important that we are sure the load will be folded.
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      !LD->isSimple() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (!Chain.getNumOperands())
    return false;
  // Since we are not checking for AA here, conservatively abort if the chain
  // writes to memory. It's not safe to move the callee (a load) across a store.
  if (isa<MemSDNode>(Chain.getNode()) &&
      cast<MemSDNode>(Chain.getNode())->writeMem())
    return false;
  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
      Callee.getValue(1).hasOneUse())
    return true;
  return false;
}

void X86DAGToDAGISel::PreprocessISelDAG() {
  bool MadeChange = false;
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.

    // If this is a target specific AND node with no flag usages, turn it back
    // into ISD::AND to enable test instruction matching.
    if (N->getOpcode() == X86ISD::AND && !N->hasAnyUseOfValue(1)) {
      SDValue Res = CurDAG->getNode(ISD::AND, SDLoc(N), N->getValueType(0),
                                    N->getOperand(0), N->getOperand(1));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      MadeChange = true;
      continue;
    }

    /// Convert vector increment or decrement to sub/add with an all-ones
    /// constant:
    /// add X, <1, 1...> --> sub X, <-1, -1...>
    /// sub X, <1, 1...> --> add X, <-1, -1...>
    /// The all-ones vector constant can be materialized using a pcmpeq
    /// instruction that is commonly recognized as an idiom (has no register
    /// dependency), so that's better/smaller than loading a splat 1 constant.
    if ((N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
        N->getSimpleValueType(0).isVector()) {

      APInt SplatVal;
      if (X86::isConstantSplat(N->getOperand(1), SplatVal) &&
          SplatVal.isOneValue()) {
        SDLoc DL(N);

        MVT VT = N->getSimpleValueType(0);
        unsigned NumElts = VT.getSizeInBits() / 32;
        SDValue AllOnes =
            CurDAG->getAllOnesConstant(DL, MVT::getVectorVT(MVT::i32, NumElts));
        AllOnes = CurDAG->getBitcast(VT, AllOnes);

        unsigned NewOpcode = N->getOpcode() == ISD::ADD ? ISD::SUB : ISD::ADD;
        SDValue Res =
            CurDAG->getNode(NewOpcode, DL, VT, N->getOperand(0), AllOnes);
        --I;
        CurDAG->ReplaceAllUsesWith(N, Res.getNode());
        ++I;
        MadeChange = true;
        continue;
      }
    }

    switch (N->getOpcode()) {
    case X86ISD::VBROADCAST: {
      MVT VT = N->getSimpleValueType(0);
      // Emulate v32i16/v64i8 broadcast without BWI.
      if (!Subtarget->hasBWI() && (VT == MVT::v32i16 || VT == MVT::v64i8)) {
        MVT NarrowVT = VT == MVT::v32i16 ? MVT::v16i16 : MVT::v32i8;
        SDLoc dl(N);
        SDValue NarrowBCast =
            CurDAG->getNode(X86ISD::VBROADCAST, dl, NarrowVT, N->getOperand(0));
        SDValue Res =
            CurDAG->getNode(ISD::INSERT_SUBVECTOR, dl, VT, CurDAG->getUNDEF(VT),
                            NarrowBCast, CurDAG->getIntPtrConstant(0, dl));
        unsigned Index = VT == MVT::v32i16 ? 16 : 32;
        Res = CurDAG->getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, NarrowBCast,
                              CurDAG->getIntPtrConstant(Index, dl));

        --I;
        CurDAG->ReplaceAllUsesWith(N, Res.getNode());
        ++I;
        MadeChange = true;
        continue;
      }

      break;
    }
    case X86ISD::VBROADCAST_LOAD: {
      MVT VT = N->getSimpleValueType(0);
      // Emulate v32i16/v64i8 broadcast without BWI.
      if (!Subtarget->hasBWI() && (VT == MVT::v32i16 || VT == MVT::v64i8)) {
        MVT NarrowVT = VT == MVT::v32i16 ? MVT::v16i16 : MVT::v32i8;
        auto *MemNode = cast<MemSDNode>(N);
        SDLoc dl(N);
        SDVTList VTs = CurDAG->getVTList(NarrowVT, MVT::Other);
        SDValue Ops[] = {MemNode->getChain(), MemNode->getBasePtr()};
        SDValue NarrowBCast = CurDAG->getMemIntrinsicNode(
            X86ISD::VBROADCAST_LOAD, dl, VTs, Ops, MemNode->getMemoryVT(),
            MemNode->getMemOperand());
        SDValue Res =
            CurDAG->getNode(ISD::INSERT_SUBVECTOR, dl, VT, CurDAG->getUNDEF(VT),
                            NarrowBCast, CurDAG->getIntPtrConstant(0, dl));
        unsigned Index = VT == MVT::v32i16 ? 16 : 32;
        Res = CurDAG->getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, NarrowBCast,
                              CurDAG->getIntPtrConstant(Index, dl));

        --I;
        SDValue To[] = {Res, NarrowBCast.getValue(1)};
        CurDAG->ReplaceAllUsesWith(N, To);
        ++I;
        MadeChange = true;
        continue;
      }

      break;
    }
    case ISD::VSELECT: {
      // Replace VSELECT with non-mask conditions with BLENDV.
      if (N->getOperand(0).getValueType().getVectorElementType() == MVT::i1)
        break;

      assert(Subtarget->hasSSE41() && "Expected SSE4.1 support!");
      SDValue Blendv =
          CurDAG->getNode(X86ISD::BLENDV, SDLoc(N), N->getValueType(0),
                          N->getOperand(0), N->getOperand(1), N->getOperand(2));
      --I;
      CurDAG->ReplaceAllUsesWith(N, Blendv.getNode());
      ++I;
      MadeChange = true;
      continue;
    }
    case ISD::FP_ROUND:
    case ISD::STRICT_FP_ROUND:
    case ISD::FP_TO_SINT:
    case ISD::FP_TO_UINT:
    case ISD::STRICT_FP_TO_SINT:
    case ISD::STRICT_FP_TO_UINT: {
      // Replace vector fp_to_s/uint with their X86 specific equivalent so we
      // don't need 2 sets of patterns.
      if (!N->getSimpleValueType(0).isVector())
        break;

      unsigned NewOpc;
      switch (N->getOpcode()) {
      default: llvm_unreachable("Unexpected opcode!");
      case ISD::FP_ROUND: NewOpc = X86ISD::VFPROUND; break;
      case ISD::STRICT_FP_ROUND: NewOpc = X86ISD::STRICT_VFPROUND; break;
      case ISD::STRICT_FP_TO_SINT: NewOpc = X86ISD::STRICT_CVTTP2SI; break;
      case ISD::FP_TO_SINT: NewOpc = X86ISD::CVTTP2SI; break;
      case ISD::STRICT_FP_TO_UINT: NewOpc = X86ISD::STRICT_CVTTP2UI; break;
      case ISD::FP_TO_UINT: NewOpc = X86ISD::CVTTP2UI; break;
      }
      SDValue Res;
      if (N->isStrictFPOpcode())
        Res =
            CurDAG->getNode(NewOpc, SDLoc(N), {N->getValueType(0), MVT::Other},
                            {N->getOperand(0), N->getOperand(1)});
      else
        Res =
            CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
                            N->getOperand(0));
      --I;
      CurDAG->ReplaceAllUsesWith(N, Res.getNode());
      ++I;
      MadeChange = true;
      continue;
    }
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL: {
      // Replace vector shifts with their X86 specific equivalent so we don't
      // need 2 sets of patterns.
      if (!N->getValueType(0).isVector())
        break;

      unsigned NewOpc;
      switch (N->getOpcode()) {
      default: llvm_unreachable("Unexpected opcode!");
      case ISD::SHL: NewOpc = X86ISD::VSHLV; break;
      case ISD::SRA: NewOpc = X86ISD::VSRAV; break;
      case ISD::SRL: NewOpc = X86ISD::VSRLV; break;
      }
      SDValue Res = CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
                                    N->getOperand(0), N->getOperand(1));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      MadeChange = true;
      continue;
    }
    case ISD::ANY_EXTEND:
    case ISD::ANY_EXTEND_VECTOR_INREG: {
      // Replace vector any extend with the zero extend equivalents so we don't
      // need 2 sets of patterns. Ignore vXi1 extensions.
      if (!N->getValueType(0).isVector())
        break;

      unsigned NewOpc;
      if (N->getOperand(0).getScalarValueSizeInBits() == 1) {
        assert(N->getOpcode() == ISD::ANY_EXTEND &&
               "Unexpected opcode for mask vector!");
        NewOpc = ISD::SIGN_EXTEND;
      } else {
        NewOpc = N->getOpcode() == ISD::ANY_EXTEND
                     ? ISD::ZERO_EXTEND
                     : ISD::ZERO_EXTEND_VECTOR_INREG;
      }

      SDValue Res = CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
                                    N->getOperand(0));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      MadeChange = true;
      continue;
    }
    case ISD::FCEIL:
    case ISD::STRICT_FCEIL:
    case ISD::FFLOOR:
    case ISD::STRICT_FFLOOR:
    case ISD::FTRUNC:
    case ISD::STRICT_FTRUNC:
    case ISD::FNEARBYINT:
    case ISD::STRICT_FNEARBYINT:
    case ISD::FRINT:
    case ISD::STRICT_FRINT: {
      // Replace fp rounding ops with their X86 specific equivalent so we don't
      // need 2 sets of patterns.
      unsigned Imm;
      switch (N->getOpcode()) {
      default: llvm_unreachable("Unexpected opcode!");
      case ISD::STRICT_FCEIL:
      case ISD::FCEIL: Imm = 0xA; break;
      case ISD::STRICT_FFLOOR:
      case ISD::FFLOOR: Imm = 0x9; break;
      case ISD::STRICT_FTRUNC:
      case ISD::FTRUNC: Imm = 0xB; break;
      case ISD::STRICT_FNEARBYINT:
      case ISD::FNEARBYINT: Imm = 0xC; break;
      case ISD::STRICT_FRINT:
      case ISD::FRINT: Imm = 0x4; break;
      }
      SDLoc dl(N);
      bool IsStrict = N->isStrictFPOpcode();
      SDValue Res;
      if (IsStrict)
        Res = CurDAG->getNode(X86ISD::STRICT_VRNDSCALE, dl,
                              {N->getValueType(0), MVT::Other},
                              {N->getOperand(0), N->getOperand(1),
                               CurDAG->getTargetConstant(Imm, dl, MVT::i8)});
      else
        Res = CurDAG->getNode(X86ISD::VRNDSCALE, dl, N->getValueType(0),
                              N->getOperand(0),
                              CurDAG->getTargetConstant(Imm, dl, MVT::i8));
      --I;
      CurDAG->ReplaceAllUsesWith(N, Res.getNode());
      ++I;
      MadeChange = true;
      continue;
    }
    case X86ISD::FANDN:
    case X86ISD::FAND:
    case X86ISD::FOR:
    case X86ISD::FXOR: {
      // Widen scalar fp logic ops to vector to reduce isel patterns.
      // FIXME: Can we do this during lowering/combine?
      MVT VT = N->getSimpleValueType(0);
      if (VT.isVector() || VT == MVT::f128)
        break;

      MVT VecVT = VT == MVT::f64 ? MVT::v2f64 : MVT::v4f32;
      SDLoc dl(N);
      SDValue Op0 = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT,
                                    N->getOperand(0));
      SDValue Op1 = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT,
                                    N->getOperand(1));

      SDValue Res;
      if (Subtarget->hasSSE2()) {
        EVT IntVT = EVT(VecVT).changeVectorElementTypeToInteger();
        Op0 = CurDAG->getNode(ISD::BITCAST, dl, IntVT, Op0);
        Op1 = CurDAG->getNode(ISD::BITCAST, dl, IntVT, Op1);
        unsigned Opc;
        switch (N->getOpcode()) {
        default: llvm_unreachable("Unexpected opcode!");
        case X86ISD::FANDN: Opc = X86ISD::ANDNP; break;
        case X86ISD::FAND: Opc = ISD::AND; break;
        case X86ISD::FOR: Opc = ISD::OR; break;
        case X86ISD::FXOR: Opc = ISD::XOR; break;
        }
        Res = CurDAG->getNode(Opc, dl, IntVT, Op0, Op1);
        Res = CurDAG->getNode(ISD::BITCAST, dl, VecVT, Res);
      } else {
        Res = CurDAG->getNode(N->getOpcode(), dl, VecVT, Op0, Op1);
      }
      Res = CurDAG->getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res,
                            CurDAG->getIntPtrConstant(0, dl));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      MadeChange = true;
      continue;
    }
    }

    if (OptLevel != CodeGenOpt::None &&
        // Only do this when the target can fold the load into the call or
        // jmp.
        !Subtarget->useIndirectThunkCalls() &&
        ((N->getOpcode() == X86ISD::CALL && !Subtarget->slowTwoMemOps()) ||
         (N->getOpcode() == X86ISD::TC_RETURN &&
          (Subtarget->is64Bit() ||
           !getTargetMachine().isPositionIndependent())))) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      ///
      ///     [Load chain]
      ///         ^
      ///         |
      ///       [Load]
      ///       ^    ^
      ///       |    |
      ///      /      \--
      ///     /          |
      ///[CALLSEQ_START] |
      ///     ^          |
      ///     |          |
      /// [LOAD/C2Reg]   |
      ///     |          |
      ///      \        /
      ///       \      /
      ///       [CALL]
      bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
      SDValue Chain = N->getOperand(0);
      SDValue Load = N->getOperand(1);
      if (!isCalleeLoad(Load, Chain, HasCallSeq))
        continue;
      moveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
      ++NumLoadMoved;
      MadeChange = true;
      continue;
    }

    // Lower fpround and fpextend nodes that target the FP stack to be a store
    // and a load to/from the stack. This is a gross hack. We would like to
    // simply mark these as being illegal, but when we do that, legalize
    // produces these when it expands calls, then expands these in the same
    // legalize pass. We would like dag combine to be able to hack on these
    // between the call expansion and the node legalization. As such this pass
    // basically does "really late" legalization of these inline with the
    // X86 isel pass.
    // FIXME: This should only happen when not compiled with -O0.
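    // For example (illustrative only): an f64 -> f32 FP_ROUND whose input
    // lives in an x87 register becomes a 4-byte truncating store to a stack
    // temporary followed by an extending load of the same slot.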
    switch (N->getOpcode()) {
    default: continue;
    case ISD::FP_ROUND:
    case ISD::FP_EXTEND:
    {
      MVT SrcVT = N->getOperand(0).getSimpleValueType();
      MVT DstVT = N->getSimpleValueType(0);

      // If any of the sources are vectors, no fp stack involved.
      if (SrcVT.isVector() || DstVT.isVector())
        continue;

      // If the source and destination are SSE registers, then this is a legal
      // conversion that should not be lowered.
      const X86TargetLowering *X86Lowering =
          static_cast<const X86TargetLowering *>(TLI);
      bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
      bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
      if (SrcIsSSE && DstIsSSE)
        continue;

      if (!SrcIsSSE && !DstIsSSE) {
        // If this is an FPStack extension, it is a noop.
        if (N->getOpcode() == ISD::FP_EXTEND)
          continue;
        // If this is a value-preserving FPStack truncation, it is a noop.
        if (N->getConstantOperandVal(1))
          continue;
      }

      // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
      // FPStack has extload and truncstore. SSE can fold direct loads into other
      // operations. Based on this, decide what we want to do.
      MVT MemVT = (N->getOpcode() == ISD::FP_ROUND) ? DstVT : SrcVT;
      SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
      int SPFI = cast<FrameIndexSDNode>(MemTmp)->getIndex();
      MachinePointerInfo MPI =
          MachinePointerInfo::getFixedStack(CurDAG->getMachineFunction(), SPFI);
      SDLoc dl(N);

      // FIXME: optimize the case where the src/dest is a load or store?

      SDValue Store = CurDAG->getTruncStore(
          CurDAG->getEntryNode(), dl, N->getOperand(0), MemTmp, MPI, MemVT);
      SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store,
                                          MemTmp, MPI, MemVT);

      // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
      // extload we created. This will cause general havoc on the dag because
      // anything below the conversion could be folded into other existing
      // nodes. To avoid invalidating 'I', back it up to the convert node.
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
      break;
    }

    // The sequence of events for lowering STRICT_FP versions of these nodes
    // requires dealing with the chain differently, as there is already a
    // preexisting chain.
    case ISD::STRICT_FP_ROUND:
    case ISD::STRICT_FP_EXTEND:
    {
      MVT SrcVT = N->getOperand(1).getSimpleValueType();
      MVT DstVT = N->getSimpleValueType(0);

      // If any of the sources are vectors, no fp stack involved.
      if (SrcVT.isVector() || DstVT.isVector())
        continue;

      // If the source and destination are SSE registers, then this is a legal
      // conversion that should not be lowered.
      const X86TargetLowering *X86Lowering =
          static_cast<const X86TargetLowering *>(TLI);
      bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
      bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
      if (SrcIsSSE && DstIsSSE)
        continue;

      if (!SrcIsSSE && !DstIsSSE) {
        // If this is an FPStack extension, it is a noop.
        if (N->getOpcode() == ISD::STRICT_FP_EXTEND)
          continue;
        // If this is a value-preserving FPStack truncation, it is a noop.
        if (N->getConstantOperandVal(2))
          continue;
      }

      // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
      // FPStack has extload and truncstore. SSE can fold direct loads into other
      // operations. Based on this, decide what we want to do.
      MVT MemVT = (N->getOpcode() == ISD::STRICT_FP_ROUND) ? DstVT : SrcVT;
      SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
      int SPFI = cast<FrameIndexSDNode>(MemTmp)->getIndex();
      MachinePointerInfo MPI =
          MachinePointerInfo::getFixedStack(CurDAG->getMachineFunction(), SPFI);
      SDLoc dl(N);

      // FIXME: optimize the case where the src/dest is a load or store?

      // Since the operation is StrictFP, use the preexisting chain.
      SDValue Store, Result;
      if (!SrcIsSSE) {
        SDVTList VTs = CurDAG->getVTList(MVT::Other);
        SDValue Ops[] = {N->getOperand(0), N->getOperand(1), MemTmp};
        Store = CurDAG->getMemIntrinsicNode(X86ISD::FST, dl, VTs, Ops, MemVT,
                                            MPI, /*Align*/ None,
                                            MachineMemOperand::MOStore);
        if (N->getFlags().hasNoFPExcept()) {
          SDNodeFlags Flags = Store->getFlags();
          Flags.setNoFPExcept(true);
          Store->setFlags(Flags);
        }
      } else {
        assert(SrcVT == MemVT && "Unexpected VT!");
        Store = CurDAG->getStore(N->getOperand(0), dl, N->getOperand(1), MemTmp,
                                 MPI);
      }

      if (!DstIsSSE) {
        SDVTList VTs = CurDAG->getVTList(DstVT, MVT::Other);
        SDValue Ops[] = {Store, MemTmp};
        Result = CurDAG->getMemIntrinsicNode(
            X86ISD::FLD, dl, VTs, Ops, MemVT, MPI,
            /*Align*/ None, MachineMemOperand::MOLoad);
        if (N->getFlags().hasNoFPExcept()) {
          SDNodeFlags Flags = Result->getFlags();
          Flags.setNoFPExcept(true);
          Result->setFlags(Flags);
        }
      } else {
        assert(DstVT == MemVT && "Unexpected VT!");
        Result = CurDAG->getLoad(DstVT, dl, Store, MemTmp, MPI);
      }

      // We're about to replace all uses of the STRICT_FP_ROUND/STRICT_FP_EXTEND
      // with the load we created. This will cause general havoc on the dag
      // because anything below the conversion could be folded into other
      // existing nodes. To avoid invalidating 'I', back it up to the convert
      // node.
      --I;
      CurDAG->ReplaceAllUsesWith(N, Result.getNode());
      break;
    }
    }

    // Now that we did that, the node is dead. Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    MadeChange = true;
  }

  // Remove any dead nodes that may have been left behind.
  if (MadeChange)
    CurDAG->RemoveDeadNodes();
}

// Look for a redundant movzx/movsx that can occur after an 8-bit divrem.
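// The pattern being removed is, roughly:
//   (MOVZX32rr8 (EXTRACT_SUBREG (MOVZX32rr8_NOREX X), sub_8bit))
// which can simply reuse the inner MOVZX32rr8_NOREX result; the sign-extend
// variants are handled the same way, with MOVSX64rr8 needing an extra
// MOVSX64rr32 to widen from 32 to 64 bits.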
bool X86DAGToDAGISel::tryOptimizeRem8Extend(SDNode *N) {
  unsigned Opc = N->getMachineOpcode();
  if (Opc != X86::MOVZX32rr8 && Opc != X86::MOVSX32rr8 &&
      Opc != X86::MOVSX64rr8)
    return false;

  SDValue N0 = N->getOperand(0);

  // We need to be extracting the low byte of an extend.
  if (!N0.isMachineOpcode() ||
      N0.getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG ||
      N0.getConstantOperandVal(1) != X86::sub_8bit)
    return false;

  // We're looking for either a movsx or movzx to match the original opcode.
  unsigned ExpectedOpc = Opc == X86::MOVZX32rr8 ? X86::MOVZX32rr8_NOREX
                                                : X86::MOVSX32rr8_NOREX;
  SDValue N00 = N0.getOperand(0);
  if (!N00.isMachineOpcode() || N00.getMachineOpcode() != ExpectedOpc)
    return false;

  if (Opc == X86::MOVSX64rr8) {
    // If we had a sign extend from 8 to 64 bits, we still need to go from 32
    // to 64.
    MachineSDNode *Extend = CurDAG->getMachineNode(X86::MOVSX64rr32, SDLoc(N),
                                                   MVT::i64, N00);
    ReplaceUses(N, Extend);
  } else {
    // Ok we can drop this extend and just use the original extend.
    ReplaceUses(N, N00.getNode());
  }

  return true;
}

void X86DAGToDAGISel::PostprocessISelDAG() {
  // Skip peepholes at -O0.
  if (TM.getOptLevel() == CodeGenOpt::None)
    return;

  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();

  bool MadeChange = false;
  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    // Skip dead nodes and any non-machine opcodes.
    if (N->use_empty() || !N->isMachineOpcode())
      continue;

    if (tryOptimizeRem8Extend(N)) {
      MadeChange = true;
      continue;
    }

    // Look for a TESTrr+ANDrr pattern where both operands of the test are
    // the same. Rewrite to remove the AND.
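    // For example, (TEST32rr (AND32rr %a, %b), (AND32rr %a, %b)) sets the
    // same flags as (TEST32rr %a, %b), so the AND can be dropped when its
    // only user is the TEST.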
    unsigned Opc = N->getMachineOpcode();
    if ((Opc == X86::TEST8rr || Opc == X86::TEST16rr ||
         Opc == X86::TEST32rr || Opc == X86::TEST64rr) &&
        N->getOperand(0) == N->getOperand(1) &&
        N->isOnlyUserOf(N->getOperand(0).getNode()) &&
        N->getOperand(0).isMachineOpcode()) {
      SDValue And = N->getOperand(0);
      unsigned N0Opc = And.getMachineOpcode();
      if (N0Opc == X86::AND8rr || N0Opc == X86::AND16rr ||
          N0Opc == X86::AND32rr || N0Opc == X86::AND64rr) {
        MachineSDNode *Test = CurDAG->getMachineNode(Opc, SDLoc(N),
                                                     MVT::i32,
                                                     And.getOperand(0),
                                                     And.getOperand(1));
        ReplaceUses(N, Test);
        MadeChange = true;
        continue;
      }
      if (N0Opc == X86::AND8rm || N0Opc == X86::AND16rm ||
          N0Opc == X86::AND32rm || N0Opc == X86::AND64rm) {
        unsigned NewOpc;
        switch (N0Opc) {
        case X86::AND8rm:  NewOpc = X86::TEST8mr;  break;
        case X86::AND16rm: NewOpc = X86::TEST16mr; break;
        case X86::AND32rm: NewOpc = X86::TEST32mr; break;
        case X86::AND64rm: NewOpc = X86::TEST64mr; break;
        }

        // Need to swap the memory and register operand.
        SDValue Ops[] = { And.getOperand(1),
                          And.getOperand(2),
                          And.getOperand(3),
                          And.getOperand(4),
                          And.getOperand(5),
                          And.getOperand(0),
                          And.getOperand(6)  /* Chain */ };
        MachineSDNode *Test = CurDAG->getMachineNode(NewOpc, SDLoc(N),
                                                     MVT::i32, MVT::Other, Ops);
        CurDAG->setNodeMemRefs(
            Test, cast<MachineSDNode>(And.getNode())->memoperands());
        ReplaceUses(N, Test);
        MadeChange = true;
        continue;
      }
    }

    // Look for a KAND+KORTEST and turn it into KTEST if only the zero flag is
    // used. We're doing this late so we can prefer to fold the AND into masked
    // comparisons. Doing that can be better for the live range of the mask
    // register.
    if ((Opc == X86::KORTESTBrr || Opc == X86::KORTESTWrr ||
         Opc == X86::KORTESTDrr || Opc == X86::KORTESTQrr) &&
        N->getOperand(0) == N->getOperand(1) &&
        N->isOnlyUserOf(N->getOperand(0).getNode()) &&
        N->getOperand(0).isMachineOpcode() &&
        onlyUsesZeroFlag(SDValue(N, 0))) {
      SDValue And = N->getOperand(0);
      unsigned N0Opc = And.getMachineOpcode();
      // KANDW is legal with AVX512F, but KTESTW requires AVX512DQ. The other
      // KAND instructions and KTEST use the same ISA feature.
      if (N0Opc == X86::KANDBrr ||
          (N0Opc == X86::KANDWrr && Subtarget->hasDQI()) ||
          N0Opc == X86::KANDDrr || N0Opc == X86::KANDQrr) {
        unsigned NewOpc;
        switch (Opc) {
        default: llvm_unreachable("Unexpected opcode!");
        case X86::KORTESTBrr: NewOpc = X86::KTESTBrr; break;
        case X86::KORTESTWrr: NewOpc = X86::KTESTWrr; break;
        case X86::KORTESTDrr: NewOpc = X86::KTESTDrr; break;
        case X86::KORTESTQrr: NewOpc = X86::KTESTQrr; break;
        }
        MachineSDNode *KTest = CurDAG->getMachineNode(NewOpc, SDLoc(N),
                                                      MVT::i32,
                                                      And.getOperand(0),
                                                      And.getOperand(1));
        ReplaceUses(N, KTest);
        MadeChange = true;
        continue;
      }
    }

    // Attempt to remove vector moves that were inserted to zero upper bits.
    if (Opc != TargetOpcode::SUBREG_TO_REG)
      continue;

    unsigned SubRegIdx = N->getConstantOperandVal(2);
    if (SubRegIdx != X86::sub_xmm && SubRegIdx != X86::sub_ymm)
      continue;

    SDValue Move = N->getOperand(1);
    if (!Move.isMachineOpcode())
      continue;

    // Make sure it's one of the move opcodes we recognize.
    switch (Move.getMachineOpcode()) {
    default:
      continue;
    case X86::VMOVAPDrr: case X86::VMOVUPDrr:
    case X86::VMOVAPSrr: case X86::VMOVUPSrr:
    case X86::VMOVDQArr: case X86::VMOVDQUrr:
    case X86::VMOVAPDYrr: case X86::VMOVUPDYrr:
    case X86::VMOVAPSYrr: case X86::VMOVUPSYrr:
    case X86::VMOVDQAYrr: case X86::VMOVDQUYrr:
    case X86::VMOVAPDZ128rr: case X86::VMOVUPDZ128rr:
    case X86::VMOVAPSZ128rr: case X86::VMOVUPSZ128rr:
    case X86::VMOVDQA32Z128rr: case X86::VMOVDQU32Z128rr:
    case X86::VMOVDQA64Z128rr: case X86::VMOVDQU64Z128rr:
    case X86::VMOVAPDZ256rr: case X86::VMOVUPDZ256rr:
    case X86::VMOVAPSZ256rr: case X86::VMOVUPSZ256rr:
    case X86::VMOVDQA32Z256rr: case X86::VMOVDQU32Z256rr:
    case X86::VMOVDQA64Z256rr: case X86::VMOVDQU64Z256rr:
      break;
    }

    SDValue In = Move.getOperand(0);
    if (!In.isMachineOpcode() ||
        In.getMachineOpcode() <= TargetOpcode::GENERIC_OP_END)
      continue;

    // Make sure the instruction has a VEX, XOP, or EVEX prefix. This rules
    // out instructions like the SHA ones, which use a legacy encoding.
    uint64_t TSFlags = getInstrInfo()->get(In.getMachineOpcode()).TSFlags;
    if ((TSFlags & X86II::EncodingMask) != X86II::VEX &&
        (TSFlags & X86II::EncodingMask) != X86II::EVEX &&
        (TSFlags & X86II::EncodingMask) != X86II::XOP)
      continue;

    // The producing instruction is another vector instruction. We can drop
    // the move.
    CurDAG->UpdateNodeOperands(N, N->getOperand(0), In, N->getOperand(2));
    MadeChange = true;
  }

  if (MadeChange)
    CurDAG->RemoveDeadNodes();
}

/// Emit any code that needs to be executed only in the main function.
void X86DAGToDAGISel::emitSpecialCodeForMain() {
  if (Subtarget->isTargetCygMing()) {
    TargetLowering::ArgListTy Args;

    TargetLowering::CallLoweringInfo CLI(*CurDAG);
    CLI.setChain(CurDAG->getRoot())
        .setCallee(CallingConv::C, Type::getVoidTy(*CurDAG->getContext()),
                   CurDAG->getExternalFunctionSymbol("__main"),
                   std::move(Args));
    const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
    std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
    CurDAG->setRoot(Result.second);
  }
}

void X86DAGToDAGISel::emitFunctionEntryCode() {
  // If this is main, emit special code for main.
  const Function &F = MF->getFunction();
  if (F.hasExternalLinkage() && F.getName() == "main")
    emitSpecialCodeForMain();
}

static bool isDispSafeForFrameIndex(int64_t Val) {
  // On 64-bit platforms, we can run into an issue where a frame index
  // includes a displacement that, when added to the explicit displacement,
  // will overflow the displacement field. Assuming that the frame index
  // displacement fits into a 31-bit integer (which is only slightly more
  // aggressive than the current fundamental assumption that it fits into
  // a 32-bit integer), a 31-bit disp should always be safe.
  return isInt<31>(Val);
}

bool X86DAGToDAGISel::foldOffsetIntoAddress(uint64_t Offset,
                                            X86ISelAddressMode &AM) {
  // We may have already matched a displacement and the caller just added the
  // symbolic displacement. So we still need to do the checks even if Offset
  // is zero.

  int64_t Val = AM.Disp + Offset;

  // Cannot combine ExternalSymbol displacements with integer offsets.
  if (Val != 0 && (AM.ES || AM.MCSym))
    return true;

  CodeModel::Model M = TM.getCodeModel();
  if (Subtarget->is64Bit()) {
    if (Val != 0 &&
        !X86::isOffsetSuitableForCodeModel(Val, M,
                                           AM.hasSymbolicDisplacement()))
      return true;
    // In addition to the checks required for a register base, check that
    // we do not try to use an unsafe Disp with a frame index.
    if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
        !isDispSafeForFrameIndex(Val))
      return true;
  }
  AM.Disp = Val;
  return false;
}

1549 bool X86DAGToDAGISel::matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
1550 SDValue Address = N->getOperand(1);
1551
1552 // load gs:0 -> GS segment register.
1553 // load fs:0 -> FS segment register.
1554 //
1555 // This optimization is valid because the GNU TLS model defines that
1556 // gs:0 (or fs:0 on X86-64) contains its own address.
1557 // For more information see http://people.redhat.com/drepper/tls.pdf
1558 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
1559 if (C->getSExtValue() == 0 && AM.Segment.getNode() == nullptr &&
1560 !IndirectTlsSegRefs &&
1561 (Subtarget->isTargetGlibc() || Subtarget->isTargetAndroid() ||
1562 Subtarget->isTargetFuchsia()))
1563 switch (N->getPointerInfo().getAddrSpace()) {
1564 case X86AS::GS:
1565 AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
1566 return false;
1567 case X86AS::FS:
1568 AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
1569 return false;
1570 // Address space X86AS::SS is not handled here, because it is not used to
1571 // address TLS areas.
1572 }
1573
1574 return true;
1575 }
1576
1577 /// Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes into an addressing
1578 /// mode. These wrap things that will resolve down into a symbol reference.
1579 /// If no match is possible, this returns true, otherwise it returns false.
1580 bool X86DAGToDAGISel::matchWrapper(SDValue N, X86ISelAddressMode &AM) {
1581 // If the addressing mode already has a symbol as the displacement, we can
1582 // never match another symbol.
1583 if (AM.hasSymbolicDisplacement())
1584 return true;
1585
1586 bool IsRIPRelTLS = false;
1587 bool IsRIPRel = N.getOpcode() == X86ISD::WrapperRIP;
1588 if (IsRIPRel) {
1589 SDValue Val = N.getOperand(0);
1590 if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
1591 IsRIPRelTLS = true;
1592 }
1593
1594 // We can't use an addressing mode in the 64-bit large code model.
1595 // Global TLS addressing is an exception. In the medium code model,
1596 // we can use a mode when RIP wrappers are present.
1597 // That signifies access to globals that are known to be "near",
1598 // such as the GOT itself.
1599 CodeModel::Model M = TM.getCodeModel();
1600 if (Subtarget->is64Bit() &&
1601 ((M == CodeModel::Large && !IsRIPRelTLS) ||
1602 (M == CodeModel::Medium && !IsRIPRel)))
1603 return true;
1604
1605 // Base and index reg must be 0 in order to use %rip as base.
1606 if (IsRIPRel && AM.hasBaseOrIndexReg())
1607 return true;
1608
1609 // Make a local copy in case we can't do this fold.
1610 X86ISelAddressMode Backup = AM;
1611
1612 int64_t Offset = 0;
1613 SDValue N0 = N.getOperand(0);
1614 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
1615 AM.GV = G->getGlobal();
1616 AM.SymbolFlags = G->getTargetFlags();
1617 Offset = G->getOffset();
1618 } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
1619 AM.CP = CP->getConstVal();
1620 AM.Alignment = CP->getAlign();
1621 AM.SymbolFlags = CP->getTargetFlags();
1622 Offset = CP->getOffset();
1623 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
1624 AM.ES = S->getSymbol();
1625 AM.SymbolFlags = S->getTargetFlags();
1626 } else if (auto *S = dyn_cast<MCSymbolSDNode>(N0)) {
1627 AM.MCSym = S->getMCSymbol();
1628 } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
1629 AM.JT = J->getIndex();
1630 AM.SymbolFlags = J->getTargetFlags();
1631 } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
1632 AM.BlockAddr = BA->getBlockAddress();
1633 AM.SymbolFlags = BA->getTargetFlags();
1634 Offset = BA->getOffset();
1635 } else
1636 llvm_unreachable("Unhandled symbol reference node.");
1637
1638 if (foldOffsetIntoAddress(Offset, AM)) {
1639 AM = Backup;
1640 return true;
1641 }
1642
1643 if (IsRIPRel)
1644 AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
1645
1646 // Commit the changes now that we know this fold is safe.
1647 return false;
1648 }
1649
1650 /// Add the specified node to the specified addressing mode, returning true if
1651 /// it cannot be done. This just pattern matches for the addressing mode.
1652 bool X86DAGToDAGISel::matchAddress(SDValue N, X86ISelAddressMode &AM) {
1653 if (matchAddressRecursively(N, AM, 0))
1654 return true;
1655
1656 // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
1657 // a smaller encoding and avoids a scaled-index.
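// For example: "leal (,%eax,2), %ecx" needs a SIB byte with no base register,
// which forces a 32-bit zero displacement, while "leal (%eax,%eax), %ecx"
// computes the same value without the four displacement bytes.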
1658 if (AM.Scale == 2 &&
1659 AM.BaseType == X86ISelAddressMode::RegBase &&
1660 AM.Base_Reg.getNode() == nullptr) {
1661 AM.Base_Reg = AM.IndexReg;
1662 AM.Scale = 1;
1663 }
1664
1665 // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
1666 // because it has a smaller encoding.
1667 // TODO: Which other code models can use this?
1668 switch (TM.getCodeModel()) {
1669 default: break;
1670 case CodeModel::Small:
1671 case CodeModel::Kernel:
1672 if (Subtarget->is64Bit() &&
1673 AM.Scale == 1 &&
1674 AM.BaseType == X86ISelAddressMode::RegBase &&
1675 AM.Base_Reg.getNode() == nullptr &&
1676 AM.IndexReg.getNode() == nullptr &&
1677 AM.SymbolFlags == X86II::MO_NO_FLAG &&
1678 AM.hasSymbolicDisplacement())
1679 AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);
1680 break;
1681 }
1682
1683 return false;
1684 }
1685
1686 bool X86DAGToDAGISel::matchAdd(SDValue &N, X86ISelAddressMode &AM,
1687 unsigned Depth) {
1688 // Add an artificial use to this node so that we can keep track of
1689 // it if it gets CSE'd with a different node.
1690 HandleSDNode Handle(N);
1691
1692 X86ISelAddressMode Backup = AM;
1693 if (!matchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
1694 !matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
1695 return false;
1696 AM = Backup;
1697
1698 // Try again after commuting the operands.
1699 if (!matchAddressRecursively(Handle.getValue().getOperand(1), AM,
1700 Depth + 1) &&
1701 !matchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth + 1))
1702 return false;
1703 AM = Backup;
1704
1705 // If we couldn't fold both operands into the address at the same time,
1706 // see if we can just put each operand into a register and fold at least
1707 // the add.
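// For example (with hypothetical registers): a plain (add %a, %b) where
// neither operand folds any further can still be emitted as
// "leaq (%ra,%rb), ...", using one operand as the base and the other as an
// unscaled index.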
1708 if (AM.BaseType == X86ISelAddressMode::RegBase &&
1709 !AM.Base_Reg.getNode() &&
1710 !AM.IndexReg.getNode()) {
1711 N = Handle.getValue();
1712 AM.Base_Reg = N.getOperand(0);
1713 AM.IndexReg = N.getOperand(1);
1714 AM.Scale = 1;
1715 return false;
1716 }
1717 N = Handle.getValue();
1718 return true;
1719 }
1720
1721 // Insert a node into the DAG at least before the Pos node's position. This
1722 // will reposition the node as needed, and will assign it a node ID that is <=
1723 // the Pos node's ID. Note that this does *not* preserve the uniqueness of node
1724 // IDs! The selection DAG must no longer depend on their uniqueness when this
1725 // is used.
1726 static void insertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
1727 if (N->getNodeId() == -1 ||
1728 (SelectionDAGISel::getUninvalidatedNodeId(N.getNode()) >
1729 SelectionDAGISel::getUninvalidatedNodeId(Pos.getNode()))) {
1730 DAG.RepositionNode(Pos->getIterator(), N.getNode());
1731 // Mark Node as invalid for pruning, as after this it may be a successor to
1732 // a selected node but otherwise be in the same position as Pos.
1733 // Conservatively mark it with the same -abs(Id) to ensure the node id
1734 // invariant is preserved.
1735 N->setNodeId(Pos->getNodeId());
1736 SelectionDAGISel::InvalidateNodeId(N.getNode());
1737 }
1738 }
1739
1740 // Transform "(X >> (8-C1)) & (0xff << C1)" to "((X >> 8) & 0xff) << C1" if
1741 // safe. This allows us to convert the shift and the AND into an h-register
1742 // extract and a scaled index. Returns false if the simplification is
1743 // performed.
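// For example, with C1 == 2:
//   (X >> 6) & 0x3fc  -->  ((X >> 8) & 0xff) << 2
// where (X >> 8) & 0xff can be selected as an h-register extract (e.g.
// "movzbl %ah, ..." when X ends up in %eax) and the << 2 becomes scale 4 in
// the addressing mode.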
1744 static bool foldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
1745 uint64_t Mask,
1746 SDValue Shift, SDValue X,
1747 X86ISelAddressMode &AM) {
1748 if (Shift.getOpcode() != ISD::SRL ||
1749 !isa<ConstantSDNode>(Shift.getOperand(1)) ||
1750 !Shift.hasOneUse())
1751 return true;
1752
1753 int ScaleLog = 8 - Shift.getConstantOperandVal(1);
1754 if (ScaleLog <= 0 || ScaleLog >= 4 ||
1755 Mask != (0xffu << ScaleLog))
1756 return true;
1757
1758 MVT VT = N.getSimpleValueType();
1759 SDLoc DL(N);
1760 SDValue Eight = DAG.getConstant(8, DL, MVT::i8);
1761 SDValue NewMask = DAG.getConstant(0xff, DL, VT);
1762 SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
1763 SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
1764 SDValue ShlCount = DAG.getConstant(ScaleLog, DL, MVT::i8);
1765 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);
1766
1767 // Insert the new nodes into the topological ordering. We must do this in
1768 // a valid topological ordering as nothing is going to go back and re-sort
1769 // these nodes. We continually insert before 'N' in sequence as this is
1770 // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
1771 // hierarchy left to express.
1772 insertDAGNode(DAG, N, Eight);
1773 insertDAGNode(DAG, N, Srl);
1774 insertDAGNode(DAG, N, NewMask);
1775 insertDAGNode(DAG, N, And);
1776 insertDAGNode(DAG, N, ShlCount);
1777 insertDAGNode(DAG, N, Shl);
1778 DAG.ReplaceAllUsesWith(N, Shl);
1779 DAG.RemoveDeadNode(N.getNode());
1780 AM.IndexReg = And;
1781 AM.Scale = (1 << ScaleLog);
1782 return false;
1783 }
1784
1785 // Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
1786 // allows us to fold the shift into this addressing mode. Returns false if the
1787 // transform succeeded.
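// For example, with C1 == 2 and C2 == 0xff0:
//   (X << 2) & 0xff0  -->  (X & 0x3fc) << 2
// so the AND feeds the index register and the << 2 folds away as scale 4.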
1788 static bool foldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
1789 X86ISelAddressMode &AM) {
1790 SDValue Shift = N.getOperand(0);
1791
1792 // Use a signed mask so that shifting right will insert sign bits. These
1793 // bits will be removed when we shift the result left so it doesn't matter
1794 // what we use. This might allow a smaller immediate encoding.
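// For example, a mask of 0xfffffff0 viewed as a signed value is -16, which
// encodes as a sign-extended 8-bit immediate instead of a full imm32.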
1795 int64_t Mask = cast<ConstantSDNode>(N->getOperand(1))->getSExtValue();
1796
1797 // If we have an any_extend feeding the AND, look through it to see if there
1798 // is a shift behind it. But only if the AND doesn't use the extended bits.
1799 // FIXME: Generalize this to other ANY_EXTEND than i32 to i64?
1800 bool FoundAnyExtend = false;
1801 if (Shift.getOpcode() == ISD::ANY_EXTEND && Shift.hasOneUse() &&
1802 Shift.getOperand(0).getSimpleValueType() == MVT::i32 &&
1803 isUInt<32>(Mask)) {
1804 FoundAnyExtend = true;
1805 Shift = Shift.getOperand(0);
1806 }
1807
1808 if (Shift.getOpcode() != ISD::SHL ||
1809 !isa<ConstantSDNode>(Shift.getOperand(1)))
1810 return true;
1811
1812 SDValue X = Shift.getOperand(0);
1813
1814 // Not likely to be profitable if either the AND or SHIFT node has more
1815 // than one use (unless all uses are for address computation). Besides,
1816 // isel mechanism requires their node ids to be reused.
1817 if (!N.hasOneUse() || !Shift.hasOneUse())
1818 return true;
1819
1820 // Verify that the shift amount is something we can fold.
1821 unsigned ShiftAmt = Shift.getConstantOperandVal(1);
1822 if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
1823 return true;
1824
1825 MVT VT = N.getSimpleValueType();
1826 SDLoc DL(N);
1827 if (FoundAnyExtend) {
1828 SDValue NewX = DAG.getNode(ISD::ANY_EXTEND, DL, VT, X);
1829 insertDAGNode(DAG, N, NewX);
1830 X = NewX;
1831 }
1832
1833 SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, DL, VT);
1834 SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
1835 SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));
1836
1837 // Insert the new nodes into the topological ordering. We must do this in
1838 // a valid topological ordering as nothing is going to go back and re-sort
1839 // these nodes. We continually insert before 'N' in sequence as this is
1840 // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
1841 // hierarchy left to express.
1842 insertDAGNode(DAG, N, NewMask);
1843 insertDAGNode(DAG, N, NewAnd);
1844 insertDAGNode(DAG, N, NewShift);
1845 DAG.ReplaceAllUsesWith(N, NewShift);
1846 DAG.RemoveDeadNode(N.getNode());
1847
1848 AM.Scale = 1 << ShiftAmt;
1849 AM.IndexReg = NewAnd;
1850 return false;
1851 }
1852
1853 // Implement some heroics to detect shifts of masked values where the mask can
1854 // be replaced by extending the shift and undoing that in the addressing mode
1855 // scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
1856 // (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
1857 // the addressing mode. This results in code such as:
1858 //
1859 // int f(short *y, int *lookup_table) {
1860 // ...
1861 // return *y + lookup_table[*y >> 11];
1862 // }
1863 //
1864 // Turning into:
1865 // movzwl (%rdi), %eax
1866 // movl %eax, %ecx
1867 // shrl $11, %ecx
1868 // addl (%rsi,%rcx,4), %eax
1869 //
1870 // Instead of:
1871 // movzwl (%rdi), %eax
1872 // movl %eax, %ecx
1873 // shrl $9, %ecx
1874 //   andl $124, %ecx
1875 // addl (%rsi,%rcx), %eax
1876 //
1877 // Note that this function assumes the mask is provided as a mask *after* the
1878 // value is shifted. The input chain may or may not match that, but computing
1879 // such a mask is trivial.
1880 static bool foldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
1881 uint64_t Mask,
1882 SDValue Shift, SDValue X,
1883 X86ISelAddressMode &AM) {
1884 if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
1885 !isa<ConstantSDNode>(Shift.getOperand(1)))
1886 return true;
1887
1888 unsigned ShiftAmt = Shift.getConstantOperandVal(1);
1889 unsigned MaskLZ = countLeadingZeros(Mask);
1890 unsigned MaskTZ = countTrailingZeros(Mask);
1891
1892 // The amount of shift we're trying to fit into the addressing mode is taken
1893 // from the trailing zeros of the mask.
1894 unsigned AMShiftAmt = MaskTZ;
1895
1896 // There is nothing we can do here unless the mask is removing some bits.
1897 // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
1898 if (AMShiftAmt == 0 || AMShiftAmt > 3) return true;
1899
1900 // We also need to ensure that mask is a continuous run of bits.
1901 if (countTrailingOnes(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;
1902
1903 // Scale the leading zero count down based on the actual size of the value.
1904 // Also scale it down based on the size of the shift.
1905 unsigned ScaleDown = (64 - X.getSimpleValueType().getSizeInBits()) + ShiftAmt;
1906 if (MaskLZ < ScaleDown)
1907 return true;
1908 MaskLZ -= ScaleDown;
1909
1910 // The final check is to ensure that any masked out high bits of X are
1911 // already known to be zero. Otherwise, the mask has a semantic impact
1912 // other than masking out a couple of low bits. Unfortunately, because of
1913 // the mask, zero extensions will be removed from operands in some cases.
1914 // This code works extra hard to look through extensions because we can
1915 // replace them with zero extensions cheaply if necessary.
1916 bool ReplacingAnyExtend = false;
1917 if (X.getOpcode() == ISD::ANY_EXTEND) {
1918 unsigned ExtendBits = X.getSimpleValueType().getSizeInBits() -
1919 X.getOperand(0).getSimpleValueType().getSizeInBits();
1920 // Assume that we'll replace the any-extend with a zero-extend, and
1921 // narrow the search to the extended value.
1922 X = X.getOperand(0);
1923 MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
1924 ReplacingAnyExtend = true;
1925 }
1926 APInt MaskedHighBits =
1927 APInt::getHighBitsSet(X.getSimpleValueType().getSizeInBits(), MaskLZ);
1928 KnownBits Known = DAG.computeKnownBits(X);
1929 if (MaskedHighBits != Known.Zero) return true;
1930
1931 // We've identified a pattern that can be transformed into a single shift
1932 // and an addressing mode. Make it so.
1933 MVT VT = N.getSimpleValueType();
1934 if (ReplacingAnyExtend) {
1935 assert(X.getValueType() != VT);
1936 // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
1937 SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(X), VT, X);
1938 insertDAGNode(DAG, N, NewX);
1939 X = NewX;
1940 }
1941 SDLoc DL(N);
1942 SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8);
1943 SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
1944 SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, DL, MVT::i8);
1945 SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);
1946
1947 // Insert the new nodes into the topological ordering. We must do this in
1948 // a valid topological ordering as nothing is going to go back and re-sort
1949 // these nodes. We continually insert before 'N' in sequence as this is
1950 // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
1951 // hierarchy left to express.
1952 insertDAGNode(DAG, N, NewSRLAmt);
1953 insertDAGNode(DAG, N, NewSRL);
1954 insertDAGNode(DAG, N, NewSHLAmt);
1955 insertDAGNode(DAG, N, NewSHL);
1956 DAG.ReplaceAllUsesWith(N, NewSHL);
1957 DAG.RemoveDeadNode(N.getNode());
1958
1959 AM.Scale = 1 << AMShiftAmt;
1960 AM.IndexReg = NewSRL;
1961 return false;
1962 }
1963
1964 // Transform "(X >> SHIFT) & (MASK << C1)" to
1965 // "((X >> (SHIFT + C1)) & (MASK)) << C1". Everything before the SHL will be
1966 // matched to a BEXTR later. Returns false if the simplification is performed.
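// For example, with SHIFT == 4, MASK == 0x3f, and C1 == 2:
//   (X >> 4) & (0x3f << 2)  -->  ((X >> 6) & 0x3f) << 2
// where ((X >> 6) & 0x3f) is a single contiguous bitfield extract that BEXTR
// can perform, and the << 2 is absorbed as scale 4.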
1967 static bool foldMaskedShiftToBEXTR(SelectionDAG &DAG, SDValue N,
1968 uint64_t Mask,
1969 SDValue Shift, SDValue X,
1970 X86ISelAddressMode &AM,
1971 const X86Subtarget &Subtarget) {
1972 if (Shift.getOpcode() != ISD::SRL ||
1973 !isa<ConstantSDNode>(Shift.getOperand(1)) ||
1974 !Shift.hasOneUse() || !N.hasOneUse())
1975 return true;
1976
1977 // Only do this if BEXTR will be matched by matchBEXTRFromAndImm.
1978 if (!Subtarget.hasTBM() &&
1979 !(Subtarget.hasBMI() && Subtarget.hasFastBEXTR()))
1980 return true;
1981
1982 // We need to ensure that mask is a continuous run of bits.
1983 if (!isShiftedMask_64(Mask)) return true;
1984
1985 unsigned ShiftAmt = Shift.getConstantOperandVal(1);
1986
1987 // The amount of shift we're trying to fit into the addressing mode is taken
1988 // from the trailing zeros of the mask.
1989 unsigned AMShiftAmt = countTrailingZeros(Mask);
1990
1991 // There is nothing we can do here unless the mask is removing some bits.
1992 // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
1993 if (AMShiftAmt == 0 || AMShiftAmt > 3) return true;
1994
1995 MVT VT = N.getSimpleValueType();
1996 SDLoc DL(N);
1997 SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8);
1998 SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
1999 SDValue NewMask = DAG.getConstant(Mask >> AMShiftAmt, DL, VT);
2000 SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, NewSRL, NewMask);
2001 SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, DL, MVT::i8);
2002 SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewAnd, NewSHLAmt);
2003
2004 // Insert the new nodes into the topological ordering. We must do this in
2005 // a valid topological ordering as nothing is going to go back and re-sort
2006 // these nodes. We continually insert before 'N' in sequence as this is
2007 // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
2008 // hierarchy left to express.
2009 insertDAGNode(DAG, N, NewSRLAmt);
2010 insertDAGNode(DAG, N, NewSRL);
2011 insertDAGNode(DAG, N, NewMask);
2012 insertDAGNode(DAG, N, NewAnd);
2013 insertDAGNode(DAG, N, NewSHLAmt);
2014 insertDAGNode(DAG, N, NewSHL);
2015 DAG.ReplaceAllUsesWith(N, NewSHL);
2016 DAG.RemoveDeadNode(N.getNode());
2017
2018 AM.Scale = 1 << AMShiftAmt;
2019 AM.IndexReg = NewAnd;
2020 return false;
2021 }
2022
2023 bool X86DAGToDAGISel::matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
2024 unsigned Depth) {
2025 SDLoc dl(N);
2026 LLVM_DEBUG({
2027 dbgs() << "MatchAddress: ";
2028 AM.dump(CurDAG);
2029 });
2030 // Limit recursion.
2031 if (Depth > 5)
2032 return matchAddressBase(N, AM);
2033
2034 // If this is already a %rip relative address, we can only merge immediates
2035 // into it. Instead of handling this in every case, we handle it here.
2036 // RIP relative addressing: %rip + 32-bit displacement!
2037 if (AM.isRIPRelative()) {
2038 // FIXME: JumpTable and ExternalSymbol address currently don't like
2039 // displacements. It isn't very important, but this should be fixed for
2040 // consistency.
2041 if (!(AM.ES || AM.MCSym) && AM.JT != -1)
2042 return true;
2043
2044 if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
2045 if (!foldOffsetIntoAddress(Cst->getSExtValue(), AM))
2046 return false;
2047 return true;
2048 }
2049
2050 switch (N.getOpcode()) {
2051 default: break;
2052 case ISD::LOCAL_RECOVER: {
2053 if (!AM.hasSymbolicDisplacement() && AM.Disp == 0)
2054 if (const auto *ESNode = dyn_cast<MCSymbolSDNode>(N.getOperand(0))) {
2055 // Use the symbol and don't prefix it.
2056 AM.MCSym = ESNode->getMCSymbol();
2057 return false;
2058 }
2059 break;
2060 }
2061 case ISD::Constant: {
2062 uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
2063 if (!foldOffsetIntoAddress(Val, AM))
2064 return false;
2065 break;
2066 }
2067
2068 case X86ISD::Wrapper:
2069 case X86ISD::WrapperRIP:
2070 if (!matchWrapper(N, AM))
2071 return false;
2072 break;
2073
2074 case ISD::LOAD:
2075 if (!matchLoadInAddress(cast<LoadSDNode>(N), AM))
2076 return false;
2077 break;
2078
2079 case ISD::FrameIndex:
2080 if (AM.BaseType == X86ISelAddressMode::RegBase &&
2081 AM.Base_Reg.getNode() == nullptr &&
2082 (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
2083 AM.BaseType = X86ISelAddressMode::FrameIndexBase;
2084 AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
2085 return false;
2086 }
2087 break;
2088
2089 case ISD::SHL:
2090 if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
2091 break;
2092
2093 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
2094 unsigned Val = CN->getZExtValue();
2095 // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
2096 // that the base operand remains free for further matching. If
2097 // the base doesn't end up getting used, a post-processing step
2098 // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
2099 if (Val == 1 || Val == 2 || Val == 3) {
2100 AM.Scale = 1 << Val;
2101 SDValue ShVal = N.getOperand(0);
2102
2103 // Okay, we know that we have a scale by now. However, if the scaled
2104 // value is an add of something and a constant, we can fold the
2105 // constant into the disp field here.
2106 if (CurDAG->isBaseWithConstantOffset(ShVal)) {
2107 AM.IndexReg = ShVal.getOperand(0);
2108 ConstantSDNode *AddVal = cast<ConstantSDNode>(ShVal.getOperand(1));
2109 uint64_t Disp = (uint64_t)AddVal->getSExtValue() << Val;
2110 if (!foldOffsetIntoAddress(Disp, AM))
2111 return false;
2112 }
2113
2114 AM.IndexReg = ShVal;
2115 return false;
2116 }
2117 }
2118 break;
2119
2120 case ISD::SRL: {
2121 // Scale must not be used already.
2122 if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;
2123
2124 // We only handle up to 64-bit values here as those are what matter for
2125 // addressing mode optimizations.
2126 assert(N.getSimpleValueType().getSizeInBits() <= 64 &&
2127 "Unexpected value size!");
2128
2129 SDValue And = N.getOperand(0);
2130 if (And.getOpcode() != ISD::AND) break;
2131 SDValue X = And.getOperand(0);
2132
2133 // The mask used for the transform is expected to be post-shift, but we
2134 // found the shift first so just apply the shift to the mask before passing
2135 // it down.
2136 if (!isa<ConstantSDNode>(N.getOperand(1)) ||
2137 !isa<ConstantSDNode>(And.getOperand(1)))
2138 break;
2139 uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);
2140
2141 // Try to fold the mask and shift into the scale, and return false if we
2142 // succeed.
2143 if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
2144 return false;
2145 break;
2146 }
2147
2148 case ISD::SMUL_LOHI:
2149 case ISD::UMUL_LOHI:
2150 // A mul_lohi where we need the low part can be folded as a plain multiply.
2151 if (N.getResNo() != 0) break;
2152 LLVM_FALLTHROUGH;
2153 case ISD::MUL:
2154 case X86ISD::MUL_IMM:
2155 // X*[3,5,9] -> X+X*[2,4,8]
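// For example, with X in %rax, X*5 can be emitted as "leaq (%rax,%rax,4), ..."
// (base X plus X scaled by 4), avoiding a multiply instruction.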
2156 if (AM.BaseType == X86ISelAddressMode::RegBase &&
2157 AM.Base_Reg.getNode() == nullptr &&
2158 AM.IndexReg.getNode() == nullptr) {
2159 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1)))
2160 if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
2161 CN->getZExtValue() == 9) {
2162 AM.Scale = unsigned(CN->getZExtValue())-1;
2163
2164 SDValue MulVal = N.getOperand(0);
2165 SDValue Reg;
2166
2167 // Okay, we know that we have a scale by now. However, if the scaled
2168 // value is an add of something and a constant, we can fold the
2169 // constant into the disp field here.
2170 if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
2171 isa<ConstantSDNode>(MulVal.getOperand(1))) {
2172 Reg = MulVal.getOperand(0);
2173 ConstantSDNode *AddVal =
2174 cast<ConstantSDNode>(MulVal.getOperand(1));
2175 uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
2176 if (foldOffsetIntoAddress(Disp, AM))
2177 Reg = N.getOperand(0);
2178 } else {
2179 Reg = N.getOperand(0);
2180 }
2181
2182 AM.IndexReg = AM.Base_Reg = Reg;
2183 return false;
2184 }
2185 }
2186 break;
2187
2188 case ISD::SUB: {
2189 // Given A-B, if A can be completely folded into the address, leaving
2190 // the index field unused, use -B as the index.
2191 // This is a win if A has multiple parts that can be folded into
2192 // the address. Also, this saves a mov if the base register has
2193 // other uses, since it avoids a two-address sub instruction; however,
2194 // it costs an additional mov if the index register has other uses.
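// For example (Table being a hypothetical symbol): for &Table[0] - B, once
// %tmp = -B is materialized, the whole expression can be emitted as
// "leaq Table(,%tmp,1), ...", with the symbol in the displacement and the
// negated value in the index.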
2195
2196 // Add an artificial use to this node so that we can keep track of
2197 // it if it gets CSE'd with a different node.
2198 HandleSDNode Handle(N);
2199
2200 // Test if the LHS of the sub can be folded.
2201 X86ISelAddressMode Backup = AM;
2202 if (matchAddressRecursively(N.getOperand(0), AM, Depth+1)) {
2203 N = Handle.getValue();
2204 AM = Backup;
2205 break;
2206 }
2207 N = Handle.getValue();
2208 // Test if the index field is free for use.
2209 if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
2210 AM = Backup;
2211 break;
2212 }
2213
2214 int Cost = 0;
2215 SDValue RHS = N.getOperand(1);
2216 // If the RHS involves a register with multiple uses, this
2217 // transformation incurs an extra mov, due to the neg instruction
2218 // clobbering its operand.
2219 if (!RHS.getNode()->hasOneUse() ||
2220 RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
2221 RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
2222 RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
2223 (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
2224 RHS.getOperand(0).getValueType() == MVT::i32))
2225 ++Cost;
2226 // If the base is a register with multiple uses, this
2227 // transformation may save a mov.
2228 if ((AM.BaseType == X86ISelAddressMode::RegBase && AM.Base_Reg.getNode() &&
2229 !AM.Base_Reg.getNode()->hasOneUse()) ||
2230 AM.BaseType == X86ISelAddressMode::FrameIndexBase)
2231 --Cost;
2232 // If the folded LHS was interesting, this transformation saves
2233 // address arithmetic.
2234 if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
2235 ((AM.Disp != 0) && (Backup.Disp == 0)) +
2236 (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
2237 --Cost;
2238 // If it doesn't look like it may be an overall win, don't do it.
2239 if (Cost >= 0) {
2240 AM = Backup;
2241 break;
2242 }
2243
2244 // Ok, the transformation is legal and appears profitable. Go for it.
2245 // Negation will be emitted later to avoid creating dangling nodes if this
2246 // was an unprofitable LEA.
2247 AM.IndexReg = RHS;
2248 AM.NegateIndex = true;
2249 AM.Scale = 1;
2250 return false;
2251 }
2252
2253 case ISD::ADD:
2254 if (!matchAdd(N, AM, Depth))
2255 return false;
2256 break;
2257
2258 case ISD::OR:
2259 // We want to look through a transform in InstCombine and DAGCombiner that
2260 // turns 'add' into 'or', so we can treat this 'or' exactly like an 'add'.
2261 // Example: (or (and x, 1), (shl y, 3)) --> (add (and x, 1), (shl y, 3))
2262 // An 'lea' can then be used to match the shift (multiply) and add:
2263 // and $1, %esi
2264 // lea (%rsi, %rdi, 8), %rax
2265 if (CurDAG->haveNoCommonBitsSet(N.getOperand(0), N.getOperand(1)) &&
2266 !matchAdd(N, AM, Depth))
2267 return false;
2268 break;
2269
2270 case ISD::AND: {
2271 // Perform some heroic transforms on an and of a constant-count shift
2272 // with a constant to enable use of the scaled offset field.
2273
2274 // Scale must not be used already.
2275 if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;
2276
2277 // We only handle up to 64-bit values here as those are what matter for
2278 // addressing mode optimizations.
2279 assert(N.getSimpleValueType().getSizeInBits() <= 64 &&
2280 "Unexpected value size!");
2281
2282 if (!isa<ConstantSDNode>(N.getOperand(1)))
2283 break;
2284
2285 if (N.getOperand(0).getOpcode() == ISD::SRL) {
2286 SDValue Shift = N.getOperand(0);
2287 SDValue X = Shift.getOperand(0);
2288
2289 uint64_t Mask = N.getConstantOperandVal(1);
2290
2291 // Try to fold the mask and shift into an extract and scale.
2292 if (!foldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM))
2293 return false;
2294
2295 // Try to fold the mask and shift directly into the scale.
2296 if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM))
2297 return false;
2298
2299 // Try to fold the mask and shift into BEXTR and scale.
2300 if (!foldMaskedShiftToBEXTR(*CurDAG, N, Mask, Shift, X, AM, *Subtarget))
2301 return false;
2302 }
2303
2304 // Try to swap the mask and shift to place shifts which can be done as
2305 // a scale on the outside of the mask.
2306 if (!foldMaskedShiftToScaledMask(*CurDAG, N, AM))
2307 return false;
2308
2309 break;
2310 }
2311 case ISD::ZERO_EXTEND: {
2312 // Try to widen a zexted shift left to the same size as its use, so we can
2313 // match the shift as a scale factor.
2314 if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
2315 break;
2316 if (N.getOperand(0).getOpcode() != ISD::SHL || !N.getOperand(0).hasOneUse())
2317 break;
2318
2319 // Give up if the shift is not a valid scale factor [1,2,3].
2320 SDValue Shl = N.getOperand(0);
2321 auto *ShAmtC = dyn_cast<ConstantSDNode>(Shl.getOperand(1));
2322 if (!ShAmtC || ShAmtC->getZExtValue() > 3)
2323 break;
2324
2325 // The narrow shift must only shift out zero bits (it must be 'nuw').
2326 // That makes it safe to widen to the destination type.
2327 APInt HighZeros = APInt::getHighBitsSet(Shl.getValueSizeInBits(),
2328 ShAmtC->getZExtValue());
2329 if (!CurDAG->MaskedValueIsZero(Shl.getOperand(0), HighZeros))
2330 break;
2331
2332 // zext (shl nuw i8 %x, C) to i32 --> shl (zext i8 %x to i32), (zext C)
2333 MVT VT = N.getSimpleValueType();
2334 SDLoc DL(N);
2335 SDValue Zext = CurDAG->getNode(ISD::ZERO_EXTEND, DL, VT, Shl.getOperand(0));
2336 SDValue NewShl = CurDAG->getNode(ISD::SHL, DL, VT, Zext, Shl.getOperand(1));
2337
2338 // Convert the shift to scale factor.
2339 AM.Scale = 1 << ShAmtC->getZExtValue();
2340 AM.IndexReg = Zext;
2341
2342 insertDAGNode(*CurDAG, N, Zext);
2343 insertDAGNode(*CurDAG, N, NewShl);
2344 CurDAG->ReplaceAllUsesWith(N, NewShl);
2345 CurDAG->RemoveDeadNode(N.getNode());
2346 return false;
2347 }
2348 }
2349
2350 return matchAddressBase(N, AM);
2351 }
2352
2353 /// Helper for MatchAddress. Add the specified node to the
2354 /// specified addressing mode without any further recursion.
2355 bool X86DAGToDAGISel::matchAddressBase(SDValue N, X86ISelAddressMode &AM) {
2356 // Is the base register already occupied?
2357 if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
2358 // If so, check to see if the scale index register is set.
2359 if (!AM.IndexReg.getNode()) {
2360 AM.IndexReg = N;
2361 AM.Scale = 1;
2362 return false;
2363 }
2364
2365 // Otherwise, we cannot select it.
2366 return true;
2367 }
2368
2369 // Default, generate it as a register.
2370 AM.BaseType = X86ISelAddressMode::RegBase;
2371 AM.Base_Reg = N;
2372 return false;
2373 }
2374
2375 /// Helper for selectVectorAddr. Handles things that can be folded into a
2376 /// gather scatter address. The index register and scale should have already
2377 /// been handled.
2378 bool X86DAGToDAGISel::matchVectorAddress(SDValue N, X86ISelAddressMode &AM) {
2379 // TODO: Support other operations.
2380 switch (N.getOpcode()) {
2381 case ISD::Constant: {
2382 uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
2383 if (!foldOffsetIntoAddress(Val, AM))
2384 return false;
2385 break;
2386 }
2387 case X86ISD::Wrapper:
2388 if (!matchWrapper(N, AM))
2389 return false;
2390 break;
2391 }
2392
2393 return matchAddressBase(N, AM);
2394 }
2395
2396 bool X86DAGToDAGISel::selectVectorAddr(MemSDNode *Parent, SDValue BasePtr,
2397 SDValue IndexOp, SDValue ScaleOp,
2398 SDValue &Base, SDValue &Scale,
2399 SDValue &Index, SDValue &Disp,
2400 SDValue &Segment) {
2401 X86ISelAddressMode AM;
2402 AM.IndexReg = IndexOp;
2403 AM.Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
2404
2405 unsigned AddrSpace = Parent->getPointerInfo().getAddrSpace();
2406 if (AddrSpace == X86AS::GS)
2407 AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
2408 if (AddrSpace == X86AS::FS)
2409 AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
2410 if (AddrSpace == X86AS::SS)
2411 AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
2412
2413 SDLoc DL(BasePtr);
2414 MVT VT = BasePtr.getSimpleValueType();
2415
2416 // Try to match into the base and displacement fields.
2417 if (matchVectorAddress(BasePtr, AM))
2418 return false;
2419
2420 getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
2421 return true;
2422 }
2423
2424 /// Returns true if it is able to pattern match an addressing mode.
2425 /// It returns the operands which make up the maximal addressing mode it can
2426 /// match by reference.
2427 ///
2428 /// Parent is the parent node of the addr operand that is being matched. It
2429 /// is always a load, store, atomic node, or null. It is only null when
2430 /// checking memory operands for inline asm nodes.
2431 bool X86DAGToDAGISel::selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
2432 SDValue &Scale, SDValue &Index,
2433 SDValue &Disp, SDValue &Segment) {
2434 X86ISelAddressMode AM;
2435
2436 if (Parent &&
2437 // This list of opcodes are all the nodes that have an "addr:$ptr" operand
2438 // that are not a MemSDNode, and thus don't have proper addrspace info.
2439 Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
2440 Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
2441 Parent->getOpcode() != X86ISD::TLSCALL && // Fixme
2442 Parent->getOpcode() != X86ISD::ENQCMD && // Fixme
2443 Parent->getOpcode() != X86ISD::ENQCMDS && // Fixme
2444 Parent->getOpcode() != X86ISD::EH_SJLJ_SETJMP && // setjmp
2445 Parent->getOpcode() != X86ISD::EH_SJLJ_LONGJMP) { // longjmp
2446 unsigned AddrSpace =
2447 cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
2448 if (AddrSpace == X86AS::GS)
2449 AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
2450 if (AddrSpace == X86AS::FS)
2451 AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
2452 if (AddrSpace == X86AS::SS)
2453 AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
2454 }
2455
2456 // Save the DL and VT before calling matchAddress; it can invalidate N.
2457 SDLoc DL(N);
2458 MVT VT = N.getSimpleValueType();
2459
2460 if (matchAddress(N, AM))
2461 return false;
2462
2463 getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
2464 return true;
2465 }
2466
2467 bool X86DAGToDAGISel::selectMOV64Imm32(SDValue N, SDValue &Imm) {
2468 // In static codegen with small code model, we can get the address of a label
2469 // into a register with 'movl'
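// For example: in the static small code model all symbols live in the low 2GB,
// so "movl $sym, %eax" (5 bytes, zero-extending into %rax) suffices where
// "movabsq $sym, %rax" (10 bytes) would otherwise be required.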
2470 if (N->getOpcode() != X86ISD::Wrapper)
2471 return false;
2472
2473 N = N.getOperand(0);
2474
2475 // At least GNU as does not accept 'movl' for TPOFF relocations.
2476 // FIXME: We could use 'movl' when we know we are targeting MC.
2477 if (N->getOpcode() == ISD::TargetGlobalTLSAddress)
2478 return false;
2479
2480 Imm = N;
2481 if (N->getOpcode() != ISD::TargetGlobalAddress)
2482 return TM.getCodeModel() == CodeModel::Small;
2483
2484 Optional<ConstantRange> CR =
2485 cast<GlobalAddressSDNode>(N)->getGlobal()->getAbsoluteSymbolRange();
2486 if (!CR)
2487 return TM.getCodeModel() == CodeModel::Small;
2488
2489 return CR->getUnsignedMax().ult(1ull << 32);
2490 }
2491
2492 bool X86DAGToDAGISel::selectLEA64_32Addr(SDValue N, SDValue &Base,
2493 SDValue &Scale, SDValue &Index,
2494 SDValue &Disp, SDValue &Segment) {
2495 // Save the debug loc before calling selectLEAAddr, in case it invalidates N.
2496 SDLoc DL(N);
2497
2498 if (!selectLEAAddr(N, Base, Scale, Index, Disp, Segment))
2499 return false;
2500
2501 RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Base);
2502 if (RN && RN->getReg() == 0)
2503 Base = CurDAG->getRegister(0, MVT::i64);
2504 else if (Base.getValueType() == MVT::i32 && !isa<FrameIndexSDNode>(Base)) {
2505 // Base could already be %rip, particularly in the x32 ABI.
2506 SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, DL,
2507 MVT::i64), 0);
2508 Base = CurDAG->getTargetInsertSubreg(X86::sub_32bit, DL, MVT::i64, ImplDef,
2509 Base);
2510 }
2511
2512 RN = dyn_cast<RegisterSDNode>(Index);
2513 if (RN && RN->getReg() == 0)
2514 Index = CurDAG->getRegister(0, MVT::i64);
2515 else {
2516 assert(Index.getValueType() == MVT::i32 &&
2517 "Expect to be extending 32-bit registers for use in LEA");
2518 SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, DL,
2519 MVT::i64), 0);
2520 Index = CurDAG->getTargetInsertSubreg(X86::sub_32bit, DL, MVT::i64, ImplDef,
2521 Index);
2522 }
2523
2524 return true;
2525 }
2526
2527 /// Calls SelectAddr and determines if the maximal addressing
2528 /// mode it matches can be cost effectively emitted as an LEA instruction.
2529 bool X86DAGToDAGISel::selectLEAAddr(SDValue N,
2530 SDValue &Base, SDValue &Scale,
2531 SDValue &Index, SDValue &Disp,
2532 SDValue &Segment) {
2533 X86ISelAddressMode AM;
2534
2535 // Save the DL and VT before calling matchAddress; it can invalidate N.
2536 SDLoc DL(N);
2537 MVT VT = N.getSimpleValueType();
2538
2539 // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
2540 // segments.
2541 SDValue Copy = AM.Segment;
2542 SDValue T = CurDAG->getRegister(0, MVT::i32);
2543 AM.Segment = T;
2544 if (matchAddress(N, AM))
2545 return false;
2546 assert(T == AM.Segment);
2547 AM.Segment = Copy;
2548
2549 unsigned Complexity = 0;
2550 if (AM.BaseType == X86ISelAddressMode::RegBase && AM.Base_Reg.getNode())
2551 Complexity = 1;
2552 else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
2553 Complexity = 4;
2554
2555 if (AM.IndexReg.getNode())
2556 Complexity++;
2557
2558 // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with
2559 // a simple shift.
2560 if (AM.Scale > 1)
2561 Complexity++;
2562
2563 // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
2564 // to a LEA. This is determined with some experimentation but is by no means
2565 // optimal (especially for code size consideration). LEA is nice because of
2566 // its three-address nature. Tweak the cost function again when we can run
2567 // convertToThreeAddress() at register allocation time.
2568 if (AM.hasSymbolicDisplacement()) {
2569 // For X86-64, always use LEA to materialize RIP-relative addresses.
2570 if (Subtarget->is64Bit())
2571 Complexity = 4;
2572 else
2573 Complexity += 2;
2574 }
2575
2576 // Heuristic: try harder to form an LEA from ADD if the operands set flags.
2577 // Unlike ADD, LEA does not affect flags, so we will be less likely to require
2578 // duplicating flag-producing instructions later in the pipeline.
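// For example: "addq %rsi, %rdi" clobbers EFLAGS, while the equivalent
// "leaq (%rdi,%rsi), %rdx" leaves EFLAGS untouched, so a flag value that is
// still live across the addition need not be recomputed.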
2579 if (N.getOpcode() == ISD::ADD) {
2580 auto isMathWithFlags = [](SDValue V) {
2581 switch (V.getOpcode()) {
2582 case X86ISD::ADD:
2583 case X86ISD::SUB:
2584 case X86ISD::ADC:
2585 case X86ISD::SBB:
2586 /* TODO: These opcodes can be added safely, but we may want to justify
2587 their inclusion for different reasons (better for reg-alloc).
2588 case X86ISD::SMUL:
2589 case X86ISD::UMUL:
2590 case X86ISD::OR:
2591 case X86ISD::XOR:
2592 case X86ISD::AND:
2593 */
2594 // Value 1 is the flag output of the node - verify it's not dead.
2595 return !SDValue(V.getNode(), 1).use_empty();
2596 default:
2597 return false;
2598 }
2599 };
2600 // TODO: This could be an 'or' rather than 'and' to make the transform more
2601 // likely to happen. We might want to factor in whether there's a
2602 // load folding opportunity for the math op that disappears with LEA.
2603 if (isMathWithFlags(N.getOperand(0)) && isMathWithFlags(N.getOperand(1)))
2604 Complexity++;
2605 }
2606
2607 if (AM.Disp)
2608 Complexity++;
2609
2610 // If it isn't worth using an LEA, reject it.
2611 if (Complexity <= 2)
2612 return false;
2613
2614 getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
2615 return true;
2616 }
2617
2618 /// This is only run on TargetGlobalTLSAddress nodes.
2619 bool X86DAGToDAGISel::selectTLSADDRAddr(SDValue N, SDValue &Base,
2620 SDValue &Scale, SDValue &Index,
2621 SDValue &Disp, SDValue &Segment) {
2622 assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
2623 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
2624
2625 X86ISelAddressMode AM;
2626 AM.GV = GA->getGlobal();
2627 AM.Disp += GA->getOffset();
2628 AM.SymbolFlags = GA->getTargetFlags();
2629
2630 MVT VT = N.getSimpleValueType();
2631 if (VT == MVT::i32) {
2632 AM.Scale = 1;
2633 AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
2634 }
2635
2636 getAddressOperands(AM, SDLoc(N), VT, Base, Scale, Index, Disp, Segment);
2637 return true;
2638 }
2639
2640 bool X86DAGToDAGISel::selectRelocImm(SDValue N, SDValue &Op) {
2641 // Keep track of the original value type and whether this value was
2642 // truncated. If we see a truncation from pointer type to VT that truncates
2643 // bits that are known to be zero, we can use a narrow reference.
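// For example: a global whose !absolute_symbol range is [0, 0x10000) has an
// address that fits in 16 bits, so a (trunc ptr to i16) of it can still be
// selected as a 16-bit immediate reference to the symbol.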
2644 EVT VT = N.getValueType();
2645 bool WasTruncated = false;
2646 if (N.getOpcode() == ISD::TRUNCATE) {
2647 WasTruncated = true;
2648 N = N.getOperand(0);
2649 }
2650
2651 if (N.getOpcode() != X86ISD::Wrapper)
2652 return false;
2653
2654 // We can only use non-GlobalValues as immediates if they were not truncated,
2655 // as we do not have any range information. If we have a GlobalValue and the
2656 // address was not truncated, we can select it as an operand directly.
2657 unsigned Opc = N.getOperand(0)->getOpcode();
2658 if (Opc != ISD::TargetGlobalAddress || !WasTruncated) {
2659 Op = N.getOperand(0);
2660 // We can only select the operand directly if we didn't have to look past a
2661 // truncate.
2662 return !WasTruncated;
2663 }
2664
2665 // Check that the global's range fits into VT.
2666 auto *GA = cast<GlobalAddressSDNode>(N.getOperand(0));
2667 Optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
2668 if (!CR || CR->getUnsignedMax().uge(1ull << VT.getSizeInBits()))
2669 return false;
2670
2671 // Okay, we can use a narrow reference.
2672 Op = CurDAG->getTargetGlobalAddress(GA->getGlobal(), SDLoc(N), VT,
2673 GA->getOffset(), GA->getTargetFlags());
2674 return true;
2675 }
2676
2677 bool X86DAGToDAGISel::tryFoldLoad(SDNode *Root, SDNode *P, SDValue N,
2678 SDValue &Base, SDValue &Scale,
2679 SDValue &Index, SDValue &Disp,
2680 SDValue &Segment) {
2681 assert(Root && P && "Unknown root/parent nodes");
2682 if (!ISD::isNON_EXTLoad(N.getNode()) ||
2683 !IsProfitableToFold(N, P, Root) ||
2684 !IsLegalToFold(N, P, Root, OptLevel))
2685 return false;
2686
2687 return selectAddr(N.getNode(),
2688 N.getOperand(1), Base, Scale, Index, Disp, Segment);
2689 }
2690
2691 bool X86DAGToDAGISel::tryFoldBroadcast(SDNode *Root, SDNode *P, SDValue N,
2692 SDValue &Base, SDValue &Scale,
2693 SDValue &Index, SDValue &Disp,
2694 SDValue &Segment) {
2695 assert(Root && P && "Unknown root/parent nodes");
2696 if (N->getOpcode() != X86ISD::VBROADCAST_LOAD ||
2697 !IsProfitableToFold(N, P, Root) ||
2698 !IsLegalToFold(N, P, Root, OptLevel))
2699 return false;
2700
2701 return selectAddr(N.getNode(),
2702 N.getOperand(1), Base, Scale, Index, Disp, Segment);
2703 }
2704
2705 /// Return an SDNode that returns the value of the global base register.
2706 /// Output instructions required to initialize the global base register,
2707 /// if necessary.
2708 SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
2709 unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
2710 auto &DL = MF->getDataLayout();
2711 return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy(DL)).getNode();
2712 }
2713
2714 bool X86DAGToDAGISel::isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const {
2715 if (N->getOpcode() == ISD::TRUNCATE)
2716 N = N->getOperand(0).getNode();
2717 if (N->getOpcode() != X86ISD::Wrapper)
2718 return false;
2719
2720 auto *GA = dyn_cast<GlobalAddressSDNode>(N->getOperand(0));
2721 if (!GA)
2722 return false;
2723
2724 Optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
2725 return CR && CR->getSignedMin().sge(-1ull << Width) &&
2726 CR->getSignedMax().slt(1ull << Width);
2727 }
2728
2729 static X86::CondCode getCondFromNode(SDNode *N) {
2730 assert(N->isMachineOpcode() && "Unexpected node");
2731 X86::CondCode CC = X86::COND_INVALID;
2732 unsigned Opc = N->getMachineOpcode();
2733 if (Opc == X86::JCC_1)
2734 CC = static_cast<X86::CondCode>(N->getConstantOperandVal(1));
2735 else if (Opc == X86::SETCCr)
2736 CC = static_cast<X86::CondCode>(N->getConstantOperandVal(0));
2737 else if (Opc == X86::SETCCm)
2738 CC = static_cast<X86::CondCode>(N->getConstantOperandVal(5));
2739 else if (Opc == X86::CMOV16rr || Opc == X86::CMOV32rr ||
2740 Opc == X86::CMOV64rr)
2741 CC = static_cast<X86::CondCode>(N->getConstantOperandVal(2));
2742 else if (Opc == X86::CMOV16rm || Opc == X86::CMOV32rm ||
2743 Opc == X86::CMOV64rm)
2744 CC = static_cast<X86::CondCode>(N->getConstantOperandVal(6));
2745
2746 return CC;
2747 }
2748
2749 /// Test whether the given X86ISD::CMP node has any users that use a flag
2750 /// other than ZF.
2751 bool X86DAGToDAGISel::onlyUsesZeroFlag(SDValue Flags) const {
2752 // Examine each user of the node.
2753 for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
2754 UI != UE; ++UI) {
2755 // Only check things that use the flags.
2756 if (UI.getUse().getResNo() != Flags.getResNo())
2757 continue;
2758 // Only examine CopyToReg uses that copy to EFLAGS.
2759 if (UI->getOpcode() != ISD::CopyToReg ||
2760 cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
2761 return false;
2762 // Examine each user of the CopyToReg use.
2763 for (SDNode::use_iterator FlagUI = UI->use_begin(),
2764 FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
2765 // Only examine the Flag result.
2766 if (FlagUI.getUse().getResNo() != 1) continue;
2767 // Anything unusual: assume conservatively.
2768 if (!FlagUI->isMachineOpcode()) return false;
2769 // Examine the condition code of the user.
2770 X86::CondCode CC = getCondFromNode(*FlagUI);
2771
2772 switch (CC) {
2773 // Comparisons which only use the zero flag.
2774 case X86::COND_E: case X86::COND_NE:
2775 continue;
2776 // Anything else: assume conservatively.
2777 default:
2778 return false;
2779 }
2780 }
2781 }
2782 return true;
2783 }
2784
2785 /// Test whether the given X86ISD::CMP node has any uses which require the SF
2786 /// flag to be accurate.
2787 bool X86DAGToDAGISel::hasNoSignFlagUses(SDValue Flags) const {
2788 // Examine each user of the node.
2789 for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
2790 UI != UE; ++UI) {
2791 // Only check things that use the flags.
2792 if (UI.getUse().getResNo() != Flags.getResNo())
2793 continue;
2794 // Only examine CopyToReg uses that copy to EFLAGS.
2795 if (UI->getOpcode() != ISD::CopyToReg ||
2796 cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
2797 return false;
2798 // Examine each user of the CopyToReg use.
2799 for (SDNode::use_iterator FlagUI = UI->use_begin(),
2800 FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
2801 // Only examine the Flag result.
2802 if (FlagUI.getUse().getResNo() != 1) continue;
2803 // Anything unusual: assume conservatively.
2804 if (!FlagUI->isMachineOpcode()) return false;
2805 // Examine the condition code of the user.
2806 X86::CondCode CC = getCondFromNode(*FlagUI);
2807
2808 switch (CC) {
2809 // Comparisons which don't examine the SF flag.
2810 case X86::COND_A: case X86::COND_AE:
2811 case X86::COND_B: case X86::COND_BE:
2812 case X86::COND_E: case X86::COND_NE:
2813 case X86::COND_O: case X86::COND_NO:
2814 case X86::COND_P: case X86::COND_NP:
2815 continue;
2816 // Anything else: assume conservatively.
2817 default:
2818 return false;
2819 }
2820 }
2821 }
2822 return true;
2823 }
2824
2825 static bool mayUseCarryFlag(X86::CondCode CC) {
2826 switch (CC) {
2827 // Comparisons which don't examine the CF flag.
2828 case X86::COND_O: case X86::COND_NO:
2829 case X86::COND_E: case X86::COND_NE:
2830 case X86::COND_S: case X86::COND_NS:
2831 case X86::COND_P: case X86::COND_NP:
2832 case X86::COND_L: case X86::COND_GE:
2833 case X86::COND_G: case X86::COND_LE:
2834 return false;
2835 // Anything else: assume conservatively.
2836 default:
2837 return true;
2838 }
2839 }
2840
2841 /// Test whether the given node which sets flags has any uses which require the
2842 /// CF flag to be accurate.
2843 bool X86DAGToDAGISel::hasNoCarryFlagUses(SDValue Flags) const {
2844 // Examine each user of the node.
2845 for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
2846 UI != UE; ++UI) {
2847 // Only check things that use the flags.
2848 if (UI.getUse().getResNo() != Flags.getResNo())
2849 continue;
2850
2851 unsigned UIOpc = UI->getOpcode();
2852
2853 if (UIOpc == ISD::CopyToReg) {
2854 // Only examine CopyToReg uses that copy to EFLAGS.
2855 if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
2856 return false;
2857 // Examine each user of the CopyToReg use.
2858 for (SDNode::use_iterator FlagUI = UI->use_begin(), FlagUE = UI->use_end();
2859 FlagUI != FlagUE; ++FlagUI) {
2860 // Only examine the Flag result.
2861 if (FlagUI.getUse().getResNo() != 1)
2862 continue;
2863 // Anything unusual: assume conservatively.
2864 if (!FlagUI->isMachineOpcode())
2865 return false;
2866 // Examine the condition code of the user.
2867 X86::CondCode CC = getCondFromNode(*FlagUI);
2868
2869 if (mayUseCarryFlag(CC))
2870 return false;
2871 }
2872
2873 // This CopyToReg is ok. Move on to the next user.
2874 continue;
2875 }
2876
2877 // This might be an unselected node. So look for the pre-isel opcodes that
2878 // use flags.
2879 unsigned CCOpNo;
2880 switch (UIOpc) {
2881 default:
2882 // Something unusual. Be conservative.
2883 return false;
2884 case X86ISD::SETCC: CCOpNo = 0; break;
2885 case X86ISD::SETCC_CARRY: CCOpNo = 0; break;
2886 case X86ISD::CMOV: CCOpNo = 2; break;
2887 case X86ISD::BRCOND: CCOpNo = 2; break;
2888 }
2889
2890 X86::CondCode CC = (X86::CondCode)UI->getConstantOperandVal(CCOpNo);
2891 if (mayUseCarryFlag(CC))
2892 return false;
2893 }
2894 return true;
2895 }
2896
2897 /// Check whether or not the chain ending in StoreNode is suitable for doing
2898 /// the {load; op; store} to modify transformation.
2899 static bool isFusableLoadOpStorePattern(StoreSDNode *StoreNode,
2900 SDValue StoredVal, SelectionDAG *CurDAG,
2901 unsigned LoadOpNo,
2902 LoadSDNode *&LoadNode,
2903 SDValue &InputChain) {
2904 // Is the stored value result 0 of the operation?
2905 if (StoredVal.getResNo() != 0) return false;
2906
2907 // Are there other uses of the operation other than the store?
2908 if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false;
2909
2910 // Is the store non-extending and non-indexed?
2911 if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
2912 return false;
2913
2914 SDValue Load = StoredVal->getOperand(LoadOpNo);
2915 // Is the stored value a non-extending and non-indexed load?
2916 if (!ISD::isNormalLoad(Load.getNode())) return false;
2917
2918 // Return LoadNode by reference.
2919 LoadNode = cast<LoadSDNode>(Load);
2920
2921 // Is the store the only read of the loaded value?
2922 if (!Load.hasOneUse())
2923 return false;
2924
2925 // Is the address of the store the same as the load?
2926 if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
2927 LoadNode->getOffset() != StoreNode->getOffset())
2928 return false;
2929
2930 bool FoundLoad = false;
2931 SmallVector<SDValue, 4> ChainOps;
2932 SmallVector<const SDNode *, 4> LoopWorklist;
2933 SmallPtrSet<const SDNode *, 16> Visited;
2934 const unsigned int Max = 1024;
2935
2936 // Visualization of Load-Op-Store fusion:
2937 // -------------------------
2938 // Legend:
2939 // *-lines = Chain operand dependencies.
2940 // |-lines = Normal operand dependencies.
2941 // Dependencies flow down and right. n-suffix references multiple nodes.
2942 //
2943 // C Xn C
2944 // * * *
2945 // * * *
2946 // Xn A-LD Yn TF Yn
2947 // * * \ | * |
2948 // * * \ | * |
2949 // * * \ | => A--LD_OP_ST
2950 // * * \| \
2951 // TF OP \
2952 // * | \ Zn
2953 // * | \
2954 // A-ST Zn
2955 //
2956
2957 // This merge induced dependences from: #1: Xn -> LD, OP, Zn
2958 // #2: Yn -> LD
2959 // #3: ST -> Zn
2960
2961 // Ensure the transform is safe by checking for the dual
2962 // dependencies to make sure we do not induce a loop.
2963
2964 // As LD is a predecessor to both OP and ST we can do this by checking:
2965 // a). if LD is a predecessor to a member of Xn or Yn.
2966 // b). if a Zn is a predecessor to ST.
2967
2968 // However, (b) can only occur through being a chain predecessor to
2969 // ST, which is the same as Zn being a member or predecessor of Xn,
2970 // which is a subset of LD being a predecessor of Xn. So it's
2971 // subsumed by check (a).
2972
2973 SDValue Chain = StoreNode->getChain();
2974
2975 // Gather X elements in ChainOps.
2976 if (Chain == Load.getValue(1)) {
2977 FoundLoad = true;
2978 ChainOps.push_back(Load.getOperand(0));
2979 } else if (Chain.getOpcode() == ISD::TokenFactor) {
2980 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
2981 SDValue Op = Chain.getOperand(i);
2982 if (Op == Load.getValue(1)) {
2983 FoundLoad = true;
2984 // Drop Load, but keep its chain. No cycle check necessary.
2985 ChainOps.push_back(Load.getOperand(0));
2986 continue;
2987 }
2988 LoopWorklist.push_back(Op.getNode());
2989 ChainOps.push_back(Op);
2990 }
2991 }
2992
2993 if (!FoundLoad)
2994 return false;
2995
2996 // Worklist is currently Xn. Add Yn to worklist.
2997 for (SDValue Op : StoredVal->ops())
2998 if (Op.getNode() != LoadNode)
2999 LoopWorklist.push_back(Op.getNode());
3000
3001 // Check (a) if Load is a predecessor to Xn + Yn
3002 if (SDNode::hasPredecessorHelper(Load.getNode(), Visited, LoopWorklist, Max,
3003 true))
3004 return false;
3005
3006 InputChain =
3007 CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ChainOps);
3008 return true;
3009 }
3010
3011 // Change a chain of {load; op; store} of the same value into a simple op
3012 // through memory of that value, if the uses of the modified value and its
3013 // address are suitable.
3014 //
3015 // The tablegen memory-operand pattern is currently not able to match
3016 // the case where the EFLAGS on the original operation are used.
3017 //
3018 // To move this to tablegen, we'll need to improve tablegen to allow flags to
3019 // be transferred from a node in the pattern to the result node, probably with
3020 // a new keyword. For example, we have this
3021 // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
3022 // [(store (add (loadi64 addr:$dst), -1), addr:$dst),
3023 // (implicit EFLAGS)]>;
3024 // but maybe need something like this
3025 // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
3026 // [(store (add (loadi64 addr:$dst), -1), addr:$dst),
3027 // (transferrable EFLAGS)]>;
3028 //
3029 // Until then, we manually fold these and instruction select the operation
3030 // here.
3031 bool X86DAGToDAGISel::foldLoadStoreIntoMemOperand(SDNode *Node) {
3032 StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
3033 SDValue StoredVal = StoreNode->getOperand(1);
3034 unsigned Opc = StoredVal->getOpcode();
3035
3036 // Before we try to select anything, make sure this is a memory operand
3037 // size and opcode we can handle. Note that this must match the code below
3038 // that actually lowers the opcodes.
3039 EVT MemVT = StoreNode->getMemoryVT();
3040 if (MemVT != MVT::i64 && MemVT != MVT::i32 && MemVT != MVT::i16 &&
3041 MemVT != MVT::i8)
3042 return false;
3043
3044 bool IsCommutable = false;
3045 bool IsNegate = false;
3046 switch (Opc) {
3047 default:
3048 return false;
3049 case X86ISD::SUB:
3050 IsNegate = isNullConstant(StoredVal.getOperand(0));
3051 break;
3052 case X86ISD::SBB:
3053 break;
3054 case X86ISD::ADD:
3055 case X86ISD::ADC:
3056 case X86ISD::AND:
3057 case X86ISD::OR:
3058 case X86ISD::XOR:
3059 IsCommutable = true;
3060 break;
3061 }
3062
3063 unsigned LoadOpNo = IsNegate ? 1 : 0;
3064 LoadSDNode *LoadNode = nullptr;
3065 SDValue InputChain;
3066 if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadOpNo,
3067 LoadNode, InputChain)) {
3068 if (!IsCommutable)
3069 return false;
3070
3071 // This operation is commutable, try the other operand.
3072 LoadOpNo = 1;
3073 if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadOpNo,
3074 LoadNode, InputChain))
3075 return false;
3076 }
3077
3078 SDValue Base, Scale, Index, Disp, Segment;
3079 if (!selectAddr(LoadNode, LoadNode->getBasePtr(), Base, Scale, Index, Disp,
3080 Segment))
3081 return false;
3082
3083 auto SelectOpcode = [&](unsigned Opc64, unsigned Opc32, unsigned Opc16,
3084 unsigned Opc8) {
3085 switch (MemVT.getSimpleVT().SimpleTy) {
3086 case MVT::i64:
3087 return Opc64;
3088 case MVT::i32:
3089 return Opc32;
3090 case MVT::i16:
3091 return Opc16;
3092 case MVT::i8:
3093 return Opc8;
3094 default:
3095 llvm_unreachable("Invalid size!");
3096 }
3097 };
3098
3099 MachineSDNode *Result;
3100 switch (Opc) {
3101 case X86ISD::SUB:
3102 // Handle negate.
3103 if (IsNegate) {
3104 unsigned NewOpc = SelectOpcode(X86::NEG64m, X86::NEG32m, X86::NEG16m,
3105 X86::NEG8m);
3106 const SDValue Ops[] = {Base, Scale, Index, Disp, Segment, InputChain};
3107 Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32,
3108 MVT::Other, Ops);
3109 break;
3110 }
3111 LLVM_FALLTHROUGH;
3112 case X86ISD::ADD:
3113 // Try to match inc/dec.
3114 if (!Subtarget->slowIncDec() || CurDAG->shouldOptForSize()) {
3115 bool IsOne = isOneConstant(StoredVal.getOperand(1));
3116 bool IsNegOne = isAllOnesConstant(StoredVal.getOperand(1));
3117 // ADD/SUB by 1/-1 can be selected as INC/DEC when the carry flag isn't used.
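// The selection below maps:
//   ADD +1 -> INC,  ADD -1 -> DEC,  SUB +1 -> DEC,  SUB -1 -> INC,
// which is exactly the ((Opc == X86ISD::ADD) == IsOne) test.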
3118 if ((IsOne || IsNegOne) && hasNoCarryFlagUses(StoredVal.getValue(1))) {
3119 unsigned NewOpc =
3120 ((Opc == X86ISD::ADD) == IsOne)
3121 ? SelectOpcode(X86::INC64m, X86::INC32m, X86::INC16m, X86::INC8m)
3122 : SelectOpcode(X86::DEC64m, X86::DEC32m, X86::DEC16m, X86::DEC8m);
3123 const SDValue Ops[] = {Base, Scale, Index, Disp, Segment, InputChain};
3124 Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32,
3125 MVT::Other, Ops);
3126 break;
3127 }
3128 }
3129 LLVM_FALLTHROUGH;
3130 case X86ISD::ADC:
3131 case X86ISD::SBB:
3132 case X86ISD::AND:
3133 case X86ISD::OR:
3134 case X86ISD::XOR: {
3135 auto SelectRegOpcode = [SelectOpcode](unsigned Opc) {
3136 switch (Opc) {
3137 case X86ISD::ADD:
3138 return SelectOpcode(X86::ADD64mr, X86::ADD32mr, X86::ADD16mr,
3139 X86::ADD8mr);
3140 case X86ISD::ADC:
3141 return SelectOpcode(X86::ADC64mr, X86::ADC32mr, X86::ADC16mr,
3142 X86::ADC8mr);
3143 case X86ISD::SUB:
3144 return SelectOpcode(X86::SUB64mr, X86::SUB32mr, X86::SUB16mr,
3145 X86::SUB8mr);
3146 case X86ISD::SBB:
3147 return SelectOpcode(X86::SBB64mr, X86::SBB32mr, X86::SBB16mr,
3148 X86::SBB8mr);
3149 case X86ISD::AND:
3150 return SelectOpcode(X86::AND64mr, X86::AND32mr, X86::AND16mr,
3151 X86::AND8mr);
3152 case X86ISD::OR:
3153 return SelectOpcode(X86::OR64mr, X86::OR32mr, X86::OR16mr, X86::OR8mr);
3154 case X86ISD::XOR:
3155 return SelectOpcode(X86::XOR64mr, X86::XOR32mr, X86::XOR16mr,
3156 X86::XOR8mr);
3157 default:
3158 llvm_unreachable("Invalid opcode!");
3159 }
3160 };
3161 auto SelectImm8Opcode = [SelectOpcode](unsigned Opc) {
3162 switch (Opc) {
3163 case X86ISD::ADD:
3164 return SelectOpcode(X86::ADD64mi8, X86::ADD32mi8, X86::ADD16mi8, 0);
3165 case X86ISD::ADC:
3166 return SelectOpcode(X86::ADC64mi8, X86::ADC32mi8, X86::ADC16mi8, 0);
3167 case X86ISD::SUB:
3168 return SelectOpcode(X86::SUB64mi8, X86::SUB32mi8, X86::SUB16mi8, 0);
3169 case X86ISD::SBB:
3170 return SelectOpcode(X86::SBB64mi8, X86::SBB32mi8, X86::SBB16mi8, 0);
3171 case X86ISD::AND:
3172 return SelectOpcode(X86::AND64mi8, X86::AND32mi8, X86::AND16mi8, 0);
3173 case X86ISD::OR:
3174 return SelectOpcode(X86::OR64mi8, X86::OR32mi8, X86::OR16mi8, 0);
3175 case X86ISD::XOR:
3176 return SelectOpcode(X86::XOR64mi8, X86::XOR32mi8, X86::XOR16mi8, 0);
3177 default:
3178 llvm_unreachable("Invalid opcode!");
3179 }
3180 };
3181 auto SelectImmOpcode = [SelectOpcode](unsigned Opc) {
3182 switch (Opc) {
3183 case X86ISD::ADD:
3184 return SelectOpcode(X86::ADD64mi32, X86::ADD32mi, X86::ADD16mi,
3185 X86::ADD8mi);
3186 case X86ISD::ADC:
3187 return SelectOpcode(X86::ADC64mi32, X86::ADC32mi, X86::ADC16mi,
3188 X86::ADC8mi);
3189 case X86ISD::SUB:
3190 return SelectOpcode(X86::SUB64mi32, X86::SUB32mi, X86::SUB16mi,
3191 X86::SUB8mi);
3192 case X86ISD::SBB:
3193 return SelectOpcode(X86::SBB64mi32, X86::SBB32mi, X86::SBB16mi,
3194 X86::SBB8mi);
3195 case X86ISD::AND:
3196 return SelectOpcode(X86::AND64mi32, X86::AND32mi, X86::AND16mi,
3197 X86::AND8mi);
3198 case X86ISD::OR:
3199 return SelectOpcode(X86::OR64mi32, X86::OR32mi, X86::OR16mi,
3200 X86::OR8mi);
3201 case X86ISD::XOR:
3202 return SelectOpcode(X86::XOR64mi32, X86::XOR32mi, X86::XOR16mi,
3203 X86::XOR8mi);
3204 default:
3205 llvm_unreachable("Invalid opcode!");
3206 }
3207 };
3208
3209 unsigned NewOpc = SelectRegOpcode(Opc);
3210 SDValue Operand = StoredVal->getOperand(1 - LoadOpNo);
3211
3212 // See if the operand is a constant that we can fold into an immediate
3213 // operand.
3214 if (auto *OperandC = dyn_cast<ConstantSDNode>(Operand)) {
3215 int64_t OperandV = OperandC->getSExtValue();
3216
3217 // Check if we can shrink the operand enough to fit in an immediate (or
3218 // fit into a smaller immediate) by negating it and switching the
3219 // operation.
3220 if ((Opc == X86ISD::ADD || Opc == X86ISD::SUB) &&
3221 ((MemVT != MVT::i8 && !isInt<8>(OperandV) && isInt<8>(-OperandV)) ||
3222 (MemVT == MVT::i64 && !isInt<32>(OperandV) &&
3223 isInt<32>(-OperandV))) &&
3224 hasNoCarryFlagUses(StoredVal.getValue(1))) {
3225 OperandV = -OperandV;
3226 Opc = Opc == X86ISD::ADD ? X86ISD::SUB : X86ISD::ADD;
3227 }
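// Worked example of the negation trick above (illustrative values): for
// i32, ADD [mem], 128 needs a 4-byte immediate since 128 doesn't fit in a
// signed imm8, but negating gives SUB [mem], -128, which does fit.
// Similarly, an i64 ADD by 0x80000000 becomes a SUB by -0x80000000, which
// fits in a sign-extended imm32.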
3228
3229 // First try to fit this into an Imm8 operand. If it doesn't fit, then try
3230 // the larger immediate operand.
3231 if (MemVT != MVT::i8 && isInt<8>(OperandV)) {
3232 Operand = CurDAG->getTargetConstant(OperandV, SDLoc(Node), MemVT);
3233 NewOpc = SelectImm8Opcode(Opc);
3234 } else if (MemVT != MVT::i64 || isInt<32>(OperandV)) {
3235 Operand = CurDAG->getTargetConstant(OperandV, SDLoc(Node), MemVT);
3236 NewOpc = SelectImmOpcode(Opc);
3237 }
3238 }
3239
3240 if (Opc == X86ISD::ADC || Opc == X86ISD::SBB) {
3241 SDValue CopyTo =
3242 CurDAG->getCopyToReg(InputChain, SDLoc(Node), X86::EFLAGS,
3243 StoredVal.getOperand(2), SDValue());
3244
3245 const SDValue Ops[] = {Base, Scale, Index, Disp,
3246 Segment, Operand, CopyTo, CopyTo.getValue(1)};
3247 Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other,
3248 Ops);
3249 } else {
3250 const SDValue Ops[] = {Base, Scale, Index, Disp,
3251 Segment, Operand, InputChain};
3252 Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other,
3253 Ops);
3254 }
3255 break;
3256 }
3257 default:
3258 llvm_unreachable("Invalid opcode!");
3259 }
3260
3261 MachineMemOperand *MemOps[] = {StoreNode->getMemOperand(),
3262 LoadNode->getMemOperand()};
3263 CurDAG->setNodeMemRefs(Result, MemOps);
3264
3265 // Update Load Chain uses as well.
3266 ReplaceUses(SDValue(LoadNode, 1), SDValue(Result, 1));
3267 ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
3268 ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));
3269 CurDAG->RemoveDeadNode(Node);
3270 return true;
3271 }
3272
3273 // See if this is an X & Mask that we can match to BEXTR/BZHI.
3274 // Where Mask is one of the following patterns:
3275 // a) x & (1 << nbits) - 1
3276 // b) x & ~(-1 << nbits)
3277 // c) x & (-1 >> (32 - y))
3278 // d) x << (32 - y) >> (32 - y)
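// All four forms keep only the low 'nbits'/'y' bits of x; e.g. (illustrative)
// with y = 5 on i32, patterns (c) and (d) both compute x & 0x1F, which
// BEXTR/BZHI can do with the bit count taken from a register.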
3279 bool X86DAGToDAGISel::matchBitExtract(SDNode *Node) {
3280 assert(
3281 (Node->getOpcode() == ISD::AND || Node->getOpcode() == ISD::SRL) &&
3282 "Should be either an and-mask, or right-shift after clearing high bits.");
3283
3284 // BEXTR is a BMI instruction, BZHI is a BMI2 instruction. We need at least one.
3285 if (!Subtarget->hasBMI() && !Subtarget->hasBMI2())
3286 return false;
3287
3288 MVT NVT = Node->getSimpleValueType(0);
3289
3290 // Only supported for 32 and 64 bits.
3291 if (NVT != MVT::i32 && NVT != MVT::i64)
3292 return false;
3293
3294 SDValue NBits;
3295
3296 // If we have BMI2's BZHI, we are ok with multi-use patterns.
3297 // Else, if we only have BMI1's BEXTR, we require one-use.
3298 const bool CanHaveExtraUses = Subtarget->hasBMI2();
3299 auto checkUses = [CanHaveExtraUses](SDValue Op, unsigned NUses) {
3300 return CanHaveExtraUses ||
3301 Op.getNode()->hasNUsesOfValue(NUses, Op.getResNo());
3302 };
3303 auto checkOneUse = [checkUses](SDValue Op) { return checkUses(Op, 1); };
3304 auto checkTwoUse = [checkUses](SDValue Op) { return checkUses(Op, 2); };
3305
3306 auto peekThroughOneUseTruncation = [checkOneUse](SDValue V) {
3307 if (V->getOpcode() == ISD::TRUNCATE && checkOneUse(V)) {
3308 assert(V.getSimpleValueType() == MVT::i32 &&
3309 V.getOperand(0).getSimpleValueType() == MVT::i64 &&
3310 "Expected i64 -> i32 truncation");
3311 V = V.getOperand(0);
3312 }
3313 return V;
3314 };
3315
3316 // a) x & ((1 << nbits) + (-1))
3317 auto matchPatternA = [checkOneUse, peekThroughOneUseTruncation,
3318 &NBits](SDValue Mask) -> bool {
3319 // Match `add`. Must only have one use!
3320 if (Mask->getOpcode() != ISD::ADD || !checkOneUse(Mask))
3321 return false;
3322 // We should be adding an all-ones constant (i.e. subtracting one).
3323 if (!isAllOnesConstant(Mask->getOperand(1)))
3324 return false;
3325 // Match `1 << nbits`. Might be truncated. Must only have one use!
3326 SDValue M0 = peekThroughOneUseTruncation(Mask->getOperand(0));
3327 if (M0->getOpcode() != ISD::SHL || !checkOneUse(M0))
3328 return false;
3329 if (!isOneConstant(M0->getOperand(0)))
3330 return false;
3331 NBits = M0->getOperand(1);
3332 return true;
3333 };
3334
3335 auto isAllOnes = [this, peekThroughOneUseTruncation, NVT](SDValue V) {
3336 V = peekThroughOneUseTruncation(V);
3337 return CurDAG->MaskedValueIsAllOnes(
3338 V, APInt::getLowBitsSet(V.getSimpleValueType().getSizeInBits(),
3339 NVT.getSizeInBits()));
3340 };
3341
3342 // b) x & ~(-1 << nbits)
3343 auto matchPatternB = [checkOneUse, isAllOnes, peekThroughOneUseTruncation,
3344 &NBits](SDValue Mask) -> bool {
3345 // Match `~()`. Must only have one use!
3346 if (Mask.getOpcode() != ISD::XOR || !checkOneUse(Mask))
3347 return false;
3348 // The -1 only has to be all-ones for the final Node's NVT.
3349 if (!isAllOnes(Mask->getOperand(1)))
3350 return false;
3351 // Match `-1 << nbits`. Might be truncated. Must only have one use!
3352 SDValue M0 = peekThroughOneUseTruncation(Mask->getOperand(0));
3353 if (M0->getOpcode() != ISD::SHL || !checkOneUse(M0))
3354 return false;
3355 // The -1 only has to be all-ones for the final Node's NVT.
3356 if (!isAllOnes(M0->getOperand(0)))
3357 return false;
3358 NBits = M0->getOperand(1);
3359 return true;
3360 };
3361
3362 // Match potentially-truncated (bitwidth - y)
3363 auto matchShiftAmt = [checkOneUse, &NBits](SDValue ShiftAmt,
3364 unsigned Bitwidth) {
3365 // Skip over a truncate of the shift amount.
3366 if (ShiftAmt.getOpcode() == ISD::TRUNCATE) {
3367 ShiftAmt = ShiftAmt.getOperand(0);
3368 // The trunc should have been the only user of the real shift amount.
3369 if (!checkOneUse(ShiftAmt))
3370 return false;
3371 }
3372 // Match the shift amount as: (bitwidth - y). It should go away, too.
3373 if (ShiftAmt.getOpcode() != ISD::SUB)
3374 return false;
3375 auto V0 = dyn_cast<ConstantSDNode>(ShiftAmt.getOperand(0));
3376 if (!V0 || V0->getZExtValue() != Bitwidth)
3377 return false;
3378 NBits = ShiftAmt.getOperand(1);
3379 return true;
3380 };
3381
3382 // c) x & (-1 >> (32 - y))
3383 auto matchPatternC = [checkOneUse, peekThroughOneUseTruncation,
3384 matchShiftAmt](SDValue Mask) -> bool {
3385 // The mask itself may be truncated.
3386 Mask = peekThroughOneUseTruncation(Mask);
3387 unsigned Bitwidth = Mask.getSimpleValueType().getSizeInBits();
3388 // Match `l>>`. Must only have one use!
3389 if (Mask.getOpcode() != ISD::SRL || !checkOneUse(Mask))
3390 return false;
3391 // We should be shifting truly all-ones constant.
3392 if (!isAllOnesConstant(Mask.getOperand(0)))
3393 return false;
3394 SDValue M1 = Mask.getOperand(1);
3395 // The shift amount should not be used externally.
3396 if (!checkOneUse(M1))
3397 return false;
3398 return matchShiftAmt(M1, Bitwidth);
3399 };
3400
3401 SDValue X;
3402
3403 // d) x << (32 - y) >> (32 - y)
3404 auto matchPatternD = [checkOneUse, checkTwoUse, matchShiftAmt,
3405 &X](SDNode *Node) -> bool {
3406 if (Node->getOpcode() != ISD::SRL)
3407 return false;
3408 SDValue N0 = Node->getOperand(0);
3409 if (N0->getOpcode() != ISD::SHL || !checkOneUse(N0))
3410 return false;
3411 unsigned Bitwidth = N0.getSimpleValueType().getSizeInBits();
3412 SDValue N1 = Node->getOperand(1);
3413 SDValue N01 = N0->getOperand(1);
3414 // Both of the shifts must be by the exact same value.
3415 // There should not be any uses of the shift amount outside of the pattern.
3416 if (N1 != N01 || !checkTwoUse(N1))
3417 return false;
3418 if (!matchShiftAmt(N1, Bitwidth))
3419 return false;
3420 X = N0->getOperand(0);
3421 return true;
3422 };
3423
3424 auto matchLowBitMask = [matchPatternA, matchPatternB,
3425 matchPatternC](SDValue Mask) -> bool {
3426 return matchPatternA(Mask) || matchPatternB(Mask) || matchPatternC(Mask);
3427 };
3428
3429 if (Node->getOpcode() == ISD::AND) {
3430 X = Node->getOperand(0);
3431 SDValue Mask = Node->getOperand(1);
3432
3433 if (matchLowBitMask(Mask)) {
3434 // Great.
3435 } else {
3436 std::swap(X, Mask);
3437 if (!matchLowBitMask(Mask))
3438 return false;
3439 }
3440 } else if (!matchPatternD(Node))
3441 return false;
3442
3443 SDLoc DL(Node);
3444
3445 // Truncate the shift amount.
3446 NBits = CurDAG->getNode(ISD::TRUNCATE, DL, MVT::i8, NBits);
3447 insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
3448
3449 // Insert 8-bit NBits into lowest 8 bits of 32-bit register.
3450 // All the other bits are undefined, we do not care about them.
3451 SDValue ImplDef = SDValue(
3452 CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::i32), 0);
3453 insertDAGNode(*CurDAG, SDValue(Node, 0), ImplDef);
3454
3455 SDValue SRIdxVal = CurDAG->getTargetConstant(X86::sub_8bit, DL, MVT::i32);
3456 insertDAGNode(*CurDAG, SDValue(Node, 0), SRIdxVal);
3457 NBits = SDValue(
3458 CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, DL, MVT::i32, ImplDef,
3459 NBits, SRIdxVal), 0);
3460 insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
3461
3462 if (Subtarget->hasBMI2()) {
3463 // Great, just emit the BZHI.
3464 if (NVT != MVT::i32) {
3465 // But have to place the bit count into the wide-enough register first.
3466 NBits = CurDAG->getNode(ISD::ANY_EXTEND, DL, NVT, NBits);
3467 insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
3468 }
3469
3470 SDValue Extract = CurDAG->getNode(X86ISD::BZHI, DL, NVT, X, NBits);
3471 ReplaceNode(Node, Extract.getNode());
3472 SelectCode(Extract.getNode());
3473 return true;
3474 }
3475
3476 // Else, if we do *NOT* have BMI2, check whether 'X' is *logically*
3477 // shifted (potentially with a one-use trunc in between), whether the
3478 // truncation was the only use of the shift, and if so look past the
3479 // one-use truncation.
3480 {
3481 SDValue RealX = peekThroughOneUseTruncation(X);
3482 // FIXME: only if the shift is one-use?
3483 if (RealX != X && RealX.getOpcode() == ISD::SRL)
3484 X = RealX;
3485 }
3486
3487 MVT XVT = X.getSimpleValueType();
3488
3489 // Else, emitting BEXTR requires one more step.
3490 // The 'control' of BEXTR has the pattern of:
3491 // [15...8 bit][ 7...0 bit] location
3492 // [ bit count][ shift] name
3493 // I.e. 0b00000010'00000001 means (x >> 0b1) & 0b11
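// Worked example (illustrative): for nbits = 5, the SHL by 8 below puts
// the bit count into bits 15:8, giving control = 0x0500; if X is itself a
// logical shift right by 3, the OR further below folds the shift amount
// into bits 7:0, giving control = 0x0503.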
3494
3495 // Shift NBits left by 8 bits, thus producing 'control'.
3496 // This leaves the low 8 bits as zero.
3497 SDValue C8 = CurDAG->getConstant(8, DL, MVT::i8);
3498 SDValue Control = CurDAG->getNode(ISD::SHL, DL, MVT::i32, NBits, C8);
3499 insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
3500
3501 // If the 'X' is *logically* shifted, we can fold that shift into 'control'.
3502 // FIXME: only if the shift is one-use?
3503 if (X.getOpcode() == ISD::SRL) {
3504 SDValue ShiftAmt = X.getOperand(1);
3505 X = X.getOperand(0);
3506
3507 assert(ShiftAmt.getValueType() == MVT::i8 &&
3508 "Expected shift amount to be i8");
3509
3510 // Now, *zero*-extend the shift amount. The bits 8...15 *must* be zero!
3511 // We could zext to i16 in some form, but we intentionally don't do that.
3512 SDValue OrigShiftAmt = ShiftAmt;
3513 ShiftAmt = CurDAG->getNode(ISD::ZERO_EXTEND, DL, MVT::i32, ShiftAmt);
3514 insertDAGNode(*CurDAG, OrigShiftAmt, ShiftAmt);
3515
3516 // And now 'or' these low 8 bits of shift amount into the 'control'.
3517 Control = CurDAG->getNode(ISD::OR, DL, MVT::i32, Control, ShiftAmt);
3518 insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
3519 }
3520
3521 // But have to place the 'control' into the wide-enough register first.
3522 if (XVT != MVT::i32) {
3523 Control = CurDAG->getNode(ISD::ANY_EXTEND, DL, XVT, Control);
3524 insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
3525 }
3526
3527 // And finally, form the BEXTR itself.
3528 SDValue Extract = CurDAG->getNode(X86ISD::BEXTR, DL, XVT, X, Control);
3529
3530 // The 'X' was originally truncated. Truncate the result now.
3531 if (XVT != NVT) {
3532 insertDAGNode(*CurDAG, SDValue(Node, 0), Extract);
3533 Extract = CurDAG->getNode(ISD::TRUNCATE, DL, NVT, Extract);
3534 }
3535
3536 ReplaceNode(Node, Extract.getNode());
3537 SelectCode(Extract.getNode());
3538
3539 return true;
3540 }
3541
3542 // See if this is an (X >> C1) & C2 that we can match to BEXTR/BEXTRI.
3543 MachineSDNode *X86DAGToDAGISel::matchBEXTRFromAndImm(SDNode *Node) {
3544 MVT NVT = Node->getSimpleValueType(0);
3545 SDLoc dl(Node);
3546
3547 SDValue N0 = Node->getOperand(0);
3548 SDValue N1 = Node->getOperand(1);
3549
3550 // If we have TBM we can use an immediate for the control. If we have BMI
3551 // we should only do this if the BEXTR instruction is implemented well.
3552 // Otherwise moving the control into a register makes this more costly.
3553 // TODO: Maybe load folding, greater than 32-bit masks, or a guarantee of LICM
3554 // hoisting the move immediate would make it worthwhile with a less optimal
3555 // BEXTR?
3556 bool PreferBEXTR =
3557 Subtarget->hasTBM() || (Subtarget->hasBMI() && Subtarget->hasFastBEXTR());
3558 if (!PreferBEXTR && !Subtarget->hasBMI2())
3559 return nullptr;
3560
3561 // Must have a shift right.
3562 if (N0->getOpcode() != ISD::SRL && N0->getOpcode() != ISD::SRA)
3563 return nullptr;
3564
3565 // Shift can't have additional users.
3566 if (!N0->hasOneUse())
3567 return nullptr;
3568
3569 // Only supported for 32 and 64 bits.
3570 if (NVT != MVT::i32 && NVT != MVT::i64)
3571 return nullptr;
3572
3573 // The shift amount and the RHS of the AND must be constants.
3574 ConstantSDNode *MaskCst = dyn_cast<ConstantSDNode>(N1);
3575 ConstantSDNode *ShiftCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
3576 if (!MaskCst || !ShiftCst)
3577 return nullptr;
3578
3579 // The AND's RHS must be a contiguous low-bit mask.
3580 uint64_t Mask = MaskCst->getZExtValue();
3581 if (!isMask_64(Mask))
3582 return nullptr;
3583
3584 uint64_t Shift = ShiftCst->getZExtValue();
3585 uint64_t MaskSize = countPopulation(Mask);
3586
3587 // Don't interfere with something that can be handled by extracting AH.
3588 // TODO: If we are able to fold a load, BEXTR might still be better than AH.
3589 if (Shift == 8 && MaskSize == 8)
3590 return nullptr;
3591
3592 // Make sure we are only using bits that were in the original value, not
3593 // shifted in.
3594 if (Shift + MaskSize > NVT.getSizeInBits())
3595 return nullptr;
3596
3597 // BZHI, if available, is always fast, unlike BEXTR. But even if we decide
3598 // that we can't use BEXTR, it is only worthwhile using BZHI if the mask
3599 // does not fit into 32 bits. Load folding is not a sufficient reason.
3600 if (!PreferBEXTR && MaskSize <= 32)
3601 return nullptr;
3602
3603 SDValue Control;
3604 unsigned ROpc, MOpc;
3605
3606 if (!PreferBEXTR) {
3607 assert(Subtarget->hasBMI2() && "We must have BMI2's BZHI then.");
3608 // If we can't make use of BEXTR then we can't fuse shift+mask stages.
3609 // Let's perform the mask first and apply the shift later. Note that we
3610 // need to widen the mask to account for the shift we'll apply afterwards!
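// Worked example (illustrative): for (x >> 8) & 0xFFFFFFFFFF on i64,
// Shift = 8 and MaskSize = 40, so BZHI keeps the low 48 bits and the
// SHR by 8 emitted at the end of this function finishes the job.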
3611 Control = CurDAG->getTargetConstant(Shift + MaskSize, dl, NVT);
3612 ROpc = NVT == MVT::i64 ? X86::BZHI64rr : X86::BZHI32rr;
3613 MOpc = NVT == MVT::i64 ? X86::BZHI64rm : X86::BZHI32rm;
3614 unsigned NewOpc = NVT == MVT::i64 ? X86::MOV32ri64 : X86::MOV32ri;
3615 Control = SDValue(CurDAG->getMachineNode(NewOpc, dl, NVT, Control), 0);
3616 } else {
3617 // The 'control' of BEXTR has the pattern of:
3618 // [15...8 bit][ 7...0 bit] location
3619 // [ bit count][ shift] name
3620 // I.e. 0b00000010'00000001 means (x >> 0b1) & 0b11
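// Worked example (illustrative): (x >> 4) & 0x0FFF has Shift = 4 and
// MaskSize = 12, giving control = 4 | (12 << 8) = 0x0C04.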
3621 Control = CurDAG->getTargetConstant(Shift | (MaskSize << 8), dl, NVT);
3622 if (Subtarget->hasTBM()) {
3623 ROpc = NVT == MVT::i64 ? X86::BEXTRI64ri : X86::BEXTRI32ri;
3624 MOpc = NVT == MVT::i64 ? X86::BEXTRI64mi : X86::BEXTRI32mi;
3625 } else {
3626 assert(Subtarget->hasBMI() && "We must have BMI1's BEXTR then.");
3627 // BMI requires the immediate to be placed in a register.
3628 ROpc = NVT == MVT::i64 ? X86::BEXTR64rr : X86::BEXTR32rr;
3629 MOpc = NVT == MVT::i64 ? X86::BEXTR64rm : X86::BEXTR32rm;
3630 unsigned NewOpc = NVT == MVT::i64 ? X86::MOV32ri64 : X86::MOV32ri;
3631 Control = SDValue(CurDAG->getMachineNode(NewOpc, dl, NVT, Control), 0);
3632 }
3633 }
3634
3635 MachineSDNode *NewNode;
3636 SDValue Input = N0->getOperand(0);
3637 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
3638 if (tryFoldLoad(Node, N0.getNode(), Input, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
3639 SDValue Ops[] = {
3640 Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Control, Input.getOperand(0)};
3641 SDVTList VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
3642 NewNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
3643 // Update the chain.
3644 ReplaceUses(Input.getValue(1), SDValue(NewNode, 2));
3645 // Record the mem-refs
3646 CurDAG->setNodeMemRefs(NewNode, {cast<LoadSDNode>(Input)->getMemOperand()});
3647 } else {
3648 NewNode = CurDAG->getMachineNode(ROpc, dl, NVT, MVT::i32, Input, Control);
3649 }
3650
3651 if (!PreferBEXTR) {
3652 // We still need to apply the shift.
3653 SDValue ShAmt = CurDAG->getTargetConstant(Shift, dl, NVT);
3654 unsigned NewOpc = NVT == MVT::i64 ? X86::SHR64ri : X86::SHR32ri;
3655 NewNode =
3656 CurDAG->getMachineNode(NewOpc, dl, NVT, SDValue(NewNode, 0), ShAmt);
3657 }
3658
3659 return NewNode;
3660 }
3661
3662 // Emit a PCMPISTR(I/M) instruction.
3663 MachineSDNode *X86DAGToDAGISel::emitPCMPISTR(unsigned ROpc, unsigned MOpc,
3664 bool MayFoldLoad, const SDLoc &dl,
3665 MVT VT, SDNode *Node) {
3666 SDValue N0 = Node->getOperand(0);
3667 SDValue N1 = Node->getOperand(1);
3668 SDValue Imm = Node->getOperand(2);
3669 const ConstantInt *Val = cast<ConstantSDNode>(Imm)->getConstantIntValue();
3670 Imm = CurDAG->getTargetConstant(*Val, SDLoc(Node), Imm.getValueType());
3671
3672 // Try to fold a load. No need to check alignment.
3673 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
3674 if (MayFoldLoad && tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
3675 SDValue Ops[] = { N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
3676 N1.getOperand(0) };
3677 SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Other);
3678 MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
3679 // Update the chain.
3680 ReplaceUses(N1.getValue(1), SDValue(CNode, 2));
3681 // Record the mem-refs
3682 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
3683 return CNode;
3684 }
3685
3686 SDValue Ops[] = { N0, N1, Imm };
3687 SDVTList VTs = CurDAG->getVTList(VT, MVT::i32);
3688 MachineSDNode *CNode = CurDAG->getMachineNode(ROpc, dl, VTs, Ops);
3689 return CNode;
3690 }
3691
3692 // Emit a PCMPESTR(I/M) instruction. Also return the Glue result in case we need
3693 // to emit a second instruction after this one. This is needed since we have two
3694 // copyToReg nodes glued before this and we need to continue that glue through.
3695 MachineSDNode *X86DAGToDAGISel::emitPCMPESTR(unsigned ROpc, unsigned MOpc,
3696 bool MayFoldLoad, const SDLoc &dl,
3697 MVT VT, SDNode *Node,
3698 SDValue &InFlag) {
3699 SDValue N0 = Node->getOperand(0);
3700 SDValue N2 = Node->getOperand(2);
3701 SDValue Imm = Node->getOperand(4);
3702 const ConstantInt *Val = cast<ConstantSDNode>(Imm)->getConstantIntValue();
3703 Imm = CurDAG->getTargetConstant(*Val, SDLoc(Node), Imm.getValueType());
3704
3705 // Try to fold a load. No need to check alignment.
3706 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
3707 if (MayFoldLoad && tryFoldLoad(Node, N2, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
3708 SDValue Ops[] = { N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
3709 N2.getOperand(0), InFlag };
3710 SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Other, MVT::Glue);
3711 MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
3712 InFlag = SDValue(CNode, 3);
3713 // Update the chain.
3714 ReplaceUses(N2.getValue(1), SDValue(CNode, 2));
3715 // Record the mem-refs
3716 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N2)->getMemOperand()});
3717 return CNode;
3718 }
3719
3720 SDValue Ops[] = { N0, N2, Imm, InFlag };
3721 SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Glue);
3722 MachineSDNode *CNode = CurDAG->getMachineNode(ROpc, dl, VTs, Ops);
3723 InFlag = SDValue(CNode, 2);
3724 return CNode;
3725 }
3726
3727 bool X86DAGToDAGISel::tryShiftAmountMod(SDNode *N) {
3728 EVT VT = N->getValueType(0);
3729
3730 // Only handle scalar shifts.
3731 if (VT.isVector())
3732 return false;
3733
3734 // Narrower shifts only mask to 5 bits in hardware.
3735 unsigned Size = VT == MVT::i64 ? 64 : 32;
3736
3737 SDValue OrigShiftAmt = N->getOperand(1);
3738 SDValue ShiftAmt = OrigShiftAmt;
3739 SDLoc DL(N);
3740
3741 // Skip over a truncate of the shift amount.
3742 if (ShiftAmt->getOpcode() == ISD::TRUNCATE)
3743 ShiftAmt = ShiftAmt->getOperand(0);
3744
3745 // This function is called after X86DAGToDAGISel::matchBitExtract(),
3746 // so we are not worried about breaking a BZHI/BEXTR pattern.
3747
3748 SDValue NewShiftAmt;
3749 if (ShiftAmt->getOpcode() == ISD::ADD || ShiftAmt->getOpcode() == ISD::SUB) {
3750 SDValue Add0 = ShiftAmt->getOperand(0);
3751 SDValue Add1 = ShiftAmt->getOperand(1);
3752 // If we are shifting by X+/-N where N == 0 mod Size, then just shift by X
3753 // to avoid the ADD/SUB.
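// Illustrative example: for an i32 shift, (x << (n + 32)) becomes
// (x << n), because the hardware masks the amount to 5 bits anyway,
// so (n + 32) & 31 == n & 31.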
3754 if (isa<ConstantSDNode>(Add1) &&
3755 cast<ConstantSDNode>(Add1)->getZExtValue() % Size == 0) {
3756 NewShiftAmt = Add0;
3757 // If we are shifting by N-X where N == 0 mod Size, then just shift by -X to
3758 // generate a NEG instead of a SUB of a constant.
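// Illustrative example: for an i32 shift, (x >> (32 - n)) becomes
// (x >> (0 - n)), since (32 - n) & 31 == (-n) & 31 and a NEG is
// cheaper to encode than a SUB from a constant.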
3759 } else if (ShiftAmt->getOpcode() == ISD::SUB &&
3760 isa<ConstantSDNode>(Add0) &&
3761 cast<ConstantSDNode>(Add0)->getZExtValue() != 0 &&
3762 cast<ConstantSDNode>(Add0)->getZExtValue() % Size == 0) {
3763 // Insert a negate op.
3764 // TODO: This isn't guaranteed to replace the sub if there is a logic cone
3765 // that uses it that's not a shift.
3766 EVT SubVT = ShiftAmt.getValueType();
3767 SDValue Zero = CurDAG->getConstant(0, DL, SubVT);
3768 SDValue Neg = CurDAG->getNode(ISD::SUB, DL, SubVT, Zero, Add1);
3769 NewShiftAmt = Neg;
3770
3771 // Insert these operands into a valid topological order so they can
3772 // get selected independently.
3773 insertDAGNode(*CurDAG, OrigShiftAmt, Zero);
3774 insertDAGNode(*CurDAG, OrigShiftAmt, Neg);
3775 } else
3776 return false;
3777 } else
3778 return false;
3779
3780 if (NewShiftAmt.getValueType() != MVT::i8) {
3781 // Need to truncate the shift amount.
3782 NewShiftAmt = CurDAG->getNode(ISD::TRUNCATE, DL, MVT::i8, NewShiftAmt);
3783 // Add to a correct topological ordering.
3784 insertDAGNode(*CurDAG, OrigShiftAmt, NewShiftAmt);
3785 }
3786
3787 // Insert a new mask to keep the shift amount legal. This should be removed
3788 // by isel patterns.
3789 NewShiftAmt = CurDAG->getNode(ISD::AND, DL, MVT::i8, NewShiftAmt,
3790 CurDAG->getConstant(Size - 1, DL, MVT::i8));
3791 // Place in a correct topological ordering.
3792 insertDAGNode(*CurDAG, OrigShiftAmt, NewShiftAmt);
3793
3794 SDNode *UpdatedNode = CurDAG->UpdateNodeOperands(N, N->getOperand(0),
3795 NewShiftAmt);
3796 if (UpdatedNode != N) {
3797 // If we found an existing node, we should replace ourselves with that node
3798 // and wait for it to be selected after its other users.
3799 ReplaceNode(N, UpdatedNode);
3800 return true;
3801 }
3802
3803 // If the original shift amount is now dead, delete it so that we don't run
3804 // it through isel.
3805 if (OrigShiftAmt.getNode()->use_empty())
3806 CurDAG->RemoveDeadNode(OrigShiftAmt.getNode());
3807
3808 // Now that we've optimized the shift amount, defer to normal isel to get
3809 // load folding and legacy vs BMI2 selection without repeating it here.
3810 SelectCode(N);
3811 return true;
3812 }
3813
3814 bool X86DAGToDAGISel::tryShrinkShlLogicImm(SDNode *N) {
3815 MVT NVT = N->getSimpleValueType(0);
3816 unsigned Opcode = N->getOpcode();
3817 SDLoc dl(N);
3818
3819 // For operations of the form (x << C1) op C2, check if we can use a smaller
3820 // encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
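// Worked example (illustrative): (x << 8) | 0x1100 becomes
// ((x | 0x11) << 8); 0x11 fits in a sign-extended imm8 while 0x1100
// does not, so the OR gets a shorter encoding.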
3821 SDValue Shift = N->getOperand(0);
3822 SDValue N1 = N->getOperand(1);
3823
3824 ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
3825 if (!Cst)
3826 return false;
3827
3828 int64_t Val = Cst->getSExtValue();
3829
3830 // If we have an any_extend feeding the AND, look through it to see if there
3831 // is a shift behind it. But only if the AND doesn't use the extended bits.
3832 // FIXME: Generalize this to other ANY_EXTEND than i32 to i64?
3833 bool FoundAnyExtend = false;
3834 if (Shift.getOpcode() == ISD::ANY_EXTEND && Shift.hasOneUse() &&
3835 Shift.getOperand(0).getSimpleValueType() == MVT::i32 &&
3836 isUInt<32>(Val)) {
3837 FoundAnyExtend = true;
3838 Shift = Shift.getOperand(0);
3839 }
3840
3841 if (Shift.getOpcode() != ISD::SHL || !Shift.hasOneUse())
3842 return false;
3843
3844 // i8 is unshrinkable, i16 should be promoted to i32.
3845 if (NVT != MVT::i32 && NVT != MVT::i64)
3846 return false;
3847
3848 ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
3849 if (!ShlCst)
3850 return false;
3851
3852 uint64_t ShAmt = ShlCst->getZExtValue();
3853
3854 // Make sure that we don't change the operation by removing bits.
3855 // This only matters for OR and XOR, AND is unaffected.
3856 uint64_t RemovedBitsMask = (1ULL << ShAmt) - 1;
3857 if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
3858 return false;
3859
3860 // Check the minimum bitwidth for the new constant.
3861 // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
3862 auto CanShrinkImmediate = [&](int64_t &ShiftedVal) {
3863 if (Opcode == ISD::AND) {
3864 // AND32ri is the same as AND64ri32 with zext imm.
3865 // Try this before sign extended immediates below.
3866 ShiftedVal = (uint64_t)Val >> ShAmt;
3867 if (NVT == MVT::i64 && !isUInt<32>(Val) && isUInt<32>(ShiftedVal))
3868 return true;
3869 // Also swap order when the AND can become MOVZX.
3870 if (ShiftedVal == UINT8_MAX || ShiftedVal == UINT16_MAX)
3871 return true;
3872 }
3873 ShiftedVal = Val >> ShAmt;
3874 if ((!isInt<8>(Val) && isInt<8>(ShiftedVal)) ||
3875 (!isInt<32>(Val) && isInt<32>(ShiftedVal)))
3876 return true;
3877 if (Opcode != ISD::AND) {
3878 // MOV32ri+OR64r/XOR64r is cheaper than MOV64ri64+OR64rr/XOR64rr
3879 ShiftedVal = (uint64_t)Val >> ShAmt;
3880 if (NVT == MVT::i64 && !isUInt<32>(Val) && isUInt<32>(ShiftedVal))
3881 return true;
3882 }
3883 return false;
3884 };
3885
3886 int64_t ShiftedVal;
3887 if (!CanShrinkImmediate(ShiftedVal))
3888 return false;
3889
3890 // Ok, we can reorder to get a smaller immediate.
3891
3892 // But it's possible the original immediate allowed an AND to become MOVZX.
3893 // Doing this check late so we delay the MaskedValueIsZero call as long as
3894 // possible.
3895 if (Opcode == ISD::AND) {
3896 // Find the smallest zext this could possibly be.
3897 unsigned ZExtWidth = Cst->getAPIntValue().getActiveBits();
3898 ZExtWidth = PowerOf2Ceil(std::max(ZExtWidth, 8U));
3899
3900 // Figure out which bits need to be zero to achieve that mask.
3901 APInt NeededMask = APInt::getLowBitsSet(NVT.getSizeInBits(),
3902 ZExtWidth);
3903 NeededMask &= ~Cst->getAPIntValue();
3904
3905 if (CurDAG->MaskedValueIsZero(N->getOperand(0), NeededMask))
3906 return false;
3907 }
3908
3909 SDValue X = Shift.getOperand(0);
3910 if (FoundAnyExtend) {
3911 SDValue NewX = CurDAG->getNode(ISD::ANY_EXTEND, dl, NVT, X);
3912 insertDAGNode(*CurDAG, SDValue(N, 0), NewX);
3913 X = NewX;
3914 }
3915
3916 SDValue NewCst = CurDAG->getConstant(ShiftedVal, dl, NVT);
3917 insertDAGNode(*CurDAG, SDValue(N, 0), NewCst);
3918 SDValue NewBinOp = CurDAG->getNode(Opcode, dl, NVT, X, NewCst);
3919 insertDAGNode(*CurDAG, SDValue(N, 0), NewBinOp);
3920 SDValue NewSHL = CurDAG->getNode(ISD::SHL, dl, NVT, NewBinOp,
3921 Shift.getOperand(1));
3922 ReplaceNode(N, NewSHL.getNode());
3923 SelectCode(NewSHL.getNode());
3924 return true;
3925 }
3926
3927 // Try to match two logic ops to a VPTERNLOG.
3928 // FIXME: Handle inverted inputs?
3929 // FIXME: Handle more complex patterns that use an operand more than once?
3930 bool X86DAGToDAGISel::tryVPTERNLOG(SDNode *N) {
3931 MVT NVT = N->getSimpleValueType(0);
3932
3933 // Make sure we support VPTERNLOG.
3934 if (!NVT.isVector() || !Subtarget->hasAVX512() ||
3935 NVT.getVectorElementType() == MVT::i1)
3936 return false;
3937
3938 // We need VLX for 128/256-bit.
3939 if (!(Subtarget->hasVLX() || NVT.is512BitVector()))
3940 return false;
3941
3942 unsigned Opc1 = N->getOpcode();
3943 SDValue N0 = N->getOperand(0);
3944 SDValue N1 = N->getOperand(1);
3945
3946 auto isLogicOp = [](unsigned Opc) {
3947 return Opc == ISD::AND || Opc == ISD::OR || Opc == ISD::XOR ||
3948 Opc == X86ISD::ANDNP;
3949 };
3950
3951 SDValue A, B, C;
3952 unsigned Opc2;
3953 if (isLogicOp(N1.getOpcode()) && N1.hasOneUse()) {
3954 Opc2 = N1.getOpcode();
3955 A = N0;
3956 B = N1.getOperand(0);
3957 C = N1.getOperand(1);
3958 } else if (isLogicOp(N0.getOpcode()) && N0.hasOneUse()) {
3959 Opc2 = N0.getOpcode();
3960 A = N1;
3961 B = N0.getOperand(0);
3962 C = N0.getOperand(1);
3963 } else
3964 return false;
3965
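// The 8-bit immediate below is the truth table of the fused 3-input
// function evaluated at the canonical patterns A = 0xF0, B = 0xCC,
// C = 0xAA. Worked example for the AND(A, OR(B, C)) case:
// 0xF0 & (0xCC | 0xAA) = 0xF0 & 0xEE = 0xE0.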
3966 uint64_t Imm;
3967 switch (Opc1) {
3968 default: llvm_unreachable("Unexpected opcode!");
3969 case ISD::AND:
3970 switch (Opc2) {
3971 default: llvm_unreachable("Unexpected opcode!");
3972 case ISD::AND: Imm = 0x80; break;
3973 case ISD::OR: Imm = 0xe0; break;
3974 case ISD::XOR: Imm = 0x60; break;
3975 case X86ISD::ANDNP: Imm = 0x20; break;
3976 }
3977 break;
3978 case ISD::OR:
3979 switch (Opc2) {
3980 default: llvm_unreachable("Unexpected opcode!");
3981 case ISD::AND: Imm = 0xf8; break;
3982 case ISD::OR: Imm = 0xfe; break;
3983 case ISD::XOR: Imm = 0xf6; break;
3984 case X86ISD::ANDNP: Imm = 0xf2; break;
3985 }
3986 break;
3987 case ISD::XOR:
3988 switch (Opc2) {
3989 default: llvm_unreachable("Unexpected opcode!");
3990 case ISD::AND: Imm = 0x78; break;
3991 case ISD::OR: Imm = 0x1e; break;
3992 case ISD::XOR: Imm = 0x96; break;
3993 case X86ISD::ANDNP: Imm = 0xd2; break;
3994 }
3995 break;
3996 }
3997
3998 SDLoc DL(N);
3999 SDValue New = CurDAG->getNode(X86ISD::VPTERNLOG, DL, NVT, A, B, C,
4000 CurDAG->getTargetConstant(Imm, DL, MVT::i8));
4001 ReplaceNode(N, New.getNode());
4002 SelectCode(New.getNode());
4003 return true;
4004 }
4005
4006 /// If the high bits of an 'and' operand are known zero, try setting the
4007 /// high bits of an 'and' constant operand to produce a smaller encoding by
4008 /// creating a small, sign-extended negative immediate rather than a large
4009 /// positive one. This reverses a transform in SimplifyDemandedBits that
4010 /// shrinks mask constants by clearing bits. There is also a possibility that
4011 /// the 'and' mask can be made -1, so the 'and' itself is unnecessary. In that
4012 /// case, just replace the 'and'. Return 'true' if the node is replaced.
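/// Worked example (illustrative): if the top bit of the other operand is
/// known zero, a 32-bit mask of 0x7FFFFFF0 (4-byte immediate) can be turned
/// back into 0xFFFFFFF0 == -16, which encodes as a 1-byte sign-extended
/// imm8.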
4013 bool X86DAGToDAGISel::shrinkAndImmediate(SDNode *And) {
4014 // i8 is unshrinkable, i16 should be promoted to i32, and vector ops don't
4015 // have immediate operands.
4016 MVT VT = And->getSimpleValueType(0);
4017 if (VT != MVT::i32 && VT != MVT::i64)
4018 return false;
4019
4020 auto *And1C = dyn_cast<ConstantSDNode>(And->getOperand(1));
4021 if (!And1C)
4022 return false;
4023
4024 // Bail out if the mask constant is already negative. It can't shrink further.
4025 // If the upper 32 bits of a 64 bit mask are all zeros, we have special isel
4026 // patterns to use a 32-bit and instead of a 64-bit and by relying on the
4027 // implicit zeroing of 32 bit ops. So we should check if the lower 32 bits
4028 // are negative too.
4029 APInt MaskVal = And1C->getAPIntValue();
4030 unsigned MaskLZ = MaskVal.countLeadingZeros();
4031 if (!MaskLZ || (VT == MVT::i64 && MaskLZ == 32))
4032 return false;
4033
4034 // Don't extend into the upper 32 bits of a 64 bit mask.
4035 if (VT == MVT::i64 && MaskLZ >= 32) {
4036 MaskLZ -= 32;
4037 MaskVal = MaskVal.trunc(32);
4038 }
4039
4040 SDValue And0 = And->getOperand(0);
4041 APInt HighZeros = APInt::getHighBitsSet(MaskVal.getBitWidth(), MaskLZ);
4042 APInt NegMaskVal = MaskVal | HighZeros;
4043
4044 // If a negative constant would not allow a smaller encoding, there's no need
4045 // to continue. Only change the constant when we know it's a win.
4046 unsigned MinWidth = NegMaskVal.getMinSignedBits();
4047 if (MinWidth > 32 || (MinWidth > 8 && MaskVal.getMinSignedBits() <= 32))
4048 return false;
4049
4050 // Extend masks if we truncated above.
4051 if (VT == MVT::i64 && MaskVal.getBitWidth() < 64) {
4052 NegMaskVal = NegMaskVal.zext(64);
4053 HighZeros = HighZeros.zext(64);
4054 }
4055
4056 // The variable operand must be all zeros in the top bits to allow using the
4057 // new, negative constant as the mask.
4058 if (!CurDAG->MaskedValueIsZero(And0, HighZeros))
4059 return false;
4060
4061 // Check if the mask is -1. In that case, this is an unnecessary instruction
4062 // that escaped earlier analysis.
4063 if (NegMaskVal.isAllOnesValue()) {
4064 ReplaceNode(And, And0.getNode());
4065 return true;
4066 }
4067
4068 // A negative mask allows a smaller encoding. Create a new 'and' node.
4069 SDValue NewMask = CurDAG->getConstant(NegMaskVal, SDLoc(And), VT);
4070 SDValue NewAnd = CurDAG->getNode(ISD::AND, SDLoc(And), VT, And0, NewMask);
4071 ReplaceNode(And, NewAnd.getNode());
4072 SelectCode(NewAnd.getNode());
4073 return true;
4074 }
4075
4076 static unsigned getVPTESTMOpc(MVT TestVT, bool IsTestN, bool FoldedLoad,
4077 bool FoldedBCast, bool Masked) {
4078 #define VPTESTM_CASE(VT, SUFFIX) \
4079 case MVT::VT: \
4080 if (Masked) \
4081 return IsTestN ? X86::VPTESTNM##SUFFIX##k: X86::VPTESTM##SUFFIX##k; \
4082 return IsTestN ? X86::VPTESTNM##SUFFIX : X86::VPTESTM##SUFFIX;
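// For example (illustrative), VPTESTM_CASE(v4i32, DZ128##SUFFIX) with
// SUFFIX = rm expands the unmasked arm to:
//   case MVT::v4i32:
//     return IsTestN ? X86::VPTESTNMDZ128rm : X86::VPTESTMDZ128rm;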
4083
4084
4085 #define VPTESTM_BROADCAST_CASES(SUFFIX) \
4086 default: llvm_unreachable("Unexpected VT!"); \
4087 VPTESTM_CASE(v4i32, DZ128##SUFFIX) \
4088 VPTESTM_CASE(v2i64, QZ128##SUFFIX) \
4089 VPTESTM_CASE(v8i32, DZ256##SUFFIX) \
4090 VPTESTM_CASE(v4i64, QZ256##SUFFIX) \
4091 VPTESTM_CASE(v16i32, DZ##SUFFIX) \
4092 VPTESTM_CASE(v8i64, QZ##SUFFIX)
4093
4094 #define VPTESTM_FULL_CASES(SUFFIX) \
4095 VPTESTM_BROADCAST_CASES(SUFFIX) \
4096 VPTESTM_CASE(v16i8, BZ128##SUFFIX) \
4097 VPTESTM_CASE(v8i16, WZ128##SUFFIX) \
4098 VPTESTM_CASE(v32i8, BZ256##SUFFIX) \
4099 VPTESTM_CASE(v16i16, WZ256##SUFFIX) \
4100 VPTESTM_CASE(v64i8, BZ##SUFFIX) \
4101 VPTESTM_CASE(v32i16, WZ##SUFFIX)
4102
4103 if (FoldedLoad) {
4104 switch (TestVT.SimpleTy) {
4105 VPTESTM_FULL_CASES(rm)
4106 }
4107 }
4108
4109 if (FoldedBCast) {
4110 switch (TestVT.SimpleTy) {
4111 VPTESTM_BROADCAST_CASES(rmb)
4112 }
4113 }
4114
4115 switch (TestVT.SimpleTy) {
4116 VPTESTM_FULL_CASES(rr)
4117 }
4118
4119 #undef VPTESTM_FULL_CASES
4120 #undef VPTESTM_BROADCAST_CASES
4121 #undef VPTESTM_CASE
4122 }
4123
4124 // Try to create a VPTESTM instruction. If InMask is not null, it will be used
4125 // to form a masked operation.
4126 bool X86DAGToDAGISel::tryVPTESTM(SDNode *Root, SDValue Setcc,
4127 SDValue InMask) {
4128 assert(Subtarget->hasAVX512() && "Expected AVX512!");
4129 assert(Setcc.getSimpleValueType().getVectorElementType() == MVT::i1 &&
4130 "Unexpected VT!");
4131
4132 // Look for equal and not equal compares.
4133 ISD::CondCode CC = cast<CondCodeSDNode>(Setcc.getOperand(2))->get();
4134 if (CC != ISD::SETEQ && CC != ISD::SETNE)
4135 return false;
4136
4137 SDValue SetccOp0 = Setcc.getOperand(0);
4138 SDValue SetccOp1 = Setcc.getOperand(1);
4139
4140 // Canonicalize the all zero vector to the RHS.
4141 if (ISD::isBuildVectorAllZeros(SetccOp0.getNode()))
4142 std::swap(SetccOp0, SetccOp1);
4143
4144 // See if we're comparing against zero.
4145 if (!ISD::isBuildVectorAllZeros(SetccOp1.getNode()))
4146 return false;
4147
4148 SDValue N0 = SetccOp0;
4149
4150 MVT CmpVT = N0.getSimpleValueType();
4151 MVT CmpSVT = CmpVT.getVectorElementType();
4152
4153 // Start with both operands the same. We'll try to refine this.
4154 SDValue Src0 = N0;
4155 SDValue Src1 = N0;
4156
4157 {
4158 // Look through single use bitcasts.
4159 SDValue N0Temp = N0;
4160 if (N0Temp.getOpcode() == ISD::BITCAST && N0Temp.hasOneUse())
4161 N0Temp = N0.getOperand(0);
4162
4163 // Look for single use AND.
4164 if (N0Temp.getOpcode() == ISD::AND && N0Temp.hasOneUse()) {
4165 Src0 = N0Temp.getOperand(0);
4166 Src1 = N0Temp.getOperand(1);
4167 }
4168 }
4169
4170 // Without VLX we need to widen the load.
4171 bool Widen = !Subtarget->hasVLX() && !CmpVT.is512BitVector();
4172
4173 // We can only fold loads if the sources are unique.
4174 bool CanFoldLoads = Src0 != Src1;
4175
4176 // Try to fold loads unless we need to widen.
4177 bool FoldedLoad = false;
4178 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Load;
4179 if (!Widen && CanFoldLoads) {
4180 Load = Src1;
4181 FoldedLoad = tryFoldLoad(Root, N0.getNode(), Load, Tmp0, Tmp1, Tmp2, Tmp3,
4182 Tmp4);
4183 if (!FoldedLoad) {
4184 // AND is commutative.
4185 Load = Src0;
4186 FoldedLoad = tryFoldLoad(Root, N0.getNode(), Load, Tmp0, Tmp1, Tmp2,
4187 Tmp3, Tmp4);
4188 if (FoldedLoad)
4189 std::swap(Src0, Src1);
4190 }
4191 }
4192
4193 auto findBroadcastedOp = [](SDValue Src, MVT CmpSVT, SDNode *&Parent) {
4194 // Look through single use bitcasts.
4195 if (Src.getOpcode() == ISD::BITCAST && Src.hasOneUse()) {
4196 Parent = Src.getNode();
4197 Src = Src.getOperand(0);
4198 }
4199
4200 if (Src.getOpcode() == X86ISD::VBROADCAST_LOAD && Src.hasOneUse()) {
4201 auto *MemIntr = cast<MemIntrinsicSDNode>(Src);
4202 if (MemIntr->getMemoryVT().getSizeInBits() == CmpSVT.getSizeInBits())
4203 return Src;
4204 }
4205
4206 return SDValue();
4207 };
4208
4209 // If we didn't fold a load, try to match a broadcast. There is no widening
4210 // limitation for this, but only 32 and 64 bit element types are supported.
4211 bool FoldedBCast = false;
4212 if (!FoldedLoad && CanFoldLoads &&
4213 (CmpSVT == MVT::i32 || CmpSVT == MVT::i64)) {
4214 SDNode *ParentNode = N0.getNode();
4215 if ((Load = findBroadcastedOp(Src1, CmpSVT, ParentNode))) {
4216 FoldedBCast = tryFoldBroadcast(Root, ParentNode, Load, Tmp0,
4217 Tmp1, Tmp2, Tmp3, Tmp4);
4218 }
4219
4220 // Try the other operand.
4221 if (!FoldedBCast) {
4222 SDNode *ParentNode = N0.getNode();
4223 if ((Load = findBroadcastedOp(Src0, CmpSVT, ParentNode))) {
4224 FoldedBCast = tryFoldBroadcast(Root, ParentNode, Load, Tmp0,
4225 Tmp1, Tmp2, Tmp3, Tmp4);
4226 if (FoldedBCast)
4227 std::swap(Src0, Src1);
4228 }
4229 }
4230 }
4231
4232 auto getMaskRC = [](MVT MaskVT) {
4233 switch (MaskVT.SimpleTy) {
4234 default: llvm_unreachable("Unexpected VT!");
4235 case MVT::v2i1: return X86::VK2RegClassID;
4236 case MVT::v4i1: return X86::VK4RegClassID;
4237 case MVT::v8i1: return X86::VK8RegClassID;
4238 case MVT::v16i1: return X86::VK16RegClassID;
4239 case MVT::v32i1: return X86::VK32RegClassID;
4240 case MVT::v64i1: return X86::VK64RegClassID;
4241 }
4242 };
4243
4244 bool IsMasked = InMask.getNode() != nullptr;
4245
4246 SDLoc dl(Root);
4247
4248 MVT ResVT = Setcc.getSimpleValueType();
4249 MVT MaskVT = ResVT;
4250 if (Widen) {
4251 // Widen the inputs using insert_subreg or copy_to_regclass.
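// Illustrative example: without VLX, a v4i32 compare widens to v16i32
// (Scale = 4, sub_xmm); the resulting v16i1 mask is narrowed back to
// v4i1 near the end of this function via COPY_TO_REGCLASS.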
4252 unsigned Scale = CmpVT.is128BitVector() ? 4 : 2;
4253 unsigned SubReg = CmpVT.is128BitVector() ? X86::sub_xmm : X86::sub_ymm;
4254 unsigned NumElts = CmpVT.getVectorNumElements() * Scale;
4255 CmpVT = MVT::getVectorVT(CmpSVT, NumElts);
4256 MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
4257 SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, dl,
4258 CmpVT), 0);
4259 Src0 = CurDAG->getTargetInsertSubreg(SubReg, dl, CmpVT, ImplDef, Src0);
4260
4261 assert(!FoldedLoad && "Shouldn't have folded the load");
4262 if (!FoldedBCast)
4263 Src1 = CurDAG->getTargetInsertSubreg(SubReg, dl, CmpVT, ImplDef, Src1);
4264
4265 if (IsMasked) {
4266 // Widen the mask.
4267 unsigned RegClass = getMaskRC(MaskVT);
4268 SDValue RC = CurDAG->getTargetConstant(RegClass, dl, MVT::i32);
4269 InMask = SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
4270 dl, MaskVT, InMask, RC), 0);
4271 }
4272 }
4273
4274 bool IsTestN = CC == ISD::SETEQ;
4275 unsigned Opc = getVPTESTMOpc(CmpVT, IsTestN, FoldedLoad, FoldedBCast,
4276 IsMasked);
4277
4278 MachineSDNode *CNode;
4279 if (FoldedLoad || FoldedBCast) {
4280 SDVTList VTs = CurDAG->getVTList(MaskVT, MVT::Other);
4281
4282 if (IsMasked) {
4283 SDValue Ops[] = { InMask, Src0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4,
4284 Load.getOperand(0) };
4285 CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
4286 } else {
4287 SDValue Ops[] = { Src0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4,
4288 Load.getOperand(0) };
4289 CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
4290 }
4291
4292 // Update the chain.
4293 ReplaceUses(Load.getValue(1), SDValue(CNode, 1));
4294 // Record the mem-refs
4295 CurDAG->setNodeMemRefs(CNode, {cast<MemSDNode>(Load)->getMemOperand()});
4296 } else {
4297 if (IsMasked)
4298 CNode = CurDAG->getMachineNode(Opc, dl, MaskVT, InMask, Src0, Src1);
4299 else
4300 CNode = CurDAG->getMachineNode(Opc, dl, MaskVT, Src0, Src1);
4301 }
4302
4303 // If we widened, we need to shrink the mask VT.
4304 if (Widen) {
4305 unsigned RegClass = getMaskRC(ResVT);
4306 SDValue RC = CurDAG->getTargetConstant(RegClass, dl, MVT::i32);
4307 CNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
4308 dl, ResVT, SDValue(CNode, 0), RC);
4309 }
4310
4311 ReplaceUses(SDValue(Root, 0), SDValue(CNode, 0));
4312 CurDAG->RemoveDeadNode(Root);
4313 return true;
4314 }
4315
4316 // Try to match the bitselect pattern (or (and A, B), (andn A, C)). Turn it
4317 // into vpternlog.
4318 bool X86DAGToDAGISel::tryMatchBitSelect(SDNode *N) {
4319 assert(N->getOpcode() == ISD::OR && "Unexpected opcode!");
4320
4321 MVT NVT = N->getSimpleValueType(0);
4322
4323 // Make sure we support VPTERNLOG.
4324 if (!NVT.isVector() || !Subtarget->hasAVX512())
4325 return false;
4326
4327 // We need VLX for 128/256-bit.
4328 if (!(Subtarget->hasVLX() || NVT.is512BitVector()))
4329 return false;
4330
4331 SDValue N0 = N->getOperand(0);
4332 SDValue N1 = N->getOperand(1);
4333
4334 // Canonicalize AND to LHS.
4335 if (N1.getOpcode() == ISD::AND)
4336 std::swap(N0, N1);
4337
4338 if (N0.getOpcode() != ISD::AND ||
4339 N1.getOpcode() != X86ISD::ANDNP ||
4340 !N0.hasOneUse() || !N1.hasOneUse())
4341 return false;
4342
4343 // ANDN is not commutable, so use it to pin down A and C.
4344 SDValue A = N1.getOperand(0);
4345 SDValue C = N1.getOperand(1);
4346
4347 // AND is commutable, if one operand matches A, the other operand is B.
4348 // Otherwise this isn't a match.
4349 SDValue B;
4350 if (N0.getOperand(0) == A)
4351 B = N0.getOperand(1);
4352 else if (N0.getOperand(1) == A)
4353 B = N0.getOperand(0);
4354 else
4355 return false;
4356
4357 SDLoc dl(N);
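// 0xCA is the truth table of (A & B) | (~A & C) evaluated at A = 0xF0,
// B = 0xCC, C = 0xAA: (0xF0 & 0xCC) | (0x0F & 0xAA) = 0xC0 | 0x0A = 0xCA.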
4358 SDValue Imm = CurDAG->getTargetConstant(0xCA, dl, MVT::i8);
4359 SDValue Ternlog = CurDAG->getNode(X86ISD::VPTERNLOG, dl, NVT, A, B, C, Imm);
4360 ReplaceNode(N, Ternlog.getNode());
4361 SelectCode(Ternlog.getNode());
4362 return true;
4363 }
4364
4365 void X86DAGToDAGISel::Select(SDNode *Node) {
4366 MVT NVT = Node->getSimpleValueType(0);
4367 unsigned Opcode = Node->getOpcode();
4368 SDLoc dl(Node);
4369
4370 if (Node->isMachineOpcode()) {
4371 LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
4372 Node->setNodeId(-1);
4373 return; // Already selected.
4374 }
4375
4376 switch (Opcode) {
4377 default: break;
4378 case ISD::INTRINSIC_VOID: {
4379 unsigned IntNo = Node->getConstantOperandVal(1);
4380 switch (IntNo) {
4381 default: break;
4382 case Intrinsic::x86_sse3_monitor:
4383 case Intrinsic::x86_monitorx:
4384 case Intrinsic::x86_clzero: {
4385 bool Use64BitPtr = Node->getOperand(2).getValueType() == MVT::i64;
4386
4387 unsigned Opc = 0;
4388 switch (IntNo) {
4389 default: llvm_unreachable("Unexpected intrinsic!");
4390 case Intrinsic::x86_sse3_monitor:
4391 if (!Subtarget->hasSSE3())
4392 break;
4393 Opc = Use64BitPtr ? X86::MONITOR64rrr : X86::MONITOR32rrr;
4394 break;
4395 case Intrinsic::x86_monitorx:
4396 if (!Subtarget->hasMWAITX())
4397 break;
4398 Opc = Use64BitPtr ? X86::MONITORX64rrr : X86::MONITORX32rrr;
4399 break;
4400 case Intrinsic::x86_clzero:
4401 if (!Subtarget->hasCLZERO())
4402 break;
4403 Opc = Use64BitPtr ? X86::CLZERO64r : X86::CLZERO32r;
4404 break;
4405 }
4406
4407 if (Opc) {
4408 unsigned PtrReg = Use64BitPtr ? X86::RAX : X86::EAX;
4409 SDValue Chain = CurDAG->getCopyToReg(Node->getOperand(0), dl, PtrReg,
4410 Node->getOperand(2), SDValue());
4411 SDValue InFlag = Chain.getValue(1);
4412
4413 if (IntNo == Intrinsic::x86_sse3_monitor ||
4414 IntNo == Intrinsic::x86_monitorx) {
4415 // Copy the other two operands to ECX and EDX.
4416 Chain = CurDAG->getCopyToReg(Chain, dl, X86::ECX, Node->getOperand(3),
4417 InFlag);
4418 InFlag = Chain.getValue(1);
4419 Chain = CurDAG->getCopyToReg(Chain, dl, X86::EDX, Node->getOperand(4),
4420 InFlag);
4421 InFlag = Chain.getValue(1);
4422 }
4423
4424 MachineSDNode *CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
4425 { Chain, InFlag});
4426 ReplaceNode(Node, CNode);
4427 return;
4428 }
4429
4430 break;
4431 }
4432 case Intrinsic::x86_tileloadd64:
4433 case Intrinsic::x86_tileloaddt164:
4434 case Intrinsic::x86_tilestored64: {
4435 if (!Subtarget->hasAMXTILE())
4436 break;
4437 unsigned Opc;
4438 switch (IntNo) {
4439 default: llvm_unreachable("Unexpected intrinsic!");
4440 case Intrinsic::x86_tileloadd64: Opc = X86::PTILELOADD; break;
4441 case Intrinsic::x86_tileloaddt164: Opc = X86::PTILELOADDT1; break;
4442 case Intrinsic::x86_tilestored64: Opc = X86::PTILESTORED; break;
4443 }
4444 // FIXME: Match displacement and scale.
4445 unsigned TIndex = Node->getConstantOperandVal(2);
4446 SDValue TReg = getI8Imm(TIndex, dl);
4447 SDValue Base = Node->getOperand(3);
4448 SDValue Scale = getI8Imm(1, dl);
4449 SDValue Index = Node->getOperand(4);
4450 SDValue Disp = CurDAG->getTargetConstant(0, dl, MVT::i32);
4451 SDValue Segment = CurDAG->getRegister(0, MVT::i16);
4452 SDValue Chain = Node->getOperand(0);
4453 MachineSDNode *CNode;
4454 if (Opc == X86::PTILESTORED) {
4455 SDValue Ops[] = { Base, Scale, Index, Disp, Segment, TReg, Chain };
4456 CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
4457 } else {
4458 SDValue Ops[] = { TReg, Base, Scale, Index, Disp, Segment, Chain };
4459 CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
4460 }
4461 ReplaceNode(Node, CNode);
4462 return;
4463 }
4464 }
4465 break;
4466 }
4467 case ISD::BRIND: {
4468 if (Subtarget->isTargetNaCl())
4469 // NaCl has its own pass where jmp %r32 instructions are converted to
4470 // jmp %r64. We leave the instruction alone.
4471 break;
4472 if (Subtarget->isTarget64BitILP32()) {
4473 // Converts a 32-bit register to a 64-bit, zero-extended version of
4474 // it. This is needed because x86-64 can do many things, but jmp %r32
4475 // ain't one of them.
4476 SDValue Target = Node->getOperand(1);
4477 assert(Target.getValueType() == MVT::i32 && "Unexpected VT!");
4478 SDValue ZextTarget = CurDAG->getZExtOrTrunc(Target, dl, MVT::i64);
4479 SDValue Brind = CurDAG->getNode(ISD::BRIND, dl, MVT::Other,
4480 Node->getOperand(0), ZextTarget);
4481 ReplaceNode(Node, Brind.getNode());
4482 SelectCode(ZextTarget.getNode());
4483 SelectCode(Brind.getNode());
4484 return;
4485 }
4486 break;
4487 }
4488 case X86ISD::GlobalBaseReg:
4489 ReplaceNode(Node, getGlobalBaseReg());
4490 return;
4491
4492 case ISD::BITCAST:
4493 // Just drop all 128/256/512-bit bitcasts.
4494 if (NVT.is512BitVector() || NVT.is256BitVector() || NVT.is128BitVector() ||
4495 NVT == MVT::f128) {
4496 ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
4497 CurDAG->RemoveDeadNode(Node);
4498 return;
4499 }
4500 break;
4501
4502 case ISD::SRL:
4503 if (matchBitExtract(Node))
4504 return;
4505 LLVM_FALLTHROUGH;
4506 case ISD::SRA:
4507 case ISD::SHL:
4508 if (tryShiftAmountMod(Node))
4509 return;
4510 break;
4511
4512 case ISD::AND:
4513 if (NVT.isVector() && NVT.getVectorElementType() == MVT::i1) {
4514 // Try to form a masked VPTESTM. Operands can be in either order.
4515 SDValue N0 = Node->getOperand(0);
4516 SDValue N1 = Node->getOperand(1);
4517 if (N0.getOpcode() == ISD::SETCC && N0.hasOneUse() &&
4518 tryVPTESTM(Node, N0, N1))
4519 return;
4520 if (N1.getOpcode() == ISD::SETCC && N1.hasOneUse() &&
4521 tryVPTESTM(Node, N1, N0))
4522 return;
4523 }
4524
4525 if (MachineSDNode *NewNode = matchBEXTRFromAndImm(Node)) {
4526 ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 0));
4527 CurDAG->RemoveDeadNode(Node);
4528 return;
4529 }
4530 if (matchBitExtract(Node))
4531 return;
4532 if (AndImmShrink && shrinkAndImmediate(Node))
4533 return;
4534
4535 LLVM_FALLTHROUGH;
4536 case ISD::OR:
4537 case ISD::XOR:
4538 if (tryShrinkShlLogicImm(Node))
4539 return;
4540 if (Opcode == ISD::OR && tryMatchBitSelect(Node))
4541 return;
4542 if (tryVPTERNLOG(Node))
4543 return;
4544
4545 LLVM_FALLTHROUGH;
4546 case ISD::ADD:
4547 case ISD::SUB: {
4548 // Try to avoid folding immediates with multiple uses when optimizing for
4549 // size. This code tries to select the register form directly, to avoid
4550 // going through the isel table, which might fold the immediate. We can't
4551 // change the add/sub/and/or/xor-with-immediate patterns in the tablegen
4552 // files to check the immediate use count without making the patterns
4553 // unavailable to the fast-isel table.
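// For example, at minsize a 4-byte immediate shared by several adds can
// be materialized once (say, 'movl $0x12345678, %ecx') and then used via
// the rr form, rather than re-encoding the 4 immediate bytes at every use
// (illustrative; the actual decision is made by
// shouldAvoidImmediateInstFormsForSize below).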
4554 if (!CurDAG->shouldOptForSize())
4555 break;
4556
4557 // Only handle i8/i16/i32/i64.
4558 if (NVT != MVT::i8 && NVT != MVT::i16 && NVT != MVT::i32 && NVT != MVT::i64)
4559 break;
4560
4561 SDValue N0 = Node->getOperand(0);
4562 SDValue N1 = Node->getOperand(1);
4563
4564 ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
4565 if (!Cst)
4566 break;
4567
4568 int64_t Val = Cst->getSExtValue();
4569
4570 // Make sure it's an immediate that is considered foldable.
4571 // FIXME: Handle unsigned 32 bit immediates for 64-bit AND.
4572 if (!isInt<8>(Val) && !isInt<32>(Val))
4573 break;
4574
4575 // If this can match to INC/DEC, let it go.
4576 if (Opcode == ISD::ADD && (Val == 1 || Val == -1))
4577 break;
4578
4579 // Check if we should avoid folding this immediate.
4580 if (!shouldAvoidImmediateInstFormsForSize(N1.getNode()))
4581 break;
4582
4583 // We should not fold the immediate. So we need a register form instead.
4584 unsigned ROpc, MOpc;
4585 switch (NVT.SimpleTy) {
4586 default: llvm_unreachable("Unexpected VT!");
4587 case MVT::i8:
4588 switch (Opcode) {
4589 default: llvm_unreachable("Unexpected opcode!");
4590 case ISD::ADD: ROpc = X86::ADD8rr; MOpc = X86::ADD8rm; break;
4591 case ISD::SUB: ROpc = X86::SUB8rr; MOpc = X86::SUB8rm; break;
4592 case ISD::AND: ROpc = X86::AND8rr; MOpc = X86::AND8rm; break;
4593 case ISD::OR: ROpc = X86::OR8rr; MOpc = X86::OR8rm; break;
4594 case ISD::XOR: ROpc = X86::XOR8rr; MOpc = X86::XOR8rm; break;
4595 }
4596 break;
4597 case MVT::i16:
4598 switch (Opcode) {
4599 default: llvm_unreachable("Unexpected opcode!");
4600 case ISD::ADD: ROpc = X86::ADD16rr; MOpc = X86::ADD16rm; break;
4601 case ISD::SUB: ROpc = X86::SUB16rr; MOpc = X86::SUB16rm; break;
4602 case ISD::AND: ROpc = X86::AND16rr; MOpc = X86::AND16rm; break;
4603 case ISD::OR: ROpc = X86::OR16rr; MOpc = X86::OR16rm; break;
4604 case ISD::XOR: ROpc = X86::XOR16rr; MOpc = X86::XOR16rm; break;
4605 }
4606 break;
4607 case MVT::i32:
4608 switch (Opcode) {
4609 default: llvm_unreachable("Unexpected opcode!");
4610 case ISD::ADD: ROpc = X86::ADD32rr; MOpc = X86::ADD32rm; break;
4611 case ISD::SUB: ROpc = X86::SUB32rr; MOpc = X86::SUB32rm; break;
4612 case ISD::AND: ROpc = X86::AND32rr; MOpc = X86::AND32rm; break;
4613 case ISD::OR: ROpc = X86::OR32rr; MOpc = X86::OR32rm; break;
4614 case ISD::XOR: ROpc = X86::XOR32rr; MOpc = X86::XOR32rm; break;
4615 }
4616 break;
4617 case MVT::i64:
4618 switch (Opcode) {
4619 default: llvm_unreachable("Unexpected opcode!");
4620 case ISD::ADD: ROpc = X86::ADD64rr; MOpc = X86::ADD64rm; break;
4621 case ISD::SUB: ROpc = X86::SUB64rr; MOpc = X86::SUB64rm; break;
4622 case ISD::AND: ROpc = X86::AND64rr; MOpc = X86::AND64rm; break;
4623 case ISD::OR: ROpc = X86::OR64rr; MOpc = X86::OR64rm; break;
4624 case ISD::XOR: ROpc = X86::XOR64rr; MOpc = X86::XOR64rm; break;
4625 }
4626 break;
4627 }
4628
4629 // OK, this is an AND/OR/XOR/ADD/SUB with a constant operand.
4630
4631 // If this is not a subtract, we can still try to fold a load.
4632 if (Opcode != ISD::SUB) {
4633 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4634 if (tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
4635 SDValue Ops[] = { N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
4636 SDVTList VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
4637 MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
4638 // Update the chain.
4639 ReplaceUses(N0.getValue(1), SDValue(CNode, 2));
4640 // Record the mem-refs
4641 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N0)->getMemOperand()});
4642 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
4643 CurDAG->RemoveDeadNode(Node);
4644 return;
4645 }
4646 }
4647
4648 CurDAG->SelectNodeTo(Node, ROpc, NVT, MVT::i32, N0, N1);
4649 return;
4650 }
4651
4652 case X86ISD::SMUL:
4653 // i16/i32/i64 are handled with isel patterns.
4654 if (NVT != MVT::i8)
4655 break;
4656 LLVM_FALLTHROUGH;
4657 case X86ISD::UMUL: {
4658 SDValue N0 = Node->getOperand(0);
4659 SDValue N1 = Node->getOperand(1);
4660
4661 unsigned LoReg, ROpc, MOpc;
4662 switch (NVT.SimpleTy) {
4663 default: llvm_unreachable("Unsupported VT!");
4664 case MVT::i8:
4665 LoReg = X86::AL;
4666 ROpc = Opcode == X86ISD::SMUL ? X86::IMUL8r : X86::MUL8r;
4667 MOpc = Opcode == X86ISD::SMUL ? X86::IMUL8m : X86::MUL8m;
4668 break;
4669 case MVT::i16:
4670 LoReg = X86::AX;
4671 ROpc = X86::MUL16r;
4672 MOpc = X86::MUL16m;
4673 break;
4674 case MVT::i32:
4675 LoReg = X86::EAX;
4676 ROpc = X86::MUL32r;
4677 MOpc = X86::MUL32m;
4678 break;
4679 case MVT::i64:
4680 LoReg = X86::RAX;
4681 ROpc = X86::MUL64r;
4682 MOpc = X86::MUL64m;
4683 break;
4684 }
4685
4686 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4687 bool FoldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
4688 // Multiply is commutative.
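// Only the explicit operand can become a folded memory operand; the
// other multiplicand is implicit in AL/AX/EAX/RAX. So if the load is on
// N0, swap the operands to put it where it can fold.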
4689 if (!FoldedLoad) {
4690 FoldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
4691 if (FoldedLoad)
4692 std::swap(N0, N1);
4693 }
4694
4695 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
4696 N0, SDValue()).getValue(1);
4697
4698 MachineSDNode *CNode;
4699 if (FoldedLoad) {
4700 // For i16/i32/i64, the instruction produces both a low and a high result
4701 // even though only the low result is used.
4702 SDVTList VTs;
4703 if (NVT == MVT::i8)
4704 VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
4705 else
4706 VTs = CurDAG->getVTList(NVT, NVT, MVT::i32, MVT::Other);
4707
4708 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
4709 InFlag };
4710 CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
4711
4712 // Update the chain.
4713 ReplaceUses(N1.getValue(1), SDValue(CNode, NVT == MVT::i8 ? 2 : 3));
4714 // Record the mem-refs
4715 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
4716 } else {
4717 // For i16/i32/i64, the instruction produces both a low and a high result
4718 // even though only the low result is used.
4719 SDVTList VTs;
4720 if (NVT == MVT::i8)
4721 VTs = CurDAG->getVTList(NVT, MVT::i32);
4722 else
4723 VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
4724
4725 CNode = CurDAG->getMachineNode(ROpc, dl, VTs, {N1, InFlag});
4726 }
4727
4728 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
4729 ReplaceUses(SDValue(Node, 1), SDValue(CNode, NVT == MVT::i8 ? 1 : 2));
4730 CurDAG->RemoveDeadNode(Node);
4731 return;
4732 }
4733
4734 case ISD::SMUL_LOHI:
4735 case ISD::UMUL_LOHI: {
4736 SDValue N0 = Node->getOperand(0);
4737 SDValue N1 = Node->getOperand(1);
4738
4739 unsigned Opc, MOpc;
4740 unsigned LoReg, HiReg;
4741 bool IsSigned = Opcode == ISD::SMUL_LOHI;
4742 bool UseMULX = !IsSigned && Subtarget->hasBMI2();
4743 bool UseMULXHi = UseMULX && SDValue(Node, 0).use_empty();
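// BMI2 MULX reads the implicit multiplicand from EDX/RDX, writes both
// product halves to explicit destinations, and does not clobber EFLAGS.
// The MULX*Hrr/Hrm pseudo forms are used when only the high half is
// live, so nothing needs to be allocated for the low half.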
4744 switch (NVT.SimpleTy) {
4745 default: llvm_unreachable("Unsupported VT!");
4746 case MVT::i32:
4747 Opc = UseMULXHi ? X86::MULX32Hrr :
4748 UseMULX ? X86::MULX32rr :
4749 IsSigned ? X86::IMUL32r : X86::MUL32r;
4750 MOpc = UseMULXHi ? X86::MULX32Hrm :
4751 UseMULX ? X86::MULX32rm :
4752 IsSigned ? X86::IMUL32m : X86::MUL32m;
4753 LoReg = UseMULX ? X86::EDX : X86::EAX;
4754 HiReg = X86::EDX;
4755 break;
4756 case MVT::i64:
4757 Opc = UseMULXHi ? X86::MULX64Hrr :
4758 UseMULX ? X86::MULX64rr :
4759 IsSigned ? X86::IMUL64r : X86::MUL64r;
4760 MOpc = UseMULXHi ? X86::MULX64Hrm :
4761 UseMULX ? X86::MULX64rm :
4762 IsSigned ? X86::IMUL64m : X86::MUL64m;
4763 LoReg = UseMULX ? X86::RDX : X86::RAX;
4764 HiReg = X86::RDX;
4765 break;
4766 }
4767
4768 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4769 bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
4770 // Multiply is commutative.
4771 if (!foldedLoad) {
4772 foldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
4773 if (foldedLoad)
4774 std::swap(N0, N1);
4775 }
4776
4777 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
4778 N0, SDValue()).getValue(1);
4779 SDValue ResHi, ResLo;
4780 if (foldedLoad) {
4781 SDValue Chain;
4782 MachineSDNode *CNode = nullptr;
4783 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
4784 InFlag };
4785 if (UseMULXHi) {
4786 SDVTList VTs = CurDAG->getVTList(NVT, MVT::Other);
4787 CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
4788 ResHi = SDValue(CNode, 0);
4789 Chain = SDValue(CNode, 1);
4790 } else if (UseMULX) {
4791 SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Other);
4792 CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
4793 ResHi = SDValue(CNode, 0);
4794 ResLo = SDValue(CNode, 1);
4795 Chain = SDValue(CNode, 2);
4796 } else {
4797 SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue);
4798 CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
4799 Chain = SDValue(CNode, 0);
4800 InFlag = SDValue(CNode, 1);
4801 }
4802
4803 // Update the chain.
4804 ReplaceUses(N1.getValue(1), Chain);
4805 // Record the mem-refs
4806 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
4807 } else {
4808 SDValue Ops[] = { N1, InFlag };
4809 if (UseMULXHi) {
4810 SDVTList VTs = CurDAG->getVTList(NVT);
4811 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
4812 ResHi = SDValue(CNode, 0);
4813 } else if (UseMULX) {
4814 SDVTList VTs = CurDAG->getVTList(NVT, NVT);
4815 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
4816 ResHi = SDValue(CNode, 0);
4817 ResLo = SDValue(CNode, 1);
4818 } else {
4819 SDVTList VTs = CurDAG->getVTList(MVT::Glue);
4820 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
4821 InFlag = SDValue(CNode, 0);
4822 }
4823 }
4824
4825 // Copy the low half of the result, if it is needed.
4826 if (!SDValue(Node, 0).use_empty()) {
4827 if (!ResLo) {
4828 assert(LoReg && "Register for low half is not defined!");
4829 ResLo = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, LoReg,
4830 NVT, InFlag);
4831 InFlag = ResLo.getValue(2);
4832 }
4833 ReplaceUses(SDValue(Node, 0), ResLo);
4834 LLVM_DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG);
4835 dbgs() << '\n');
4836 }
4837 // Copy the high half of the result, if it is needed.
4838 if (!SDValue(Node, 1).use_empty()) {
4839 if (!ResHi) {
4840 assert(HiReg && "Register for high half is not defined!");
4841 ResHi = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, HiReg,
4842 NVT, InFlag);
4843 InFlag = ResHi.getValue(2);
4844 }
4845 ReplaceUses(SDValue(Node, 1), ResHi);
4846 LLVM_DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG);
4847 dbgs() << '\n');
4848 }
4849
4850 CurDAG->RemoveDeadNode(Node);
4851 return;
4852 }
4853
4854 case ISD::SDIVREM:
4855 case ISD::UDIVREM: {
4856 SDValue N0 = Node->getOperand(0);
4857 SDValue N1 = Node->getOperand(1);
4858
4859 unsigned ROpc, MOpc;
4860 bool isSigned = Opcode == ISD::SDIVREM;
4861 if (!isSigned) {
4862 switch (NVT.SimpleTy) {
4863 default: llvm_unreachable("Unsupported VT!");
4864 case MVT::i8: ROpc = X86::DIV8r; MOpc = X86::DIV8m; break;
4865 case MVT::i16: ROpc = X86::DIV16r; MOpc = X86::DIV16m; break;
4866 case MVT::i32: ROpc = X86::DIV32r; MOpc = X86::DIV32m; break;
4867 case MVT::i64: ROpc = X86::DIV64r; MOpc = X86::DIV64m; break;
4868 }
4869 } else {
4870 switch (NVT.SimpleTy) {
4871 default: llvm_unreachable("Unsupported VT!");
4872 case MVT::i8: ROpc = X86::IDIV8r; MOpc = X86::IDIV8m; break;
4873 case MVT::i16: ROpc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
4874 case MVT::i32: ROpc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
4875 case MVT::i64: ROpc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
4876 }
4877 }
4878
4879 unsigned LoReg, HiReg, ClrReg;
4880 unsigned SExtOpcode;
4881 switch (NVT.SimpleTy) {
4882 default: llvm_unreachable("Unsupported VT!");
4883 case MVT::i8:
4884 LoReg = X86::AL; ClrReg = HiReg = X86::AH;
4885 SExtOpcode = 0; // Not used.
4886 break;
4887 case MVT::i16:
4888 LoReg = X86::AX; HiReg = X86::DX;
4889 ClrReg = X86::DX;
4890 SExtOpcode = X86::CWD;
4891 break;
4892 case MVT::i32:
4893 LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
4894 SExtOpcode = X86::CDQ;
4895 break;
4896 case MVT::i64:
4897 LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
4898 SExtOpcode = X86::CQO;
4899 break;
4900 }
4901
4902 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4903 bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
4904 bool signBitIsZero = CurDAG->SignBitIsZero(N0);
4905
4906 SDValue InFlag;
4907 if (NVT == MVT::i8) {
4908 // Special case for div8: use a sign- or zero-extending move into AX so
4909 // the upper 8 bits (AH) hold the proper extension of the dividend.
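// e.g. for an unsigned i8 divide: 'movzbl %cl, %eax ; divb %bl' leaves
// the quotient in AL and the remainder in AH (illustrative registers).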
4910 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain;
4911 MachineSDNode *Move;
4912 if (tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
4913 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
4914 unsigned Opc = (isSigned && !signBitIsZero) ? X86::MOVSX16rm8
4915 : X86::MOVZX16rm8;
4916 Move = CurDAG->getMachineNode(Opc, dl, MVT::i16, MVT::Other, Ops);
4917 Chain = SDValue(Move, 1);
4918 ReplaceUses(N0.getValue(1), Chain);
4919 // Record the mem-refs
4920 CurDAG->setNodeMemRefs(Move, {cast<LoadSDNode>(N0)->getMemOperand()});
4921 } else {
4922 unsigned Opc = (isSigned && !signBitIsZero) ? X86::MOVSX16rr8
4923 : X86::MOVZX16rr8;
4924 Move = CurDAG->getMachineNode(Opc, dl, MVT::i16, N0);
4925 Chain = CurDAG->getEntryNode();
4926 }
4927 Chain = CurDAG->getCopyToReg(Chain, dl, X86::AX, SDValue(Move, 0),
4928 SDValue());
4929 InFlag = Chain.getValue(1);
4930 } else {
4931 InFlag =
4932 CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
4933 LoReg, N0, SDValue()).getValue(1);
4934 if (isSigned && !signBitIsZero) {
4935 // Sign extend the low part into the high part.
4936 InFlag =
4937 SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag),0);
4938 } else {
4939 // Zero out the high part, effectively zero extending the input.
4940 SDVTList VTs = CurDAG->getVTList(MVT::i32, MVT::i32);
4941 SDValue ClrNode =
4942 SDValue(CurDAG->getMachineNode(X86::MOV32r0, dl, VTs, None), 0);
4943 switch (NVT.SimpleTy) {
4944 case MVT::i16:
4945 ClrNode =
4946 SDValue(CurDAG->getMachineNode(
4947 TargetOpcode::EXTRACT_SUBREG, dl, MVT::i16, ClrNode,
4948 CurDAG->getTargetConstant(X86::sub_16bit, dl,
4949 MVT::i32)),
4950 0);
4951 break;
4952 case MVT::i32:
4953 break;
4954 case MVT::i64:
4955 ClrNode =
4956 SDValue(CurDAG->getMachineNode(
4957 TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
4958 CurDAG->getTargetConstant(0, dl, MVT::i64), ClrNode,
4959 CurDAG->getTargetConstant(X86::sub_32bit, dl,
4960 MVT::i32)),
4961 0);
4962 break;
4963 default:
4964 llvm_unreachable("Unexpected division source");
4965 }
4966
4967 InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
4968 ClrNode, InFlag).getValue(1);
4969 }
4970 }
4971
4972 if (foldedLoad) {
4973 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
4974 InFlag };
4975 MachineSDNode *CNode =
4976 CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops);
4977 InFlag = SDValue(CNode, 1);
4978 // Update the chain.
4979 ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
4980 // Record the mem-refs
4981 CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
4982 } else {
4983 InFlag =
4984 SDValue(CurDAG->getMachineNode(ROpc, dl, MVT::Glue, N1, InFlag), 0);
4985 }
4986
4987 // Prevent use of AH in a REX instruction by explicitly copying it to
4988 // an ABCD_L register.
4989 //
4990 // The current assumption of the register allocator is that isel
4991 // won't generate explicit references to the GR8_ABCD_H registers. If
4992 // the allocator and/or the backend get enhanced to be more robust in
4993 // that regard, this can be, and should be, removed.
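// (A REX prefix reuses the AH/BH/CH/DH encodings for SPL/BPL/SIL/DIL,
// so a REX-prefixed instruction simply cannot name AH; hence the
// MOVSX/MOVZX32rr8_NOREX forms below.)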
4994 if (HiReg == X86::AH && !SDValue(Node, 1).use_empty()) {
4995 SDValue AHCopy = CurDAG->getRegister(X86::AH, MVT::i8);
4996 unsigned AHExtOpcode =
4997 isSigned ? X86::MOVSX32rr8_NOREX : X86::MOVZX32rr8_NOREX;
4998
4999 SDNode *RNode = CurDAG->getMachineNode(AHExtOpcode, dl, MVT::i32,
5000 MVT::Glue, AHCopy, InFlag);
5001 SDValue Result(RNode, 0);
5002 InFlag = SDValue(RNode, 1);
5003
5004 Result =
5005 CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result);
5006
5007 ReplaceUses(SDValue(Node, 1), Result);
5008 LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
5009 dbgs() << '\n');
5010 }
5011 // Copy the division (low) result, if it is needed.
5012 if (!SDValue(Node, 0).use_empty()) {
5013 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
5014 LoReg, NVT, InFlag);
5015 InFlag = Result.getValue(2);
5016 ReplaceUses(SDValue(Node, 0), Result);
5017 LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
5018 dbgs() << '\n');
5019 }
5020 // Copy the remainder (high) result, if it is needed.
5021 if (!SDValue(Node, 1).use_empty()) {
5022 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
5023 HiReg, NVT, InFlag);
5024 InFlag = Result.getValue(2);
5025 ReplaceUses(SDValue(Node, 1), Result);
5026 LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
5027 dbgs() << '\n');
5028 }
5029 CurDAG->RemoveDeadNode(Node);
5030 return;
5031 }
5032
5033 case X86ISD::FCMP:
5034 case X86ISD::STRICT_FCMP:
5035 case X86ISD::STRICT_FCMPS: {
5036 bool IsStrictCmp = Node->getOpcode() == X86ISD::STRICT_FCMP ||
5037 Node->getOpcode() == X86ISD::STRICT_FCMPS;
5038 SDValue N0 = Node->getOperand(IsStrictCmp ? 1 : 0);
5039 SDValue N1 = Node->getOperand(IsStrictCmp ? 2 : 1);
5040
5041 // Save the original VT of the compare.
5042 MVT CmpVT = N0.getSimpleValueType();
5043
5044 // Floating point needs special handling if we don't have FCOMI (the CMOV feature implies FCOMI, so check that).
5045 if (Subtarget->hasCMov())
5046 break;
5047
5048 bool IsSignaling = Node->getOpcode() == X86ISD::STRICT_FCMPS;
5049
5050 unsigned Opc;
5051 switch (CmpVT.SimpleTy) {
5052 default: llvm_unreachable("Unexpected type!");
5053 case MVT::f32:
5054 Opc = IsSignaling ? X86::COM_Fpr32 : X86::UCOM_Fpr32;
5055 break;
5056 case MVT::f64:
5057 Opc = IsSignaling ? X86::COM_Fpr64 : X86::UCOM_Fpr64;
5058 break;
5059 case MVT::f80:
5060 Opc = IsSignaling ? X86::COM_Fpr80 : X86::UCOM_Fpr80;
5061 break;
5062 }
5063
5064 SDValue Cmp;
5065 SDValue Chain =
5066 IsStrictCmp ? Node->getOperand(0) : CurDAG->getEntryNode();
5067 if (IsStrictCmp) {
5068 SDVTList VTs = CurDAG->getVTList(MVT::i16, MVT::Other);
5069 Cmp = SDValue(CurDAG->getMachineNode(Opc, dl, VTs, {N0, N1, Chain}), 0);
5070 Chain = Cmp.getValue(1);
5071 } else {
5072 Cmp = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::i16, N0, N1), 0);
5073 }
5074
5075 // Move FPSW to AX.
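// This is the classic pre-FCOMI sequence: the x87 compare sets the
// condition codes in FPSW, 'fnstsw %ax' copies them into AX, and the
// 'sahf' below moves AH into EFLAGS where integer SETCC/JCC can read
// them.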
5076 SDValue FPSW = CurDAG->getCopyToReg(Chain, dl, X86::FPSW, Cmp, SDValue());
5077 Chain = FPSW;
5078 SDValue FNSTSW =
5079 SDValue(CurDAG->getMachineNode(X86::FNSTSW16r, dl, MVT::i16, FPSW,
5080 FPSW.getValue(1)),
5081 0);
5082
5083 // Extract upper 8-bits of AX.
5084 SDValue Extract =
5085 CurDAG->getTargetExtractSubreg(X86::sub_8bit_hi, dl, MVT::i8, FNSTSW);
5086
5087 // Move AH into flags.
5088 // Some 64-bit targets lack SAHF support, but they do support FCOMI.
5089 assert(Subtarget->hasLAHFSAHF() &&
5090 "Target doesn't support SAHF or FCOMI?");
5091 SDValue AH = CurDAG->getCopyToReg(Chain, dl, X86::AH, Extract, SDValue());
5092 Chain = AH;
5093 SDValue SAHF = SDValue(
5094 CurDAG->getMachineNode(X86::SAHF, dl, MVT::i32, AH.getValue(1)), 0);
5095
5096 if (IsStrictCmp)
5097 ReplaceUses(SDValue(Node, 1), Chain);
5098
5099 ReplaceUses(SDValue(Node, 0), SAHF);
5100 CurDAG->RemoveDeadNode(Node);
5101 return;
5102 }
5103
5104 case X86ISD::CMP: {
5105 SDValue N0 = Node->getOperand(0);
5106 SDValue N1 = Node->getOperand(1);
5107
5108 // Optimizations for TEST compares.
5109 if (!isNullConstant(N1))
5110 break;
5111
5112 // Save the original VT of the compare.
5113 MVT CmpVT = N0.getSimpleValueType();
5114
5115 // If we are comparing (and (shr X, C), Mask) with 0, emit a BEXTR followed
5116 // by a test instruction. The test should be removed later by
5117 // analyzeCompare if we are using only the zero flag.
5118 // TODO: Should we check the users and use the BEXTR flags directly?
5119 if (N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
5120 if (MachineSDNode *NewNode = matchBEXTRFromAndImm(N0.getNode())) {
5121 unsigned TestOpc = CmpVT == MVT::i64 ? X86::TEST64rr
5122 : X86::TEST32rr;
5123 SDValue BEXTR = SDValue(NewNode, 0);
5124 NewNode = CurDAG->getMachineNode(TestOpc, dl, MVT::i32, BEXTR, BEXTR);
5125 ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 0));
5126 CurDAG->RemoveDeadNode(Node);
5127 return;
5128 }
5129 }
5130
5131 // We can peek through truncates, but we need to be careful below.
5132 if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse())
5133 N0 = N0.getOperand(0);
5134
5135 // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
5136 // use a smaller encoding.
5137 // Look past the truncate if CMP is the only use of it.
5138 if (N0.getOpcode() == ISD::AND &&
5139 N0.getNode()->hasOneUse() &&
5140 N0.getValueType() != MVT::i8) {
5141 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
5142 if (!C) break;
5143 uint64_t Mask = C->getZExtValue();
5144
5145 // Check if we can replace AND+IMM64 with a shift. This is possible for
5146 // masks like 0xFF000000 or 0x00FFFFFF and if we care only about the zero
5147 // flag.
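// e.g. testing (X & 0xFFFFFFFF00000000) == 0 becomes
// 'shrq $32, %rX ; testq %rX, %rX', which avoids a movabs of the 8-byte
// immediate.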
5148 if (CmpVT == MVT::i64 && !isInt<32>(Mask) &&
5149 onlyUsesZeroFlag(SDValue(Node, 0))) {
5150 if (isMask_64(~Mask)) {
5151 unsigned TrailingZeros = countTrailingZeros(Mask);
5152 SDValue Imm = CurDAG->getTargetConstant(TrailingZeros, dl, MVT::i64);
5153 SDValue Shift =
5154 SDValue(CurDAG->getMachineNode(X86::SHR64ri, dl, MVT::i64, MVT::i32,
5155 N0.getOperand(0), Imm), 0);
5156 MachineSDNode *Test = CurDAG->getMachineNode(X86::TEST64rr, dl,
5157 MVT::i32, Shift, Shift);
5158 ReplaceNode(Node, Test);
5159 return;
5160 }
5161 if (isMask_64(Mask)) {
5162 unsigned LeadingZeros = countLeadingZeros(Mask);
5163 SDValue Imm = CurDAG->getTargetConstant(LeadingZeros, dl, MVT::i64);
5164 SDValue Shift =
5165 SDValue(CurDAG->getMachineNode(X86::SHL64ri, dl, MVT::i64, MVT::i32,
5166 N0.getOperand(0), Imm), 0);
5167 MachineSDNode *Test = CurDAG->getMachineNode(X86::TEST64rr, dl,
5168 MVT::i32, Shift, Shift);
5169 ReplaceNode(Node, Test);
5170 return;
5171 }
5172 }
5173
5174 MVT VT;
5175 int SubRegOp;
5176 unsigned ROpc, MOpc;
5177
5178 // For each of these checks we need to be careful if the sign flag is
5179 // being used. It is only safe to use the sign flag in two cases:
5180 // either the sign bit in the shrunken mask is zero or the final test
5181 // size is equal to the original compare size.
5182
5183 if (isUInt<8>(Mask) &&
5184 (!(Mask & 0x80) || CmpVT == MVT::i8 ||
5185 hasNoSignFlagUses(SDValue(Node, 0)))) {
5186 // For example, convert "testl %eax, $8" to "testb %al, $8"
5187 VT = MVT::i8;
5188 SubRegOp = X86::sub_8bit;
5189 ROpc = X86::TEST8ri;
5190 MOpc = X86::TEST8mi;
5191 } else if (OptForMinSize && isUInt<16>(Mask) &&
5192 (!(Mask & 0x8000) || CmpVT == MVT::i16 ||
5193 hasNoSignFlagUses(SDValue(Node, 0)))) {
5194 // For example, "testl %eax, $32776" to "testw %ax, $32776".
5195 // NOTE: We only want to form TESTW instructions if optimizing for
5196 // min size. Otherwise we only save one byte and possibly get a length
5197 // changing prefix penalty in the decoders.
5198 VT = MVT::i16;
5199 SubRegOp = X86::sub_16bit;
5200 ROpc = X86::TEST16ri;
5201 MOpc = X86::TEST16mi;
5202 } else if (isUInt<32>(Mask) && N0.getValueType() != MVT::i16 &&
5203 ((!(Mask & 0x80000000) &&
5204 // Without minsize, 16-bit compares can get here, so we need to
5205 // be sure we calculate the correct sign flag when it is needed.
5206 (CmpVT != MVT::i16 || !(Mask & 0x8000))) ||
5207 CmpVT == MVT::i32 ||
5208 hasNoSignFlagUses(SDValue(Node, 0)))) {
5209 // For example, "testq %rax, $268468232" to "testl %eax, $268468232".
5210 // NOTE: We only want to run that transform if N0 is 32 or 64 bits.
5211 // Otherwise, we find ourselves in a position where we have to do
5212 // promotion. If previous passes did not promote the and, we assume
5213 // they had a good reason not to and do not promote here.
5214 VT = MVT::i32;
5215 SubRegOp = X86::sub_32bit;
5216 ROpc = X86::TEST32ri;
5217 MOpc = X86::TEST32mi;
5218 } else {
5219 // No eligible transformation was found.
5220 break;
5221 }
5222
5223 SDValue Imm = CurDAG->getTargetConstant(Mask, dl, VT);
5224 SDValue Reg = N0.getOperand(0);
5225
5226 // Emit a testl or testw.
5227 MachineSDNode *NewNode;
5228 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
5229 if (tryFoldLoad(Node, N0.getNode(), Reg, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
5230 if (auto *LoadN = dyn_cast<LoadSDNode>(N0.getOperand(0).getNode())) {
5231 if (!LoadN->isSimple()) {
5232 unsigned NumVolBits = LoadN->getValueType(0).getSizeInBits();
5233 if (MOpc == X86::TEST8mi && NumVolBits != 8)
5234 break;
5235 else if (MOpc == X86::TEST16mi && NumVolBits != 16)
5236 break;
5237 else if (MOpc == X86::TEST32mi && NumVolBits != 32)
5238 break;
5239 }
5240 }
5241 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
5242 Reg.getOperand(0) };
5243 NewNode = CurDAG->getMachineNode(MOpc, dl, MVT::i32, MVT::Other, Ops);
5244 // Update the chain.
5245 ReplaceUses(Reg.getValue(1), SDValue(NewNode, 1));
5246 // Record the mem-refs
5247 CurDAG->setNodeMemRefs(NewNode,
5248 {cast<LoadSDNode>(Reg)->getMemOperand()});
5249 } else {
5250 // Extract the subregister if necessary.
5251 if (N0.getValueType() != VT)
5252 Reg = CurDAG->getTargetExtractSubreg(SubRegOp, dl, VT, Reg);
5253
5254 NewNode = CurDAG->getMachineNode(ROpc, dl, MVT::i32, Reg, Imm);
5255 }
5256 // Replace CMP with TEST.
5257 ReplaceNode(Node, NewNode);
5258 return;
5259 }
5260 break;
5261 }
5262 case X86ISD::PCMPISTR: {
5263 if (!Subtarget->hasSSE42())
5264 break;
5265
5266 bool NeedIndex = !SDValue(Node, 0).use_empty();
5267 bool NeedMask = !SDValue(Node, 1).use_empty();
5268 // We can't fold a load if we are going to make two instructions.
5269 bool MayFoldLoad = !NeedIndex || !NeedMask;
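// When both results are live we emit separate PCMPISTRI and PCMPISTRM
// instructions; folding the load into each would duplicate the memory
// access, so in that case the operand stays in a register.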
5270
5271 MachineSDNode *CNode;
5272 if (NeedMask) {
5273 unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPISTRMrr : X86::PCMPISTRMrr;
5274 unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPISTRMrm : X86::PCMPISTRMrm;
5275 CNode = emitPCMPISTR(ROpc, MOpc, MayFoldLoad, dl, MVT::v16i8, Node);
5276 ReplaceUses(SDValue(Node, 1), SDValue(CNode, 0));
5277 }
5278 if (NeedIndex || !NeedMask) {
5279 unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPISTRIrr : X86::PCMPISTRIrr;
5280 unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPISTRIrm : X86::PCMPISTRIrm;
5281 CNode = emitPCMPISTR(ROpc, MOpc, MayFoldLoad, dl, MVT::i32, Node);
5282 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
5283 }
5284
5285 // Connect the flag usage to the last instruction created.
5286 ReplaceUses(SDValue(Node, 2), SDValue(CNode, 1));
5287 CurDAG->RemoveDeadNode(Node);
5288 return;
5289 }
5290 case X86ISD::PCMPESTR: {
5291 if (!Subtarget->hasSSE42())
5292 break;
5293
5294 // Copy the two implicit register inputs.
5295 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EAX,
5296 Node->getOperand(1),
5297 SDValue()).getValue(1);
5298 InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EDX,
5299 Node->getOperand(3), InFlag).getValue(1);
5300
5301 bool NeedIndex = !SDValue(Node, 0).use_empty();
5302 bool NeedMask = !SDValue(Node, 1).use_empty();
5303 // We can't fold a load if we are going to make two instructions.
5304 bool MayFoldLoad = !NeedIndex || !NeedMask;
5305
5306 MachineSDNode *CNode;
5307 if (NeedMask) {
5308 unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPESTRMrr : X86::PCMPESTRMrr;
5309 unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPESTRMrm : X86::PCMPESTRMrm;
5310 CNode = emitPCMPESTR(ROpc, MOpc, MayFoldLoad, dl, MVT::v16i8, Node,
5311 InFlag);
5312 ReplaceUses(SDValue(Node, 1), SDValue(CNode, 0));
5313 }
5314 if (NeedIndex || !NeedMask) {
5315 unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPESTRIrr : X86::PCMPESTRIrr;
5316 unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPESTRIrm : X86::PCMPESTRIrm;
5317 CNode = emitPCMPESTR(ROpc, MOpc, MayFoldLoad, dl, MVT::i32, Node, InFlag);
5318 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
5319 }
5320 // Connect the flag usage to the last instruction created.
5321 ReplaceUses(SDValue(Node, 2), SDValue(CNode, 1));
5322 CurDAG->RemoveDeadNode(Node);
5323 return;
5324 }
5325
5326 case ISD::SETCC: {
5327 if (NVT.isVector() && tryVPTESTM(Node, SDValue(Node, 0), SDValue()))
5328 return;
5329
5330 break;
5331 }
5332
5333 case ISD::STORE:
5334 if (foldLoadStoreIntoMemOperand(Node))
5335 return;
5336 break;
5337
5338 case X86ISD::SETCC_CARRY: {
5339 // We have to do this manually because tblgen will put the eflags copy in
5340 // the wrong place if we use an extract_subreg in the pattern.
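// SETB_C32r/SETB_C64r are pseudos for 'sbb %reg, %reg', which
// materializes all-zeros or all-ones from the carry flag in a single
// instruction.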
5341 MVT VT = Node->getSimpleValueType(0);
5342
5343 // Copy flags to the EFLAGS register and glue it to next node.
5344 SDValue EFLAGS =
5345 CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EFLAGS,
5346 Node->getOperand(1), SDValue());
5347
5348 // Create a 64-bit instruction if the result is 64 bits; otherwise use the
5349 // 32-bit version.
5350 unsigned Opc = VT == MVT::i64 ? X86::SETB_C64r : X86::SETB_C32r;
5351 MVT SetVT = VT == MVT::i64 ? MVT::i64 : MVT::i32;
5352 SDValue Result = SDValue(
5353 CurDAG->getMachineNode(Opc, dl, SetVT, EFLAGS, EFLAGS.getValue(1)), 0);
5354
5355 // For less than 32-bits we need to extract from the 32-bit node.
5356 if (VT == MVT::i8 || VT == MVT::i16) {
5357 int SubIndex = VT == MVT::i16 ? X86::sub_16bit : X86::sub_8bit;
5358 Result = CurDAG->getTargetExtractSubreg(SubIndex, dl, VT, Result);
5359 }
5360
5361 ReplaceUses(SDValue(Node, 0), Result);
5362 CurDAG->RemoveDeadNode(Node);
5363 return;
5364 }
5365 case X86ISD::SBB: {
5366 if (isNullConstant(Node->getOperand(0)) &&
5367 isNullConstant(Node->getOperand(1))) {
5368 MVT VT = Node->getSimpleValueType(0);
5369
5370 // Create zero.
5371 SDVTList VTs = CurDAG->getVTList(MVT::i32, MVT::i32);
5372 SDValue Zero =
5373 SDValue(CurDAG->getMachineNode(X86::MOV32r0, dl, VTs, None), 0);
5374 if (VT == MVT::i64) {
5375 Zero = SDValue(
5376 CurDAG->getMachineNode(
5377 TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
5378 CurDAG->getTargetConstant(0, dl, MVT::i64), Zero,
5379 CurDAG->getTargetConstant(X86::sub_32bit, dl, MVT::i32)),
5380 0);
5381 }
5382
5383 // Copy flags to the EFLAGS register and glue it to next node.
5384 SDValue EFLAGS =
5385 CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EFLAGS,
5386 Node->getOperand(2), SDValue());
5387
5388 // Create a 64-bit instruction if the result is 64 bits; otherwise use the
5389 // 32-bit version.
5390 unsigned Opc = VT == MVT::i64 ? X86::SBB64rr : X86::SBB32rr;
5391 MVT SBBVT = VT == MVT::i64 ? MVT::i64 : MVT::i32;
5392 VTs = CurDAG->getVTList(SBBVT, MVT::i32);
5393 SDValue Result =
5394 SDValue(CurDAG->getMachineNode(Opc, dl, VTs, {Zero, Zero, EFLAGS,
5395 EFLAGS.getValue(1)}),
5396 0);
5397
5398 // Replace the flag use.
5399 ReplaceUses(SDValue(Node, 1), Result.getValue(1));
5400
5401 // Replace the result use.
5402 if (!SDValue(Node, 0).use_empty()) {
5403 // For less than 32-bits we need to extract from the 32-bit node.
5404 if (VT == MVT::i8 || VT == MVT::i16) {
5405 int SubIndex = VT == MVT::i16 ? X86::sub_16bit : X86::sub_8bit;
5406 Result = CurDAG->getTargetExtractSubreg(SubIndex, dl, VT, Result);
5407 }
5408 ReplaceUses(SDValue(Node, 0), Result);
5409 }
5410
5411 CurDAG->RemoveDeadNode(Node);
5412 return;
5413 }
5414 break;
5415 }
5416 case X86ISD::MGATHER: {
5417 auto *Mgt = cast<X86MaskedGatherSDNode>(Node);
5418 SDValue IndexOp = Mgt->getIndex();
5419 SDValue Mask = Mgt->getMask();
5420 MVT IndexVT = IndexOp.getSimpleValueType();
5421 MVT ValueVT = Node->getSimpleValueType(0);
5422 MVT MaskVT = Mask.getSimpleValueType();
5423
5424 // This is just to prevent crashes if the nodes are malformed somehow.
5425 // Otherwise we only do loose type checking here, based on what a type
5426 // constraint would say, just like table-based isel.
5427 if (!ValueVT.isVector() || !MaskVT.isVector())
5428 break;
5429
5430 unsigned NumElts = ValueVT.getVectorNumElements();
5431 MVT ValueSVT = ValueVT.getVectorElementType();
5432
5433 bool IsFP = ValueSVT.isFloatingPoint();
5434 unsigned EltSize = ValueSVT.getSizeInBits();
5435
5436 unsigned Opc = 0;
5437 bool AVX512Gather = MaskVT.getVectorElementType() == MVT::i1;
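// Opcode naming below: the letter after VGATHER/VPGATHER gives the
// index width (D = dword, Q = qword), the next part gives the element
// type (PS/PD for FP, D/Q for integer), and Z128/Z256/Z selects the
// EVEX vector length.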
5438 if (AVX512Gather) {
5439 if (IndexVT == MVT::v4i32 && NumElts == 4 && EltSize == 32)
5440 Opc = IsFP ? X86::VGATHERDPSZ128rm : X86::VPGATHERDDZ128rm;
5441 else if (IndexVT == MVT::v8i32 && NumElts == 8 && EltSize == 32)
5442 Opc = IsFP ? X86::VGATHERDPSZ256rm : X86::VPGATHERDDZ256rm;
5443 else if (IndexVT == MVT::v16i32 && NumElts == 16 && EltSize == 32)
5444 Opc = IsFP ? X86::VGATHERDPSZrm : X86::VPGATHERDDZrm;
5445 else if (IndexVT == MVT::v4i32 && NumElts == 2 && EltSize == 64)
5446 Opc = IsFP ? X86::VGATHERDPDZ128rm : X86::VPGATHERDQZ128rm;
5447 else if (IndexVT == MVT::v4i32 && NumElts == 4 && EltSize == 64)
5448 Opc = IsFP ? X86::VGATHERDPDZ256rm : X86::VPGATHERDQZ256rm;
5449 else if (IndexVT == MVT::v8i32 && NumElts == 8 && EltSize == 64)
5450 Opc = IsFP ? X86::VGATHERDPDZrm : X86::VPGATHERDQZrm;
5451 else if (IndexVT == MVT::v2i64 && NumElts == 4 && EltSize == 32)
5452 Opc = IsFP ? X86::VGATHERQPSZ128rm : X86::VPGATHERQDZ128rm;
5453 else if (IndexVT == MVT::v4i64 && NumElts == 4 && EltSize == 32)
5454 Opc = IsFP ? X86::VGATHERQPSZ256rm : X86::VPGATHERQDZ256rm;
5455 else if (IndexVT == MVT::v8i64 && NumElts == 8 && EltSize == 32)
5456 Opc = IsFP ? X86::VGATHERQPSZrm : X86::VPGATHERQDZrm;
5457 else if (IndexVT == MVT::v2i64 && NumElts == 2 && EltSize == 64)
5458 Opc = IsFP ? X86::VGATHERQPDZ128rm : X86::VPGATHERQQZ128rm;
5459 else if (IndexVT == MVT::v4i64 && NumElts == 4 && EltSize == 64)
5460 Opc = IsFP ? X86::VGATHERQPDZ256rm : X86::VPGATHERQQZ256rm;
5461 else if (IndexVT == MVT::v8i64 && NumElts == 8 && EltSize == 64)
5462 Opc = IsFP ? X86::VGATHERQPDZrm : X86::VPGATHERQQZrm;
5463 } else {
5464 assert(EVT(MaskVT) == EVT(ValueVT).changeVectorElementTypeToInteger() &&
5465 "Unexpected mask VT!");
5466 if (IndexVT == MVT::v4i32 && NumElts == 4 && EltSize == 32)
5467 Opc = IsFP ? X86::VGATHERDPSrm : X86::VPGATHERDDrm;
5468 else if (IndexVT == MVT::v8i32 && NumElts == 8 && EltSize == 32)
5469 Opc = IsFP ? X86::VGATHERDPSYrm : X86::VPGATHERDDYrm;
5470 else if (IndexVT == MVT::v4i32 && NumElts == 2 && EltSize == 64)
5471 Opc = IsFP ? X86::VGATHERDPDrm : X86::VPGATHERDQrm;
5472 else if (IndexVT == MVT::v4i32 && NumElts == 4 && EltSize == 64)
5473 Opc = IsFP ? X86::VGATHERDPDYrm : X86::VPGATHERDQYrm;
5474 else if (IndexVT == MVT::v2i64 && NumElts == 4 && EltSize == 32)
5475 Opc = IsFP ? X86::VGATHERQPSrm : X86::VPGATHERQDrm;
5476 else if (IndexVT == MVT::v4i64 && NumElts == 4 && EltSize == 32)
5477 Opc = IsFP ? X86::VGATHERQPSYrm : X86::VPGATHERQDYrm;
5478 else if (IndexVT == MVT::v2i64 && NumElts == 2 && EltSize == 64)
5479 Opc = IsFP ? X86::VGATHERQPDrm : X86::VPGATHERQQrm;
5480 else if (IndexVT == MVT::v4i64 && NumElts == 4 && EltSize == 64)
5481 Opc = IsFP ? X86::VGATHERQPDYrm : X86::VPGATHERQQYrm;
5482 }
5483
5484 if (!Opc)
5485 break;
5486
5487 SDValue Base, Scale, Index, Disp, Segment;
5488 if (!selectVectorAddr(Mgt, Mgt->getBasePtr(), IndexOp, Mgt->getScale(),
5489 Base, Scale, Index, Disp, Segment))
5490 break;
5491
5492 SDValue PassThru = Mgt->getPassThru();
5493 SDValue Chain = Mgt->getChain();
5494 // Gather instructions have a mask output that is not present in the ISD node.
5495 SDVTList VTs = CurDAG->getVTList(ValueVT, MaskVT, MVT::Other);
5496
5497 MachineSDNode *NewNode;
5498 if (AVX512Gather) {
5499 SDValue Ops[] = {PassThru, Mask, Base, Scale,
5500 Index, Disp, Segment, Chain};
5501 NewNode = CurDAG->getMachineNode(Opc, SDLoc(dl), VTs, Ops);
5502 } else {
5503 SDValue Ops[] = {PassThru, Base, Scale, Index,
5504 Disp, Segment, Mask, Chain};
5505 NewNode = CurDAG->getMachineNode(Opc, SDLoc(dl), VTs, Ops);
5506 }
5507 CurDAG->setNodeMemRefs(NewNode, {Mgt->getMemOperand()});
5508 ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 0));
5509 ReplaceUses(SDValue(Node, 1), SDValue(NewNode, 2));
5510 CurDAG->RemoveDeadNode(Node);
5511 return;
5512 }
5513 case X86ISD::MSCATTER: {
5514 auto *Sc = cast<X86MaskedScatterSDNode>(Node);
5515 SDValue Value = Sc->getValue();
5516 SDValue IndexOp = Sc->getIndex();
5517 MVT IndexVT = IndexOp.getSimpleValueType();
5518 MVT ValueVT = Value.getSimpleValueType();
5519
5520 // This is just to prevent crashes if the nodes are malformed somehow.
5521 // Otherwise we only do loose type checking here, based on what a type
5522 // constraint would say, just like table-based isel.
5523 if (!ValueVT.isVector())
5524 break;
5525
5526 unsigned NumElts = ValueVT.getVectorNumElements();
5527 MVT ValueSVT = ValueVT.getVectorElementType();
5528
5529 bool IsFP = ValueSVT.isFloatingPoint();
5530 unsigned EltSize = ValueSVT.getSizeInBits();
5531
5532 unsigned Opc;
5533 if (IndexVT == MVT::v4i32 && NumElts == 4 && EltSize == 32)
5534 Opc = IsFP ? X86::VSCATTERDPSZ128mr : X86::VPSCATTERDDZ128mr;
5535 else if (IndexVT == MVT::v8i32 && NumElts == 8 && EltSize == 32)
5536 Opc = IsFP ? X86::VSCATTERDPSZ256mr : X86::VPSCATTERDDZ256mr;
5537 else if (IndexVT == MVT::v16i32 && NumElts == 16 && EltSize == 32)
5538 Opc = IsFP ? X86::VSCATTERDPSZmr : X86::VPSCATTERDDZmr;
5539 else if (IndexVT == MVT::v4i32 && NumElts == 2 && EltSize == 64)
5540 Opc = IsFP ? X86::VSCATTERDPDZ128mr : X86::VPSCATTERDQZ128mr;
5541 else if (IndexVT == MVT::v4i32 && NumElts == 4 && EltSize == 64)
5542 Opc = IsFP ? X86::VSCATTERDPDZ256mr : X86::VPSCATTERDQZ256mr;
5543 else if (IndexVT == MVT::v8i32 && NumElts == 8 && EltSize == 64)
5544 Opc = IsFP ? X86::VSCATTERDPDZmr : X86::VPSCATTERDQZmr;
5545 else if (IndexVT == MVT::v2i64 && NumElts == 4 && EltSize == 32)
5546 Opc = IsFP ? X86::VSCATTERQPSZ128mr : X86::VPSCATTERQDZ128mr;
5547 else if (IndexVT == MVT::v4i64 && NumElts == 4 && EltSize == 32)
5548 Opc = IsFP ? X86::VSCATTERQPSZ256mr : X86::VPSCATTERQDZ256mr;
5549 else if (IndexVT == MVT::v8i64 && NumElts == 8 && EltSize == 32)
5550 Opc = IsFP ? X86::VSCATTERQPSZmr : X86::VPSCATTERQDZmr;
5551 else if (IndexVT == MVT::v2i64 && NumElts == 2 && EltSize == 64)
5552 Opc = IsFP ? X86::VSCATTERQPDZ128mr : X86::VPSCATTERQQZ128mr;
5553 else if (IndexVT == MVT::v4i64 && NumElts == 4 && EltSize == 64)
5554 Opc = IsFP ? X86::VSCATTERQPDZ256mr : X86::VPSCATTERQQZ256mr;
5555 else if (IndexVT == MVT::v8i64 && NumElts == 8 && EltSize == 64)
5556 Opc = IsFP ? X86::VSCATTERQPDZmr : X86::VPSCATTERQQZmr;
5557 else
5558 break;
5559
5560 SDValue Base, Scale, Index, Disp, Segment;
5561 if (!selectVectorAddr(Sc, Sc->getBasePtr(), IndexOp, Sc->getScale(),
5562 Base, Scale, Index, Disp, Segment))
5563 break;
5564
5565 SDValue Mask = Sc->getMask();
5566 SDValue Chain = Sc->getChain();
5567 // Scatter instructions have a mask output that is not present in the ISD node.
5568 SDVTList VTs = CurDAG->getVTList(Mask.getValueType(), MVT::Other);
5569 SDValue Ops[] = {Base, Scale, Index, Disp, Segment, Mask, Value, Chain};
5570
5571 MachineSDNode *NewNode = CurDAG->getMachineNode(Opc, SDLoc(dl), VTs, Ops);
5572 CurDAG->setNodeMemRefs(NewNode, {Sc->getMemOperand()});
5573 ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 1));
5574 CurDAG->RemoveDeadNode(Node);
5575 return;
5576 }
5577 case ISD::PREALLOCATED_SETUP: {
5578 auto *MFI = CurDAG->getMachineFunction().getInfo<X86MachineFunctionInfo>();
5579 auto CallId = MFI->getPreallocatedIdForCallSite(
5580 cast<SrcValueSDNode>(Node->getOperand(1))->getValue());
5581 SDValue Chain = Node->getOperand(0);
5582 SDValue CallIdValue = CurDAG->getTargetConstant(CallId, dl, MVT::i32);
5583 MachineSDNode *New = CurDAG->getMachineNode(
5584 TargetOpcode::PREALLOCATED_SETUP, dl, MVT::Other, CallIdValue, Chain);
5585 ReplaceUses(SDValue(Node, 0), SDValue(New, 0)); // Chain
5586 CurDAG->RemoveDeadNode(Node);
5587 return;
5588 }
5589 case ISD::PREALLOCATED_ARG: {
5590 auto *MFI = CurDAG->getMachineFunction().getInfo<X86MachineFunctionInfo>();
5591 auto CallId = MFI->getPreallocatedIdForCallSite(
5592 cast<SrcValueSDNode>(Node->getOperand(1))->getValue());
5593 SDValue Chain = Node->getOperand(0);
5594 SDValue CallIdValue = CurDAG->getTargetConstant(CallId, dl, MVT::i32);
5595 SDValue ArgIndex = Node->getOperand(2);
5596 SDValue Ops[3];
5597 Ops[0] = CallIdValue;
5598 Ops[1] = ArgIndex;
5599 Ops[2] = Chain;
5600 MachineSDNode *New = CurDAG->getMachineNode(
5601 TargetOpcode::PREALLOCATED_ARG, dl,
5602 CurDAG->getVTList(TLI->getPointerTy(CurDAG->getDataLayout()),
5603 MVT::Other),
5604 Ops);
5605 ReplaceUses(SDValue(Node, 0), SDValue(New, 0)); // Arg pointer
5606 ReplaceUses(SDValue(Node, 1), SDValue(New, 1)); // Chain
5607 CurDAG->RemoveDeadNode(Node);
5608 return;
5609 }
5610 }
5611
5612 SelectCode(Node);
5613 }
5614
5615 bool X86DAGToDAGISel::
5616 SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
5617 std::vector<SDValue> &OutOps) {
5618 SDValue Op0, Op1, Op2, Op3, Op4;
5619 switch (ConstraintID) {
5620 default:
5621 llvm_unreachable("Unexpected asm memory constraint");
5622 case InlineAsm::Constraint_o: // offsetable ??
5623 case InlineAsm::Constraint_v: // not offsetable ??
5624 case InlineAsm::Constraint_m: // memory
5625 case InlineAsm::Constraint_X:
5626 if (!selectAddr(nullptr, Op, Op0, Op1, Op2, Op3, Op4))
5627 return true;
5628 break;
5629 }
5630
5631 OutOps.push_back(Op0);
5632 OutOps.push_back(Op1);
5633 OutOps.push_back(Op2);
5634 OutOps.push_back(Op3);
5635 OutOps.push_back(Op4);
5636 return false;
5637 }
5638
5639 /// This pass converts a legalized DAG into a X86-specific DAG,
5640 /// ready for instruction scheduling.
5641 FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
5642 CodeGenOpt::Level OptLevel) {
5643 return new X86DAGToDAGISel(TM, OptLevel);
5644 }
5645